From 851d99d99e9015d2d9918df56b01885bae5234e5 Mon Sep 17 00:00:00 2001 From: Vlad <129996061+vvysokikh1@users.noreply.github.com> Date: Fri, 4 Apr 2025 13:28:33 +0100 Subject: [PATCH 001/244] fix: uint128 ambiguousness breaking macos unity build (#5386) --- src/libxrpl/basics/mulDiv.cpp | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/libxrpl/basics/mulDiv.cpp b/src/libxrpl/basics/mulDiv.cpp index 882878ebec..57c3dd81cd 100644 --- a/src/libxrpl/basics/mulDiv.cpp +++ b/src/libxrpl/basics/mulDiv.cpp @@ -31,9 +31,7 @@ namespace ripple { std::optional mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div) { - using namespace boost::multiprecision; - - uint128_t result; + boost::multiprecision::uint128_t result; result = multiply(result, value, mul); result /= div; From e923ec6d3641d6f03de2b2bc53b0f533ea999b74 Mon Sep 17 00:00:00 2001 From: Valentin Balaschenko <13349202+vlntb@users.noreply.github.com> Date: Fri, 4 Apr 2025 19:21:17 +0100 Subject: [PATCH 002/244] Fix to correct memory ordering for compare_exchange_weak and wait in the intrusive reference counting logic (#5381) This change addresses a memory ordering assertion failure observed on one of the Windows test machines during the IntrusiveShared_test suite. --- include/xrpl/basics/IntrusiveRefCounts.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/include/xrpl/basics/IntrusiveRefCounts.h b/include/xrpl/basics/IntrusiveRefCounts.h index f3c707422b..750b945803 100644 --- a/include/xrpl/basics/IntrusiveRefCounts.h +++ b/include/xrpl/basics/IntrusiveRefCounts.h @@ -294,7 +294,7 @@ IntrusiveRefCounts::releaseStrongRef() const } if (refCounts.compare_exchange_weak( - prevIntVal, nextIntVal, std::memory_order_release)) + prevIntVal, nextIntVal, std::memory_order_acq_rel)) { // Can't be in partial destroy because only decrementing the strong // count to zero can start a partial destroy, and that can't happen @@ -351,7 +351,7 @@ IntrusiveRefCounts::addWeakReleaseStrongRef() const } } if (refCounts.compare_exchange_weak( - prevIntVal, nextIntVal, std::memory_order_release)) + prevIntVal, nextIntVal, std::memory_order_acq_rel)) { XRPL_ASSERT( (!(prevIntVal & partialDestroyStartedMask)), @@ -374,7 +374,7 @@ IntrusiveRefCounts::releaseWeakRef() const // This case should only be hit if the partialDestroyStartedBit is // set non-atomically (and even then very rarely). The code is kept // in case we need to set the flag non-atomically for perf reasons. 
- refCounts.wait(prevIntVal, std::memory_order_acq_rel); + refCounts.wait(prevIntVal, std::memory_order_acquire); prevIntVal = refCounts.load(std::memory_order_acquire); prev = RefCountPair{prevIntVal}; } @@ -382,7 +382,7 @@ IntrusiveRefCounts::releaseWeakRef() const { // partial destroy MUST finish before running a full destroy (when // using weak pointers) - refCounts.wait(prevIntVal - weakDelta, std::memory_order_acq_rel); + refCounts.wait(prevIntVal - weakDelta, std::memory_order_acquire); } return ReleaseWeakRefAction::destroy; } @@ -396,7 +396,7 @@ IntrusiveRefCounts::checkoutStrongRefFromWeak() const noexcept auto desiredValue = RefCountPair{2, 1}.combinedValue(); while (!refCounts.compare_exchange_weak( - curValue, desiredValue, std::memory_order_release)) + curValue, desiredValue, std::memory_order_acq_rel)) { RefCountPair const prev{curValue}; if (!prev.strong) From 4ba9288935672b237a36b894d34733ec6f7bd485 Mon Sep 17 00:00:00 2001 From: Mayukha Vadari Date: Fri, 4 Apr 2025 21:08:34 -0400 Subject: [PATCH 003/244] fix: disable `channel_authorize` when `signing_support` is disabled (#5385) --- API-CHANGELOG.md | 10 +++++++++- src/xrpld/rpc/handlers/PayChanClaim.cpp | 7 +++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/API-CHANGELOG.md b/API-CHANGELOG.md index 0d5d8a8196..dd3fcd018b 100644 --- a/API-CHANGELOG.md +++ b/API-CHANGELOG.md @@ -83,9 +83,17 @@ The [commandline](https://xrpl.org/docs/references/http-websocket-apis/api-conve The `network_id` field was added in the `server_info` response in version 1.5.0 (2019), but it is not returned in [reporting mode](https://xrpl.org/rippled-server-modes.html#reporting-mode). However, use of reporting mode is now discouraged, in favor of using [Clio](https://github.com/XRPLF/clio) instead. +## XRP Ledger server version 2.5.0 + +As of 2025-04-04, version 2.5.0 is in development. You can use a pre-release version by building from source or [using the `nightly` package](https://xrpl.org/docs/infrastructure/installation/install-rippled-on-ubuntu). + +### Additions and bugfixes in 2.5.0 + +- `channel_authorize`: If `signing_support` is not enabled in the config, the RPC is disabled. + ## XRP Ledger server version 2.4.0 -As of 2025-01-28, version 2.4.0 is in development. You can use a pre-release version by building from source or [using the `nightly` package](https://xrpl.org/docs/infrastructure/installation/install-rippled-on-ubuntu). +[Version 2.4.0](https://github.com/XRPLF/rippled/releases/tag/2.4.0) was released on March 4, 2025. 
### Additions and bugfixes in 2.4.0 diff --git a/src/xrpld/rpc/handlers/PayChanClaim.cpp b/src/xrpld/rpc/handlers/PayChanClaim.cpp index aaa559895c..b62f5e54e5 100644 --- a/src/xrpld/rpc/handlers/PayChanClaim.cpp +++ b/src/xrpld/rpc/handlers/PayChanClaim.cpp @@ -17,6 +17,7 @@ */ //============================================================================== +#include #include #include #include @@ -40,6 +41,12 @@ namespace ripple { Json::Value doChannelAuthorize(RPC::JsonContext& context) { + if (context.role != Role::ADMIN && !context.app.config().canSign()) + { + return RPC::make_error( + rpcNOT_SUPPORTED, "Signing is not supported by this server."); + } + auto const& params(context.params); for (auto const& p : {jss::channel_id, jss::amount}) if (!params.isMember(p)) From ca0bc767fee9c10532754c874de5697722c43986 Mon Sep 17 00:00:00 2001 From: Michael Legleux Date: Fri, 4 Apr 2025 19:24:31 -0700 Subject: [PATCH 004/244] fix: Use the build image from ghcr.io (#5390) The ci pipelines are constantly hitting Docker Hub's public rate limiting since increasing the number of jobs we're running. This change switches over to images hosted in GitHub's registry. --- .github/workflows/doxygen.yml | 2 +- .github/workflows/libxrpl.yml | 2 +- .github/workflows/nix.yml | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/doxygen.yml b/.github/workflows/doxygen.yml index e2265d1b83..0693308ef0 100644 --- a/.github/workflows/doxygen.yml +++ b/.github/workflows/doxygen.yml @@ -14,7 +14,7 @@ jobs: runs-on: ubuntu-latest permissions: contents: write - container: rippleci/rippled-build-ubuntu:aaf5e3e + container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e steps: - name: checkout uses: actions/checkout@v4 diff --git a/.github/workflows/libxrpl.yml b/.github/workflows/libxrpl.yml index 2834595e3a..92deff7810 100644 --- a/.github/workflows/libxrpl.yml +++ b/.github/workflows/libxrpl.yml @@ -20,7 +20,7 @@ jobs: version: ${{ steps.version.outputs.version }} channel: ${{ steps.channel.outputs.channel }} runs-on: [self-hosted, heavy] - container: rippleci/rippled-build-ubuntu:aaf5e3e + container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e steps: - name: Wait for essential checks to succeed uses: lewagon/wait-on-check-action@v1.3.4 diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml index c4507c8440..abae2ee84a 100644 --- a/.github/workflows/nix.yml +++ b/.github/workflows/nix.yml @@ -62,7 +62,7 @@ jobs: cc: /usr/bin/clang-14 cxx: /usr/bin/clang++-14 runs-on: [self-hosted, heavy] - container: rippleci/rippled-build-ubuntu:aaf5e3e + container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e env: build_dir: .build steps: @@ -124,7 +124,7 @@ jobs: - "-Dunity=ON" needs: dependencies runs-on: [self-hosted, heavy] - container: rippleci/rippled-build-ubuntu:aaf5e3e + container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e env: build_dir: .build steps: @@ -178,7 +178,7 @@ jobs: - "-DUNIT_TEST_REFERENCE_FEE=1000" needs: dependencies runs-on: [self-hosted, heavy] - container: rippleci/rippled-build-ubuntu:aaf5e3e + container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e env: build_dir: .build steps: @@ -229,7 +229,7 @@ jobs: - Debug needs: dependencies runs-on: [self-hosted, heavy] - container: rippleci/rippled-build-ubuntu:aaf5e3e + container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e env: build_dir: .build steps: @@ -303,7 +303,7 @@ jobs: conan: needs: dependencies runs-on: [self-hosted, heavy] - container: rippleci/rippled-build-ubuntu:aaf5e3e + container: 
ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e env: build_dir: .build configuration: Release From a099f5a804e7597b99afb108a7973056fedabee0 Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Tue, 8 Apr 2025 15:43:34 +0100 Subject: [PATCH 005/244] Remove UNREACHABLE from `NetworkOPsImp::processTrustedProposal` (#5387) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It’s possible for this to happen legitimately if a set of peers, including a validator, are connected in a cycle, and the latency and message processing time between those peers is significantly less than the latency between the validator and the last peer. It’s unlikely in the real world, but obviously easy to simulate with Antithesis. --- src/xrpld/app/misc/NetworkOPs.cpp | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/xrpld/app/misc/NetworkOPs.cpp b/src/xrpld/app/misc/NetworkOPs.cpp index 0acc5a215d..b72963aa81 100644 --- a/src/xrpld/app/misc/NetworkOPs.cpp +++ b/src/xrpld/app/misc/NetworkOPs.cpp @@ -1936,11 +1936,14 @@ NetworkOPsImp::processTrustedProposal(RCLCxPeerPos peerPos) // is a trusted message, it could be a very big deal. Either way, we // don't want to relay the proposal. Note that the byzantine behavior // detection in handleNewValidation will notify other peers. - UNREACHABLE( - "ripple::NetworkOPsImp::processTrustedProposal : received own " - "proposal"); + // + // Another, innocuous explanation is unusual message routing and delays, + // causing this node to receive its own messages back. JLOG(m_journal.error()) - << "Received a TRUSTED proposal signed with my key from a peer"; + << "Received a proposal signed by MY KEY from a peer. This may " + "indicate a misconfiguration where another node has the same " + "validator key, or may be caused by unusual message routing and " + "delays."; return false; } From 7692eeb9a0fc86089b166ea2130aef10dce5e69b Mon Sep 17 00:00:00 2001 From: Vito Tumas <5780819+Tapanito@users.noreply.github.com> Date: Wed, 9 Apr 2025 15:33:17 +0200 Subject: [PATCH 006/244] Instrument proposal, validation and transaction messages (#5348) Adds metric counters for the following P2P message types: * Untrusted proposal and validation messages * Duplicate proposal, validation and transaction messages --- src/test/overlay/traffic_count_test.cpp | 157 ++++++++++++++ src/xrpld/overlay/Message.h | 2 +- src/xrpld/overlay/detail/Message.cpp | 2 +- src/xrpld/overlay/detail/OverlayImpl.cpp | 41 ++-- src/xrpld/overlay/detail/OverlayImpl.h | 38 +++- src/xrpld/overlay/detail/PeerImp.cpp | 81 +++++-- src/xrpld/overlay/detail/TrafficCount.cpp | 74 +++---- src/xrpld/overlay/detail/TrafficCount.h | 244 ++++++++++++++++------ 8 files changed, 485 insertions(+), 154 deletions(-) create mode 100644 src/test/overlay/traffic_count_test.cpp diff --git a/src/test/overlay/traffic_count_test.cpp b/src/test/overlay/traffic_count_test.cpp new file mode 100644 index 0000000000..768ec21938 --- /dev/null +++ b/src/test/overlay/traffic_count_test.cpp @@ -0,0 +1,157 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2025 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. 
+ + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include + +#include +#include + +namespace ripple { + +namespace test { + +class traffic_count_test : public beast::unit_test::suite +{ +public: + traffic_count_test() = default; + + void + testCategorize() + { + testcase("categorize"); + protocol::TMPing message; + message.set_type(protocol::TMPing::ptPING); + + // a known message is categorized to a proper category + auto const known = + TrafficCount::categorize(message, protocol::mtPING, false); + BEAST_EXPECT(known == TrafficCount::category::base); + + // an unknown message type is categorized as unknown + auto const unknown = TrafficCount::categorize( + message, static_cast(99), false); + BEAST_EXPECT(unknown == TrafficCount::category::unknown); + } + + struct TestCase + { + std::string name; + int size; + bool inbound; + int messageCount; + std::uint64_t expectedBytesIn; + std::uint64_t expectedBytesOut; + std::uint64_t expectedMessagesIn; + std::uint64_t expectedMessagesOut; + }; + + void + testAddCount() + { + auto run = [&](TestCase const& tc) { + testcase(tc.name); + TrafficCount m_traffic; + + auto const counts = m_traffic.getCounts(); + std::for_each(counts.begin(), counts.end(), [&](auto const& pair) { + for (auto i = 0; i < tc.messageCount; ++i) + m_traffic.addCount(pair.first, tc.inbound, tc.size); + }); + + auto const counts_new = m_traffic.getCounts(); + std::for_each( + counts_new.begin(), counts_new.end(), [&](auto const& pair) { + BEAST_EXPECT( + pair.second.bytesIn.load() == tc.expectedBytesIn); + BEAST_EXPECT( + pair.second.bytesOut.load() == tc.expectedBytesOut); + BEAST_EXPECT( + pair.second.messagesIn.load() == tc.expectedMessagesIn); + BEAST_EXPECT( + pair.second.messagesOut.load() == + tc.expectedMessagesOut); + }); + }; + + auto const testcases = { + TestCase{ + .name = "zero-counts", + .size = 0, + .inbound = false, + .messageCount = 0, + .expectedBytesIn = 0, + .expectedBytesOut = 0, + .expectedMessagesIn = 0, + .expectedMessagesOut = 0, + }, + TestCase{ + .name = "inbound-counts", + .size = 10, + .inbound = true, + .messageCount = 10, + .expectedBytesIn = 100, + .expectedBytesOut = 0, + .expectedMessagesIn = 10, + .expectedMessagesOut = 0, + }, + TestCase{ + .name = "outbound-counts", + .size = 10, + .inbound = false, + .messageCount = 10, + .expectedBytesIn = 0, + .expectedBytesOut = 100, + .expectedMessagesIn = 0, + .expectedMessagesOut = 10, + }, + }; + + for (auto const& tc : testcases) + run(tc); + } + + void + testToString() + { + testcase("category-to-string"); + + // known category returns known string value + BEAST_EXPECT( + TrafficCount::to_string(TrafficCount::category::total) == "total"); + + // return "unknown" for unknown categories + BEAST_EXPECT( + TrafficCount::to_string( + static_cast(1000)) == "unknown"); + } + + void + run() override + { + testCategorize(); + testAddCount(); + testToString(); + } +}; + +BEAST_DEFINE_TESTSUITE(traffic_count, overlay, ripple); + +} // namespace test +} // namespace 
ripple diff --git a/src/xrpld/overlay/Message.h b/src/xrpld/overlay/Message.h index 92e2824327..d8f9536975 100644 --- a/src/xrpld/overlay/Message.h +++ b/src/xrpld/overlay/Message.h @@ -60,7 +60,7 @@ public: */ Message( ::google::protobuf::Message const& message, - int type, + protocol::MessageType type, std::optional const& validator = {}); /** Retrieve the size of the packed but uncompressed message data. */ diff --git a/src/xrpld/overlay/detail/Message.cpp b/src/xrpld/overlay/detail/Message.cpp index 6473f30c89..9cf9407199 100644 --- a/src/xrpld/overlay/detail/Message.cpp +++ b/src/xrpld/overlay/detail/Message.cpp @@ -26,7 +26,7 @@ namespace ripple { Message::Message( ::google::protobuf::Message const& message, - int type, + protocol::MessageType type, std::optional const& validator) : category_(TrafficCount::categorize(message, type, false)) , validatorKey_(validator) diff --git a/src/xrpld/overlay/detail/OverlayImpl.cpp b/src/xrpld/overlay/detail/OverlayImpl.cpp index 2209414d9f..e1ccc2ee84 100644 --- a/src/xrpld/overlay/detail/OverlayImpl.cpp +++ b/src/xrpld/overlay/detail/OverlayImpl.cpp @@ -41,6 +41,8 @@ #include +#include "xrpld/overlay/detail/TrafficCount.h" + namespace ripple { namespace CrawlOptions { @@ -145,13 +147,11 @@ OverlayImpl::OverlayImpl( std::bind(&OverlayImpl::collect_metrics, this), collector, [counts = m_traffic.getCounts(), collector]() { - std::vector ret; - ret.reserve(counts.size()); + std::unordered_map ret; - for (size_t i = 0; i < counts.size(); ++i) - { - ret.push_back(TrafficGauges(counts[i].name, collector)); - } + for (auto const& pair : counts) + ret.emplace( + pair.first, TrafficGauges(pair.second.name, collector)); return ret; }()) @@ -580,17 +580,14 @@ OverlayImpl::onWrite(beast::PropertyStream::Map& stream) { beast::PropertyStream::Set set("traffic", stream); auto const stats = m_traffic.getCounts(); - for (auto const& i : stats) + for (auto const& pair : stats) { - if (i) - { - beast::PropertyStream::Map item(set); - item["category"] = i.name; - item["bytes_in"] = std::to_string(i.bytesIn.load()); - item["messages_in"] = std::to_string(i.messagesIn.load()); - item["bytes_out"] = std::to_string(i.bytesOut.load()); - item["messages_out"] = std::to_string(i.messagesOut.load()); - } + beast::PropertyStream::Map item(set); + item["category"] = pair.second.name; + item["bytes_in"] = std::to_string(pair.second.bytesIn.load()); + item["messages_in"] = std::to_string(pair.second.messagesIn.load()); + item["bytes_out"] = std::to_string(pair.second.bytesOut.load()); + item["messages_out"] = std::to_string(pair.second.messagesOut.load()); } } @@ -690,14 +687,16 @@ OverlayImpl::onManifests( } void -OverlayImpl::reportTraffic( - TrafficCount::category cat, - bool isInbound, - int number) +OverlayImpl::reportInboundTraffic(TrafficCount::category cat, int size) { - m_traffic.addCount(cat, isInbound, number); + m_traffic.addCount(cat, true, size); } +void +OverlayImpl::reportOutboundTraffic(TrafficCount::category cat, int size) +{ + m_traffic.addCount(cat, false, size); +} /** The number of active peers on the network Active peers are only those peers that have completed the handshake and are running the Ripple protocol. 
diff --git a/src/xrpld/overlay/detail/OverlayImpl.h b/src/xrpld/overlay/detail/OverlayImpl.h index 4112e6dfd9..86107fc591 100644 --- a/src/xrpld/overlay/detail/OverlayImpl.h +++ b/src/xrpld/overlay/detail/OverlayImpl.h @@ -345,7 +345,10 @@ public: makePrefix(std::uint32_t id); void - reportTraffic(TrafficCount::category cat, bool isInbound, int bytes); + reportInboundTraffic(TrafficCount::category cat, int bytes); + + void + reportOutboundTraffic(TrafficCount::category cat, int bytes); void incJqTransOverflow() override @@ -561,14 +564,16 @@ private: struct TrafficGauges { TrafficGauges( - char const* name, + std::string const& name, beast::insight::Collector::ptr const& collector) - : bytesIn(collector->make_gauge(name, "Bytes_In")) + : name(name) + , bytesIn(collector->make_gauge(name, "Bytes_In")) , bytesOut(collector->make_gauge(name, "Bytes_Out")) , messagesIn(collector->make_gauge(name, "Messages_In")) , messagesOut(collector->make_gauge(name, "Messages_Out")) { } + std::string const name; beast::insight::Gauge bytesIn; beast::insight::Gauge bytesOut; beast::insight::Gauge messagesIn; @@ -581,7 +586,8 @@ private: Stats( Handler const& handler, beast::insight::Collector::ptr const& collector, - std::vector&& trafficGauges_) + std::unordered_map&& + trafficGauges_) : peerDisconnects( collector->make_gauge("Overlay", "Peer_Disconnects")) , trafficGauges(std::move(trafficGauges_)) @@ -590,7 +596,7 @@ private: } beast::insight::Gauge peerDisconnects; - std::vector trafficGauges; + std::unordered_map trafficGauges; beast::insight::Hook hook; }; @@ -607,13 +613,25 @@ private: counts.size() == m_stats.trafficGauges.size(), "ripple::OverlayImpl::collect_metrics : counts size do match"); - for (std::size_t i = 0; i < counts.size(); ++i) + for (auto const& [key, value] : counts) { - m_stats.trafficGauges[i].bytesIn = counts[i].bytesIn; - m_stats.trafficGauges[i].bytesOut = counts[i].bytesOut; - m_stats.trafficGauges[i].messagesIn = counts[i].messagesIn; - m_stats.trafficGauges[i].messagesOut = counts[i].messagesOut; + auto it = m_stats.trafficGauges.find(key); + if (it == m_stats.trafficGauges.end()) + continue; + + auto& gauge = it->second; + + XRPL_ASSERT( + gauge.name == value.name, + "ripple::OverlayImpl::collect_metrics : gauge and counter " + "match"); + + gauge.bytesIn = value.bytesIn; + gauge.bytesOut = value.bytesOut; + gauge.messagesIn = value.messagesIn; + gauge.messagesOut = value.messagesOut; } + m_stats.peerDisconnects = getPeerDisconnect(); } }; diff --git a/src/xrpld/overlay/detail/PeerImp.cpp b/src/xrpld/overlay/detail/PeerImp.cpp index 6df9b78df8..1ee72ea5eb 100644 --- a/src/xrpld/overlay/detail/PeerImp.cpp +++ b/src/xrpld/overlay/detail/PeerImp.cpp @@ -249,11 +249,21 @@ PeerImp::send(std::shared_ptr const& m) auto validator = m->getValidatorKey(); if (validator && !squelch_.expireSquelch(*validator)) + { + overlay_.reportOutboundTraffic( + TrafficCount::category::squelch_suppressed, + static_cast(m->getBuffer(compressionEnabled_).size())); return; + } - overlay_.reportTraffic( + // report categorized outgoing traffic + overlay_.reportOutboundTraffic( safe_cast(m->getCategory()), - false, + static_cast(m->getBuffer(compressionEnabled_).size())); + + // report total outgoing traffic + overlay_.reportOutboundTraffic( + TrafficCount::category::total, static_cast(m->getBuffer(compressionEnabled_).size())); auto sendq_size = send_queue_.size(); @@ -1014,8 +1024,17 @@ PeerImp::onMessageBegin( auto const name = protocolMessageName(type); load_event_ = 
app_.getJobQueue().makeLoadEvent(jtPEER, name); fee_ = {Resource::feeTrivialPeer, name}; - auto const category = TrafficCount::categorize(*m, type, true); - overlay_.reportTraffic(category, true, static_cast(size)); + + auto const category = TrafficCount::categorize( + *m, static_cast(type), true); + + // report total incoming traffic + overlay_.reportInboundTraffic( + TrafficCount::category::total, static_cast(size)); + + // increase the traffic received for a specific category + overlay_.reportInboundTraffic(category, static_cast(size)); + using namespace protocol; if ((type == MessageType::mtTRANSACTION || type == MessageType::mtHAVE_TRANSACTIONS || @@ -1283,6 +1302,10 @@ PeerImp::handleTransaction( else if (eraseTxQueue && txReduceRelayEnabled()) removeTxQueue(txID); + overlay_.reportInboundTraffic( + TrafficCount::category::transaction_duplicate, + Message::messageSize(*m)); + return; } @@ -1670,8 +1693,16 @@ PeerImp::onMessage(std::shared_ptr const& m) // If the operator has specified that untrusted proposals be dropped then // this happens here I.e. before further wasting CPU verifying the signature // of an untrusted key - if (!isTrusted && app_.config().RELAY_UNTRUSTED_PROPOSALS == -1) - return; + if (!isTrusted) + { + // report untrusted proposal messages + overlay_.reportInboundTraffic( + TrafficCount::category::proposal_untrusted, + Message::messageSize(*m)); + + if (app_.config().RELAY_UNTRUSTED_PROPOSALS == -1) + return; + } uint256 const proposeHash{set.currenttxhash()}; uint256 const prevLedger{set.previousledger()}; @@ -1696,7 +1727,14 @@ PeerImp::onMessage(std::shared_ptr const& m) (stopwatch().now() - *relayed) < reduce_relay::IDLED) overlay_.updateSlotAndSquelch( suppression, publicKey, id_, protocol::mtPROPOSE_LEDGER); + + // report duplicate proposal messages + overlay_.reportInboundTraffic( + TrafficCount::category::proposal_duplicate, + Message::messageSize(*m)); + JLOG(p_journal_.trace()) << "Proposal: duplicate"; + return; } @@ -2310,17 +2348,26 @@ PeerImp::onMessage(std::shared_ptr const& m) auto const isTrusted = app_.validators().trusted(val->getSignerPublic()); - // If the operator has specified that untrusted validations be dropped - // then this happens here I.e. before further wasting CPU verifying the - // signature of an untrusted key - if (!isTrusted && app_.config().RELAY_UNTRUSTED_VALIDATIONS == -1) - return; + // If the operator has specified that untrusted validations be + // dropped then this happens here I.e. 
before further wasting CPU + // verifying the signature of an untrusted key + if (!isTrusted) + { + // increase untrusted validations received + overlay_.reportInboundTraffic( + TrafficCount::category::validation_untrusted, + Message::messageSize(*m)); + + if (app_.config().RELAY_UNTRUSTED_VALIDATIONS == -1) + return; + } auto key = sha512Half(makeSlice(m->validation())); - if (auto [added, relayed] = - app_.getHashRouter().addSuppressionPeerWithStatus(key, id_); - !added) + auto [added, relayed] = + app_.getHashRouter().addSuppressionPeerWithStatus(key, id_); + + if (!added) { // Count unique messages (Slots has it's own 'HashRouter'), which a // peer receives within IDLED seconds since the message has been @@ -2330,6 +2377,12 @@ PeerImp::onMessage(std::shared_ptr const& m) (stopwatch().now() - *relayed) < reduce_relay::IDLED) overlay_.updateSlotAndSquelch( key, val->getSignerPublic(), id_, protocol::mtVALIDATION); + + // increase duplicate validations received + overlay_.reportInboundTraffic( + TrafficCount::category::validation_duplicate, + Message::messageSize(*m)); + JLOG(p_journal_.trace()) << "Validation: duplicate"; return; } diff --git a/src/xrpld/overlay/detail/TrafficCount.cpp b/src/xrpld/overlay/detail/TrafficCount.cpp index c64a033e3e..a2234a432e 100644 --- a/src/xrpld/overlay/detail/TrafficCount.cpp +++ b/src/xrpld/overlay/detail/TrafficCount.cpp @@ -21,36 +21,41 @@ namespace ripple { +std::unordered_map const + type_lookup = { + {protocol::mtPING, TrafficCount::category::base}, + {protocol::mtSTATUS_CHANGE, TrafficCount::category::base}, + {protocol::mtMANIFESTS, TrafficCount::category::manifests}, + {protocol::mtENDPOINTS, TrafficCount::category::overlay}, + {protocol::mtTRANSACTION, TrafficCount::category::transaction}, + {protocol::mtVALIDATORLIST, TrafficCount::category::validatorlist}, + {protocol::mtVALIDATORLISTCOLLECTION, + TrafficCount::category::validatorlist}, + {protocol::mtVALIDATION, TrafficCount::category::validation}, + {protocol::mtPROPOSE_LEDGER, TrafficCount::category::proposal}, + {protocol::mtPROOF_PATH_REQ, + TrafficCount::category::proof_path_request}, + {protocol::mtPROOF_PATH_RESPONSE, + TrafficCount::category::proof_path_response}, + {protocol::mtREPLAY_DELTA_REQ, + TrafficCount::category::replay_delta_request}, + {protocol::mtREPLAY_DELTA_RESPONSE, + TrafficCount::category::replay_delta_response}, + {protocol::mtHAVE_TRANSACTIONS, + TrafficCount::category::have_transactions}, + {protocol::mtTRANSACTIONS, + TrafficCount::category::requested_transactions}, + {protocol::mtSQUELCH, TrafficCount::category::squelch}, +}; + TrafficCount::category TrafficCount::categorize( ::google::protobuf::Message const& message, - int type, + protocol::MessageType type, bool inbound) { - if ((type == protocol::mtPING) || (type == protocol::mtSTATUS_CHANGE)) - return TrafficCount::category::base; - - if (type == protocol::mtCLUSTER) - return TrafficCount::category::cluster; - - if (type == protocol::mtMANIFESTS) - return TrafficCount::category::manifests; - - if (type == protocol::mtENDPOINTS) - return TrafficCount::category::overlay; - - if (type == protocol::mtTRANSACTION) - return TrafficCount::category::transaction; - - if (type == protocol::mtVALIDATORLIST || - type == protocol::mtVALIDATORLISTCOLLECTION) - return TrafficCount::category::validatorlist; - - if (type == protocol::mtVALIDATION) - return TrafficCount::category::validation; - - if (type == protocol::mtPROPOSE_LEDGER) - return TrafficCount::category::proposal; + if (auto item = type_lookup.find(type); 
item != type_lookup.end()) + return item->second; if (type == protocol::mtHAVE_SET) return inbound ? TrafficCount::category::get_set @@ -139,25 +144,6 @@ TrafficCount::categorize( : TrafficCount::category::get_hash; } - if (type == protocol::mtPROOF_PATH_REQ) - return TrafficCount::category::proof_path_request; - - if (type == protocol::mtPROOF_PATH_RESPONSE) - return TrafficCount::category::proof_path_response; - - if (type == protocol::mtREPLAY_DELTA_REQ) - return TrafficCount::category::replay_delta_request; - - if (type == protocol::mtREPLAY_DELTA_RESPONSE) - return TrafficCount::category::replay_delta_response; - - if (type == protocol::mtHAVE_TRANSACTIONS) - return TrafficCount::category::have_transactions; - - if (type == protocol::mtTRANSACTIONS) - return TrafficCount::category::requested_transactions; - return TrafficCount::category::unknown; } - } // namespace ripple diff --git a/src/xrpld/overlay/detail/TrafficCount.h b/src/xrpld/overlay/detail/TrafficCount.h index b508970e44..9d1cce503b 100644 --- a/src/xrpld/overlay/detail/TrafficCount.h +++ b/src/xrpld/overlay/detail/TrafficCount.h @@ -23,26 +23,46 @@ #include #include -#include #include #include namespace ripple { +/** + TrafficCount is used to count ingress and egress wire bytes and number of + messages. The general intended usage is as follows: + 1. Determine the message category by callin TrafficCount::categorize + 2. Increment the counters for incoming or outgoing traffic by calling + TrafficCount::addCount + 3. Optionally, TrafficCount::addCount can be called at any time to + increment additional traffic categories, not captured by + TrafficCount::categorize. + + There are two special categories: + 1. category::total - this category is used to report the total traffic + amount. It should be incremented once just after receiving a new message, and + once just before sending a message to a peer. Messages whose category is not + in TrafficCount::categorize are not included in the total. + 2. category::unknown - this category is used to report traffic for + messages of unknown type. 
+*/ class TrafficCount { public: + enum category : std::size_t; + class TrafficStats { public: - char const* name; + std::string name; std::atomic bytesIn{0}; std::atomic bytesOut{0}; std::atomic messagesIn{0}; std::atomic messagesOut{0}; - TrafficStats(char const* n) : name(n) + TrafficStats(TrafficCount::category cat) + : name(TrafficCount::to_string(cat)) { } @@ -70,11 +90,26 @@ public: cluster, // cluster overhead overlay, // overlay management manifests, // manifest management - transaction, - proposal, - validation, + + transaction, // transaction messages + // The following categories breakdown transaction message type + transaction_duplicate, // duplicate transaction messages + + proposal, // proposal messages + // The following categories breakdown proposal message type + proposal_untrusted, // proposals from untrusted validators + proposal_duplicate, // proposals seen previously + + validation, // validation messages + // The following categories breakdown validation message type + validation_untrusted, // validations from untrusted validators + validation_duplicate, // validations seen previously + validatorlist, + squelch, + squelch_suppressed, // egress traffic amount suppressed by squelching + // TMHaveSet message: get_set, // transaction sets we try to get share_set, // transaction sets we get @@ -156,15 +191,20 @@ public: // TMTransactions requested_transactions, + // The total p2p bytes sent and received on the wire + total, + unknown // must be last }; + TrafficCount() = default; + /** Given a protocol message, determine which traffic category it belongs to */ static category categorize( ::google::protobuf::Message const& message, - int type, + protocol::MessageType type, bool inbound); /** Account for traffic associated with the given category */ @@ -175,20 +215,24 @@ public: cat <= category::unknown, "ripple::TrafficCount::addCount : valid category input"); + auto it = counts_.find(cat); + + // nothing to do, the category does not exist + if (it == counts_.end()) + return; + if (inbound) { - counts_[cat].bytesIn += bytes; - ++counts_[cat].messagesIn; + it->second.bytesIn += bytes; + ++it->second.messagesIn; } else { - counts_[cat].bytesOut += bytes; - ++counts_[cat].messagesOut; + it->second.bytesOut += bytes; + ++it->second.messagesOut; } } - TrafficCount() = default; - /** An up-to-date copy of all the counters @return an object which satisfies the requirements of Container @@ -199,57 +243,131 @@ public: return counts_; } + static std::string + to_string(category cat) + { + static const std::unordered_map category_map = { + {base, "overhead"}, + {cluster, "overhead_cluster"}, + {overlay, "overhead_overlay"}, + {manifests, "overhead_manifest"}, + {transaction, "transactions"}, + {transaction_duplicate, "transactions_duplicate"}, + {proposal, "proposals"}, + {proposal_untrusted, "proposals_untrusted"}, + {proposal_duplicate, "proposals_duplicate"}, + {validation, "validations"}, + {validation_untrusted, "validations_untrusted"}, + {validation_duplicate, "validations_duplicate"}, + {validatorlist, "validator_lists"}, + {squelch, "squelch"}, + {squelch_suppressed, "squelch_suppressed"}, + {get_set, "set_get"}, + {share_set, "set_share"}, + {ld_tsc_get, "ledger_data_Transaction_Set_candidate_get"}, + {ld_tsc_share, "ledger_data_Transaction_Set_candidate_share"}, + {ld_txn_get, "ledger_data_Transaction_Node_get"}, + {ld_txn_share, "ledger_data_Transaction_Node_share"}, + {ld_asn_get, "ledger_data_Account_State_Node_get"}, + {ld_asn_share, 
"ledger_data_Account_State_Node_share"}, + {ld_get, "ledger_data_get"}, + {ld_share, "ledger_data_share"}, + {gl_tsc_share, "ledger_Transaction_Set_candidate_share"}, + {gl_tsc_get, "ledger_Transaction_Set_candidate_get"}, + {gl_txn_share, "ledger_Transaction_node_share"}, + {gl_txn_get, "ledger_Transaction_node_get"}, + {gl_asn_share, "ledger_Account_State_node_share"}, + {gl_asn_get, "ledger_Account_State_node_get"}, + {gl_share, "ledger_share"}, + {gl_get, "ledger_get"}, + {share_hash_ledger, "getobject_Ledger_share"}, + {get_hash_ledger, "getobject_Ledger_get"}, + {share_hash_tx, "getobject_Transaction_share"}, + {get_hash_tx, "getobject_Transaction_get"}, + {share_hash_txnode, "getobject_Transaction_node_share"}, + {get_hash_txnode, "getobject_Transaction_node_get"}, + {share_hash_asnode, "getobject_Account_State_node_share"}, + {get_hash_asnode, "getobject_Account_State_node_get"}, + {share_cas_object, "getobject_CAS_share"}, + {get_cas_object, "getobject_CAS_get"}, + {share_fetch_pack, "getobject_Fetch_Pack_share"}, + {get_fetch_pack, "getobject_Fetch Pack_get"}, + {get_transactions, "getobject_Transactions_get"}, + {share_hash, "getobject_share"}, + {get_hash, "getobject_get"}, + {proof_path_request, "proof_path_request"}, + {proof_path_response, "proof_path_response"}, + {replay_delta_request, "replay_delta_request"}, + {replay_delta_response, "replay_delta_response"}, + {have_transactions, "have_transactions"}, + {requested_transactions, "requested_transactions"}, + {total, "total"}}; + + if (auto it = category_map.find(cat); it != category_map.end()) + return it->second; + + return "unknown"; + } + protected: - std::array counts_{{ - {"overhead"}, // category::base - {"overhead_cluster"}, // category::cluster - {"overhead_overlay"}, // category::overlay - {"overhead_manifest"}, // category::manifests - {"transactions"}, // category::transaction - {"proposals"}, // category::proposal - {"validations"}, // category::validation - {"validator_lists"}, // category::validatorlist - {"set_get"}, // category::get_set - {"set_share"}, // category::share_set - {"ledger_data_Transaction_Set_candidate_get"}, // category::ld_tsc_get - {"ledger_data_Transaction_Set_candidate_share"}, // category::ld_tsc_share - {"ledger_data_Transaction_Node_get"}, // category::ld_txn_get - {"ledger_data_Transaction_Node_share"}, // category::ld_txn_share - {"ledger_data_Account_State_Node_get"}, // category::ld_asn_get - {"ledger_data_Account_State_Node_share"}, // category::ld_asn_share - {"ledger_data_get"}, // category::ld_get - {"ledger_data_share"}, // category::ld_share - {"ledger_Transaction_Set_candidate_share"}, // category::gl_tsc_share - {"ledger_Transaction_Set_candidate_get"}, // category::gl_tsc_get - {"ledger_Transaction_node_share"}, // category::gl_txn_share - {"ledger_Transaction_node_get"}, // category::gl_txn_get - {"ledger_Account_State_node_share"}, // category::gl_asn_share - {"ledger_Account_State_node_get"}, // category::gl_asn_get - {"ledger_share"}, // category::gl_share - {"ledger_get"}, // category::gl_get - {"getobject_Ledger_share"}, // category::share_hash_ledger - {"getobject_Ledger_get"}, // category::get_hash_ledger - {"getobject_Transaction_share"}, // category::share_hash_tx - {"getobject_Transaction_get"}, // category::get_hash_tx - {"getobject_Transaction_node_share"}, // category::share_hash_txnode - {"getobject_Transaction_node_get"}, // category::get_hash_txnode - {"getobject_Account_State_node_share"}, // category::share_hash_asnode - 
{"getobject_Account_State_node_get"}, // category::get_hash_asnode - {"getobject_CAS_share"}, // category::share_cas_object - {"getobject_CAS_get"}, // category::get_cas_object - {"getobject_Fetch_Pack_share"}, // category::share_fetch_pack - {"getobject_Fetch Pack_get"}, // category::get_fetch_pack - {"getobject_Transactions_get"}, // category::get_transactions - {"getobject_share"}, // category::share_hash - {"getobject_get"}, // category::get_hash - {"proof_path_request"}, // category::proof_path_request - {"proof_path_response"}, // category::proof_path_response - {"replay_delta_request"}, // category::replay_delta_request - {"replay_delta_response"}, // category::replay_delta_response - {"have_transactions"}, // category::have_transactions - {"requested_transactions"}, // category::transactions - {"unknown"} // category::unknown - }}; + std::unordered_map counts_{ + {base, {base}}, + {cluster, {cluster}}, + {overlay, {overlay}}, + {manifests, {manifests}}, + {transaction, {transaction}}, + {transaction_duplicate, {transaction_duplicate}}, + {proposal, {proposal}}, + {proposal_untrusted, {proposal_untrusted}}, + {proposal_duplicate, {proposal_duplicate}}, + {validation, {validation}}, + {validation_untrusted, {validation_untrusted}}, + {validation_duplicate, {validation_duplicate}}, + {validatorlist, {validatorlist}}, + {squelch, {squelch}}, + {squelch_suppressed, {squelch_suppressed}}, + {get_set, {get_set}}, + {share_set, {share_set}}, + {ld_tsc_get, {ld_tsc_get}}, + {ld_tsc_share, {ld_tsc_share}}, + {ld_txn_get, {ld_txn_get}}, + {ld_txn_share, {ld_txn_share}}, + {ld_asn_get, {ld_asn_get}}, + {ld_asn_share, {ld_asn_share}}, + {ld_get, {ld_get}}, + {ld_share, {ld_share}}, + {gl_tsc_share, {gl_tsc_share}}, + {gl_tsc_get, {gl_tsc_get}}, + {gl_txn_share, {gl_txn_share}}, + {gl_txn_get, {gl_txn_get}}, + {gl_asn_share, {gl_asn_share}}, + {gl_asn_get, {gl_asn_get}}, + {gl_share, {gl_share}}, + {gl_get, {gl_get}}, + {share_hash_ledger, {share_hash_ledger}}, + {get_hash_ledger, {get_hash_ledger}}, + {share_hash_tx, {share_hash_tx}}, + {get_hash_tx, {get_hash_tx}}, + {share_hash_txnode, {share_hash_txnode}}, + {get_hash_txnode, {get_hash_txnode}}, + {share_hash_asnode, {share_hash_asnode}}, + {get_hash_asnode, {get_hash_asnode}}, + {share_cas_object, {share_cas_object}}, + {get_cas_object, {get_cas_object}}, + {share_fetch_pack, {share_fetch_pack}}, + {get_fetch_pack, {get_fetch_pack}}, + {get_transactions, {get_transactions}}, + {share_hash, {share_hash}}, + {get_hash, {get_hash}}, + {proof_path_request, {proof_path_request}}, + {proof_path_response, {proof_path_response}}, + {replay_delta_request, {replay_delta_request}}, + {replay_delta_response, {replay_delta_response}}, + {have_transactions, {have_transactions}}, + {requested_transactions, {requested_transactions}}, + {total, {total}}, + {unknown, {unknown}}, + }; }; } // namespace ripple From e429455f4d5ce8bdb5f024c447c9b3c656fe302f Mon Sep 17 00:00:00 2001 From: Mayukha Vadari Date: Wed, 9 Apr 2025 13:02:03 -0400 Subject: [PATCH 007/244] refactor(trivial): reorganize ledger entry tests and helper functions (#5376) This PR splits out `ledger_entry` tests into its own file (`LedgerEntry_test.cpp`) and alphabetizes the helper functions in `LedgerEntry.cpp`. These commits were split out of #5237 to make that PR a little more manageable, since these basic trivial changes are most of the diff. There is no code change, just moving code around. 
--- src/test/rpc/LedgerEntry_test.cpp | 2607 ++++++++++++++++++++++++ src/test/rpc/LedgerRPC_test.cpp | 2525 +---------------------- src/xrpld/rpc/handlers/LedgerEntry.cpp | 720 +++---- 3 files changed, 2969 insertions(+), 2883 deletions(-) create mode 100644 src/test/rpc/LedgerEntry_test.cpp diff --git a/src/test/rpc/LedgerEntry_test.cpp b/src/test/rpc/LedgerEntry_test.cpp new file mode 100644 index 0000000000..32332adb20 --- /dev/null +++ b/src/test/rpc/LedgerEntry_test.cpp @@ -0,0 +1,2607 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2012-2025 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace ripple { + +namespace test { + +class LedgerEntry_test : public beast::unit_test::suite +{ + void + checkErrorValue( + Json::Value const& jv, + std::string const& err, + std::string const& msg) + { + if (BEAST_EXPECT(jv.isMember(jss::status))) + BEAST_EXPECT(jv[jss::status] == "error"); + if (BEAST_EXPECT(jv.isMember(jss::error))) + BEAST_EXPECT(jv[jss::error] == err); + if (msg.empty()) + { + BEAST_EXPECT( + jv[jss::error_message] == Json::nullValue || + jv[jss::error_message] == ""); + } + else if (BEAST_EXPECT(jv.isMember(jss::error_message))) + BEAST_EXPECT(jv[jss::error_message] == msg); + } + + // Corrupt a valid address by replacing the 10th character with '!'. + // '!' is not part of the ripple alphabet. 
+ std::string + makeBadAddress(std::string good) + { + std::string ret = std::move(good); + ret.replace(10, 1, 1, '!'); + return ret; + } + + void + testLedgerEntryInvalid() + { + testcase("Invalid requests"); + using namespace test::jtx; + Env env{*this}; + Account const alice{"alice"}; + env.fund(XRP(10000), alice); + env.close(); + + { + // Missing ledger_entry ledger_hash + Json::Value jvParams; + jvParams[jss::account_root] = alice.human(); + jvParams[jss::ledger_hash] = + "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + "AA"; + auto const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "lgrNotFound", "ledgerNotFound"); + } + + { + // ask for an zero index + Json::Value jvParams; + jvParams[jss::ledger_index] = "validated"; + jvParams[jss::index] = + "00000000000000000000000000000000000000000000000000000000000000" + "0000"; + auto const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + } + + void + testLedgerEntryAccountRoot() + { + testcase("ledger_entry Request AccountRoot"); + using namespace test::jtx; + + auto cfg = envconfig(); + cfg->FEES.reference_fee = 10; + Env env{*this, std::move(cfg)}; + + Account const alice{"alice"}; + env.fund(XRP(10000), alice); + env.close(); + + std::string const ledgerHash{to_string(env.closed()->info().hash)}; + { + // Exercise ledger_closed along the way. + Json::Value const jrr = env.rpc("ledger_closed")[jss::result]; + BEAST_EXPECT(jrr[jss::ledger_hash] == ledgerHash); + BEAST_EXPECT(jrr[jss::ledger_index] == 3); + } + + std::string accountRootIndex; + { + // Request alice's account root. + Json::Value jvParams; + jvParams[jss::account_root] = alice.human(); + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + BEAST_EXPECT(jrr.isMember(jss::node)); + BEAST_EXPECT(jrr[jss::node][jss::Account] == alice.human()); + BEAST_EXPECT(jrr[jss::node][sfBalance.jsonName] == "10000000000"); + accountRootIndex = jrr[jss::index].asString(); + } + { + constexpr char alicesAcctRootBinary[]{ + "1100612200800000240000000425000000032D00000000559CE54C3B934E4" + "73A995B477E92EC229F99CED5B62BF4D2ACE4DC42719103AE2F6240000002" + "540BE4008114AE123A8556F3CF91154711376AFB0F894F832B3D"}; + + // Request alice's account root, but with binary == true; + Json::Value jvParams; + jvParams[jss::account_root] = alice.human(); + jvParams[jss::binary] = 1; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + BEAST_EXPECT(jrr.isMember(jss::node_binary)); + BEAST_EXPECT(jrr[jss::node_binary] == alicesAcctRootBinary); + } + { + // Request alice's account root using the index. + Json::Value jvParams; + jvParams[jss::index] = accountRootIndex; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + BEAST_EXPECT(!jrr.isMember(jss::node_binary)); + BEAST_EXPECT(jrr.isMember(jss::node)); + BEAST_EXPECT(jrr[jss::node][jss::Account] == alice.human()); + BEAST_EXPECT(jrr[jss::node][sfBalance.jsonName] == "10000000000"); + } + { + // Request alice's account root by index, but with binary == false. 
+ Json::Value jvParams; + jvParams[jss::index] = accountRootIndex; + jvParams[jss::binary] = 0; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + BEAST_EXPECT(jrr.isMember(jss::node)); + BEAST_EXPECT(jrr[jss::node][jss::Account] == alice.human()); + BEAST_EXPECT(jrr[jss::node][sfBalance.jsonName] == "10000000000"); + } + { + // Request using a corrupted AccountID. + Json::Value jvParams; + jvParams[jss::account_root] = makeBadAddress(alice.human()); + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedAddress", ""); + } + { + // Request an account that is not in the ledger. + Json::Value jvParams; + jvParams[jss::account_root] = Account("bob").human(); + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "entryNotFound", ""); + } + } + + void + testLedgerEntryCheck() + { + testcase("ledger_entry Request Check"); + using namespace test::jtx; + Env env{*this}; + Account const alice{"alice"}; + env.fund(XRP(10000), alice); + env.close(); + + auto const checkId = keylet::check(env.master, env.seq(env.master)); + + env(check::create(env.master, alice, XRP(100))); + env.close(); + + std::string const ledgerHash{to_string(env.closed()->info().hash)}; + { + // Request a check. + Json::Value jvParams; + jvParams[jss::check] = to_string(checkId.key); + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + BEAST_EXPECT( + jrr[jss::node][sfLedgerEntryType.jsonName] == jss::Check); + BEAST_EXPECT(jrr[jss::node][sfSendMax.jsonName] == "100000000"); + } + { + // Request an index that is not a check. We'll use alice's + // account root index. 
+ std::string accountRootIndex; + { + Json::Value jvParams; + jvParams[jss::account_root] = alice.human(); + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + accountRootIndex = jrr[jss::index].asString(); + } + Json::Value jvParams; + jvParams[jss::check] = accountRootIndex; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "unexpectedLedgerType", ""); + } + } + + void + testLedgerEntryCredentials() + { + testcase("ledger_entry credentials"); + + using namespace test::jtx; + + Env env(*this); + Account const issuer{"issuer"}; + Account const alice{"alice"}; + Account const bob{"bob"}; + const char credType[] = "abcde"; + + env.fund(XRP(5000), issuer, alice, bob); + env.close(); + + // Setup credentials with DepositAuth object for Alice and Bob + env(credentials::create(alice, issuer, credType)); + env.close(); + + { + // Succeed + auto jv = credentials::ledgerEntry(env, alice, issuer, credType); + BEAST_EXPECT( + jv.isObject() && jv.isMember(jss::result) && + !jv[jss::result].isMember(jss::error) && + jv[jss::result].isMember(jss::node) && + jv[jss::result][jss::node].isMember( + sfLedgerEntryType.jsonName) && + jv[jss::result][jss::node][sfLedgerEntryType.jsonName] == + jss::Credential); + + std::string const credIdx = jv[jss::result][jss::index].asString(); + + jv = credentials::ledgerEntry(env, credIdx); + BEAST_EXPECT( + jv.isObject() && jv.isMember(jss::result) && + !jv[jss::result].isMember(jss::error) && + jv[jss::result].isMember(jss::node) && + jv[jss::result][jss::node].isMember( + sfLedgerEntryType.jsonName) && + jv[jss::result][jss::node][sfLedgerEntryType.jsonName] == + jss::Credential); + } + + { + // Fail, index not a hash + auto const jv = credentials::ledgerEntry(env, ""); + checkErrorValue(jv[jss::result], "malformedRequest", ""); + } + + { + // Fail, credential doesn't exist + auto const jv = credentials::ledgerEntry( + env, + "48004829F915654A81B11C4AB8218D96FED67F209B58328A72314FB6EA288B" + "E4"); + checkErrorValue(jv[jss::result], "entryNotFound", ""); + } + + { + // Fail, invalid subject + Json::Value jv; + jv[jss::ledger_index] = jss::validated; + jv[jss::credential][jss::subject] = 42; + jv[jss::credential][jss::issuer] = issuer.human(); + jv[jss::credential][jss::credential_type] = + strHex(std::string_view(credType)); + auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); + checkErrorValue(jrr[jss::result], "malformedRequest", ""); + } + + { + // Fail, invalid issuer + Json::Value jv; + jv[jss::ledger_index] = jss::validated; + jv[jss::credential][jss::subject] = alice.human(); + jv[jss::credential][jss::issuer] = 42; + jv[jss::credential][jss::credential_type] = + strHex(std::string_view(credType)); + auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); + checkErrorValue(jrr[jss::result], "malformedRequest", ""); + } + + { + // Fail, invalid credentials type + Json::Value jv; + jv[jss::ledger_index] = jss::validated; + jv[jss::credential][jss::subject] = alice.human(); + jv[jss::credential][jss::issuer] = issuer.human(); + jv[jss::credential][jss::credential_type] = 42; + auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); + checkErrorValue(jrr[jss::result], "malformedRequest", ""); + } + + { + // Fail, empty subject + Json::Value jv; + jv[jss::ledger_index] = jss::validated; + jv[jss::credential][jss::subject] = ""; + jv[jss::credential][jss::issuer] = issuer.human(); 
+ jv[jss::credential][jss::credential_type] = + strHex(std::string_view(credType)); + auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); + checkErrorValue(jrr[jss::result], "malformedRequest", ""); + } + + { + // Fail, empty issuer + Json::Value jv; + jv[jss::ledger_index] = jss::validated; + jv[jss::credential][jss::subject] = alice.human(); + jv[jss::credential][jss::issuer] = ""; + jv[jss::credential][jss::credential_type] = + strHex(std::string_view(credType)); + auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); + checkErrorValue(jrr[jss::result], "malformedRequest", ""); + } + + { + // Fail, empty credentials type + Json::Value jv; + jv[jss::ledger_index] = jss::validated; + jv[jss::credential][jss::subject] = alice.human(); + jv[jss::credential][jss::issuer] = issuer.human(); + jv[jss::credential][jss::credential_type] = ""; + auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); + checkErrorValue(jrr[jss::result], "malformedRequest", ""); + } + + { + // Fail, no subject + Json::Value jv; + jv[jss::ledger_index] = jss::validated; + jv[jss::credential][jss::issuer] = issuer.human(); + jv[jss::credential][jss::credential_type] = + strHex(std::string_view(credType)); + auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); + checkErrorValue(jrr[jss::result], "malformedRequest", ""); + } + + { + // Fail, no issuer + Json::Value jv; + jv[jss::ledger_index] = jss::validated; + jv[jss::credential][jss::subject] = alice.human(); + jv[jss::credential][jss::credential_type] = + strHex(std::string_view(credType)); + auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); + checkErrorValue(jrr[jss::result], "malformedRequest", ""); + } + + { + // Fail, no credentials type + Json::Value jv; + jv[jss::ledger_index] = jss::validated; + jv[jss::credential][jss::subject] = alice.human(); + jv[jss::credential][jss::issuer] = issuer.human(); + auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); + checkErrorValue(jrr[jss::result], "malformedRequest", ""); + } + + { + // Fail, not AccountID subject + Json::Value jv; + jv[jss::ledger_index] = jss::validated; + jv[jss::credential][jss::subject] = "wehsdbvasbdfvj"; + jv[jss::credential][jss::issuer] = issuer.human(); + jv[jss::credential][jss::credential_type] = + strHex(std::string_view(credType)); + auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); + checkErrorValue(jrr[jss::result], "malformedRequest", ""); + } + + { + // Fail, not AccountID issuer + Json::Value jv; + jv[jss::ledger_index] = jss::validated; + jv[jss::credential][jss::subject] = alice.human(); + jv[jss::credential][jss::issuer] = "c4p93ugndfbsiu"; + jv[jss::credential][jss::credential_type] = + strHex(std::string_view(credType)); + auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); + checkErrorValue(jrr[jss::result], "malformedRequest", ""); + } + + { + // Fail, credentials type isn't hex encoded + Json::Value jv; + jv[jss::ledger_index] = jss::validated; + jv[jss::credential][jss::subject] = alice.human(); + jv[jss::credential][jss::issuer] = issuer.human(); + jv[jss::credential][jss::credential_type] = "12KK"; + auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); + checkErrorValue(jrr[jss::result], "malformedRequest", ""); + } + } + + void + testLedgerEntryDepositPreauth() + { + testcase("ledger_entry Deposit Preauth"); + + using namespace test::jtx; + + Env env{*this}; + Account const alice{"alice"}; + Account const becky{"becky"}; + + env.fund(XRP(10000), alice, becky); + 
env.close(); + + env(deposit::auth(alice, becky)); + env.close(); + + std::string const ledgerHash{to_string(env.closed()->info().hash)}; + std::string depositPreauthIndex; + { + // Request a depositPreauth by owner and authorized. + Json::Value jvParams; + jvParams[jss::deposit_preauth][jss::owner] = alice.human(); + jvParams[jss::deposit_preauth][jss::authorized] = becky.human(); + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + + BEAST_EXPECT( + jrr[jss::node][sfLedgerEntryType.jsonName] == + jss::DepositPreauth); + BEAST_EXPECT(jrr[jss::node][sfAccount.jsonName] == alice.human()); + BEAST_EXPECT(jrr[jss::node][sfAuthorize.jsonName] == becky.human()); + depositPreauthIndex = jrr[jss::node][jss::index].asString(); + } + { + // Request a depositPreauth by index. + Json::Value jvParams; + jvParams[jss::deposit_preauth] = depositPreauthIndex; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + + BEAST_EXPECT( + jrr[jss::node][sfLedgerEntryType.jsonName] == + jss::DepositPreauth); + BEAST_EXPECT(jrr[jss::node][sfAccount.jsonName] == alice.human()); + BEAST_EXPECT(jrr[jss::node][sfAuthorize.jsonName] == becky.human()); + } + { + // Malformed request: deposit_preauth neither object nor string. + Json::Value jvParams; + jvParams[jss::deposit_preauth] = -5; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + { + // Malformed request: deposit_preauth not hex string. + Json::Value jvParams; + jvParams[jss::deposit_preauth] = "0123456789ABCDEFG"; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + { + // Malformed request: missing [jss::deposit_preauth][jss::owner] + Json::Value jvParams; + jvParams[jss::deposit_preauth][jss::authorized] = becky.human(); + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + { + // Malformed request: [jss::deposit_preauth][jss::owner] not string. + Json::Value jvParams; + jvParams[jss::deposit_preauth][jss::owner] = 7; + jvParams[jss::deposit_preauth][jss::authorized] = becky.human(); + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + { + // Malformed: missing [jss::deposit_preauth][jss::authorized] + Json::Value jvParams; + jvParams[jss::deposit_preauth][jss::owner] = alice.human(); + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + { + // Malformed: [jss::deposit_preauth][jss::authorized] not string. + Json::Value jvParams; + jvParams[jss::deposit_preauth][jss::owner] = alice.human(); + jvParams[jss::deposit_preauth][jss::authorized] = 47; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + { + // Malformed: [jss::deposit_preauth][jss::owner] is malformed. 
+ Json::Value jvParams; + jvParams[jss::deposit_preauth][jss::owner] = + "rP6P9ypfAmc!pw8SZHNwM4nvZHFXDraQas"; + + jvParams[jss::deposit_preauth][jss::authorized] = becky.human(); + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedOwner", ""); + } + { + // Malformed: [jss::deposit_preauth][jss::authorized] is malformed. + Json::Value jvParams; + jvParams[jss::deposit_preauth][jss::owner] = alice.human(); + jvParams[jss::deposit_preauth][jss::authorized] = + "rP6P9ypfAmc!pw8SZHNwM4nvZHFXDraQas"; + + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedAuthorized", ""); + } + } + + void + testLedgerEntryDepositPreauthCred() + { + testcase("ledger_entry Deposit Preauth with credentials"); + + using namespace test::jtx; + + Env env(*this); + Account const issuer{"issuer"}; + Account const alice{"alice"}; + Account const bob{"bob"}; + const char credType[] = "abcde"; + + env.fund(XRP(5000), issuer, alice, bob); + env.close(); + + { + // Setup Bob with DepositAuth + env(fset(bob, asfDepositAuth)); + env.close(); + env(deposit::authCredentials(bob, {{issuer, credType}})); + env.close(); + } + + { + // Succeed + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::deposit_preauth][jss::owner] = bob.human(); + + jvParams[jss::deposit_preauth][jss::authorized_credentials] = + Json::arrayValue; + auto& arr( + jvParams[jss::deposit_preauth][jss::authorized_credentials]); + + Json::Value jo; + jo[jss::issuer] = issuer.human(); + jo[jss::credential_type] = strHex(std::string_view(credType)); + arr.append(std::move(jo)); + auto const jrr = + env.rpc("json", "ledger_entry", to_string(jvParams)); + + BEAST_EXPECT( + jrr.isObject() && jrr.isMember(jss::result) && + !jrr[jss::result].isMember(jss::error) && + jrr[jss::result].isMember(jss::node) && + jrr[jss::result][jss::node].isMember( + sfLedgerEntryType.jsonName) && + jrr[jss::result][jss::node][sfLedgerEntryType.jsonName] == + jss::DepositPreauth); + } + + { + // Failed, invalid account + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::deposit_preauth][jss::owner] = bob.human(); + + jvParams[jss::deposit_preauth][jss::authorized_credentials] = + Json::arrayValue; + auto& arr( + jvParams[jss::deposit_preauth][jss::authorized_credentials]); + + Json::Value jo; + jo[jss::issuer] = to_string(xrpAccount()); + jo[jss::credential_type] = strHex(std::string_view(credType)); + arr.append(std::move(jo)); + auto const jrr = + env.rpc("json", "ledger_entry", to_string(jvParams)); + checkErrorValue( + jrr[jss::result], "malformedAuthorizedCredentials", ""); + } + + { + // Failed, duplicates in credentials + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::deposit_preauth][jss::owner] = bob.human(); + + jvParams[jss::deposit_preauth][jss::authorized_credentials] = + Json::arrayValue; + auto& arr( + jvParams[jss::deposit_preauth][jss::authorized_credentials]); + + Json::Value jo; + jo[jss::issuer] = issuer.human(); + jo[jss::credential_type] = strHex(std::string_view(credType)); + arr.append(jo); + arr.append(std::move(jo)); + auto const jrr = + env.rpc("json", "ledger_entry", to_string(jvParams)); + checkErrorValue( + jrr[jss::result], "malformedAuthorizedCredentials", ""); + } + + { + // Failed, invalid credential_type + 
Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::deposit_preauth][jss::owner] = bob.human(); + + jvParams[jss::deposit_preauth][jss::authorized_credentials] = + Json::arrayValue; + auto& arr( + jvParams[jss::deposit_preauth][jss::authorized_credentials]); + + Json::Value jo; + jo[jss::issuer] = issuer.human(); + jo[jss::credential_type] = ""; + arr.append(std::move(jo)); + + auto const jrr = + env.rpc("json", "ledger_entry", to_string(jvParams)); + checkErrorValue( + jrr[jss::result], "malformedAuthorizedCredentials", ""); + } + + { + // Failed, authorized and authorized_credentials both present + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::deposit_preauth][jss::owner] = bob.human(); + jvParams[jss::deposit_preauth][jss::authorized] = alice.human(); + + jvParams[jss::deposit_preauth][jss::authorized_credentials] = + Json::arrayValue; + auto& arr( + jvParams[jss::deposit_preauth][jss::authorized_credentials]); + + Json::Value jo; + jo[jss::issuer] = issuer.human(); + jo[jss::credential_type] = strHex(std::string_view(credType)); + arr.append(std::move(jo)); + + auto const jrr = + env.rpc("json", "ledger_entry", to_string(jvParams)); + checkErrorValue(jrr[jss::result], "malformedRequest", ""); + } + + { + // Failed, authorized_credentials is not an array + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::deposit_preauth][jss::owner] = bob.human(); + jvParams[jss::deposit_preauth][jss::authorized_credentials] = 42; + + auto const jrr = + env.rpc("json", "ledger_entry", to_string(jvParams)); + checkErrorValue(jrr[jss::result], "malformedRequest", ""); + } + + { + // Failed, authorized_credentials contains string data + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::deposit_preauth][jss::owner] = bob.human(); + jvParams[jss::deposit_preauth][jss::authorized_credentials] = + Json::arrayValue; + auto& arr( + jvParams[jss::deposit_preauth][jss::authorized_credentials]); + arr.append("foobar"); + + auto const jrr = + env.rpc("json", "ledger_entry", to_string(jvParams)); + checkErrorValue( + jrr[jss::result], "malformedAuthorizedCredentials", ""); + } + + { + // Failed, authorized_credentials contains arrays + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::deposit_preauth][jss::owner] = bob.human(); + jvParams[jss::deposit_preauth][jss::authorized_credentials] = + Json::arrayValue; + auto& arr( + jvParams[jss::deposit_preauth][jss::authorized_credentials]); + Json::Value payload = Json::arrayValue; + payload.append(42); + arr.append(std::move(payload)); + + auto const jrr = + env.rpc("json", "ledger_entry", to_string(jvParams)); + checkErrorValue( + jrr[jss::result], "malformedAuthorizedCredentials", ""); + } + + { + // Failed, authorized_credentials is empty array + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::deposit_preauth][jss::owner] = bob.human(); + jvParams[jss::deposit_preauth][jss::authorized_credentials] = + Json::arrayValue; + + auto const jrr = + env.rpc("json", "ledger_entry", to_string(jvParams)); + checkErrorValue( + jrr[jss::result], "malformedAuthorizedCredentials", ""); + } + + { + // Failed, authorized_credentials is too long + + static const std::string_view credTypes[] = { + "cred1", + "cred2", + "cred3", + "cred4", + "cred5", + "cred6", + "cred7", + "cred8", + "cred9"}; + static_assert( + sizeof(credTypes) / sizeof(credTypes[0]) > + 
maxCredentialsArraySize); + + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::deposit_preauth][jss::owner] = bob.human(); + jvParams[jss::deposit_preauth][jss::authorized_credentials] = + Json::arrayValue; + + auto& arr( + jvParams[jss::deposit_preauth][jss::authorized_credentials]); + + for (unsigned i = 0; i < sizeof(credTypes) / sizeof(credTypes[0]); + ++i) + { + Json::Value jo; + jo[jss::issuer] = issuer.human(); + jo[jss::credential_type] = + strHex(std::string_view(credTypes[i])); + arr.append(std::move(jo)); + } + + auto const jrr = + env.rpc("json", "ledger_entry", to_string(jvParams)); + checkErrorValue( + jrr[jss::result], "malformedAuthorizedCredentials", ""); + } + + { + // Failed, issuer is not set + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::deposit_preauth][jss::owner] = bob.human(); + + jvParams[jss::deposit_preauth][jss::authorized_credentials] = + Json::arrayValue; + auto& arr( + jvParams[jss::deposit_preauth][jss::authorized_credentials]); + + Json::Value jo; + jo[jss::credential_type] = strHex(std::string_view(credType)); + arr.append(std::move(jo)); + + auto const jrr = + env.rpc("json", "ledger_entry", to_string(jvParams)); + checkErrorValue( + jrr[jss::result], "malformedAuthorizedCredentials", ""); + } + + { + // Failed, issuer isn't string + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::deposit_preauth][jss::owner] = bob.human(); + + jvParams[jss::deposit_preauth][jss::authorized_credentials] = + Json::arrayValue; + auto& arr( + jvParams[jss::deposit_preauth][jss::authorized_credentials]); + + Json::Value jo; + jo[jss::issuer] = 42; + jo[jss::credential_type] = strHex(std::string_view(credType)); + arr.append(std::move(jo)); + + auto const jrr = + env.rpc("json", "ledger_entry", to_string(jvParams)); + checkErrorValue( + jrr[jss::result], "malformedAuthorizedCredentials", ""); + } + + { + // Failed, issuer is an array + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::deposit_preauth][jss::owner] = bob.human(); + + jvParams[jss::deposit_preauth][jss::authorized_credentials] = + Json::arrayValue; + auto& arr( + jvParams[jss::deposit_preauth][jss::authorized_credentials]); + + Json::Value jo; + Json::Value payload = Json::arrayValue; + payload.append(42); + jo[jss::issuer] = std::move(payload); + jo[jss::credential_type] = strHex(std::string_view(credType)); + arr.append(std::move(jo)); + + auto const jrr = + env.rpc("json", "ledger_entry", to_string(jvParams)); + checkErrorValue( + jrr[jss::result], "malformedAuthorizedCredentials", ""); + } + + { + // Failed, issuer isn't valid encoded account + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::deposit_preauth][jss::owner] = bob.human(); + + jvParams[jss::deposit_preauth][jss::authorized_credentials] = + Json::arrayValue; + auto& arr( + jvParams[jss::deposit_preauth][jss::authorized_credentials]); + + Json::Value jo; + jo[jss::issuer] = "invalid_account"; + jo[jss::credential_type] = strHex(std::string_view(credType)); + arr.append(std::move(jo)); + + auto const jrr = + env.rpc("json", "ledger_entry", to_string(jvParams)); + checkErrorValue( + jrr[jss::result], "malformedAuthorizedCredentials", ""); + } + + { + // Failed, credential_type is not set + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::deposit_preauth][jss::owner] = bob.human(); + + 
jvParams[jss::deposit_preauth][jss::authorized_credentials] = + Json::arrayValue; + auto& arr( + jvParams[jss::deposit_preauth][jss::authorized_credentials]); + + Json::Value jo; + jo[jss::issuer] = issuer.human(); + arr.append(std::move(jo)); + + auto const jrr = + env.rpc("json", "ledger_entry", to_string(jvParams)); + checkErrorValue( + jrr[jss::result], "malformedAuthorizedCredentials", ""); + } + + { + // Failed, credential_type isn't string + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::deposit_preauth][jss::owner] = bob.human(); + + jvParams[jss::deposit_preauth][jss::authorized_credentials] = + Json::arrayValue; + auto& arr( + jvParams[jss::deposit_preauth][jss::authorized_credentials]); + + Json::Value jo; + jo[jss::issuer] = issuer.human(); + jo[jss::credential_type] = 42; + arr.append(std::move(jo)); + + auto const jrr = + env.rpc("json", "ledger_entry", to_string(jvParams)); + checkErrorValue( + jrr[jss::result], "malformedAuthorizedCredentials", ""); + } + + { + // Failed, credential_type is an array + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::deposit_preauth][jss::owner] = bob.human(); + + jvParams[jss::deposit_preauth][jss::authorized_credentials] = + Json::arrayValue; + auto& arr( + jvParams[jss::deposit_preauth][jss::authorized_credentials]); + + Json::Value jo; + jo[jss::issuer] = issuer.human(); + Json::Value payload = Json::arrayValue; + payload.append(42); + jo[jss::credential_type] = std::move(payload); + arr.append(std::move(jo)); + + auto const jrr = + env.rpc("json", "ledger_entry", to_string(jvParams)); + checkErrorValue( + jrr[jss::result], "malformedAuthorizedCredentials", ""); + } + + { + // Failed, credential_type isn't hex encoded + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::deposit_preauth][jss::owner] = bob.human(); + + jvParams[jss::deposit_preauth][jss::authorized_credentials] = + Json::arrayValue; + auto& arr( + jvParams[jss::deposit_preauth][jss::authorized_credentials]); + + Json::Value jo; + jo[jss::issuer] = issuer.human(); + jo[jss::credential_type] = "12KK"; + arr.append(std::move(jo)); + + auto const jrr = + env.rpc("json", "ledger_entry", to_string(jvParams)); + checkErrorValue( + jrr[jss::result], "malformedAuthorizedCredentials", ""); + } + } + + void + testLedgerEntryDirectory() + { + testcase("ledger_entry Request Directory"); + using namespace test::jtx; + Env env{*this}; + Account const alice{"alice"}; + Account const gw{"gateway"}; + auto const USD = gw["USD"]; + env.fund(XRP(10000), alice, gw); + env.close(); + + env.trust(USD(1000), alice); + env.close(); + + // Run up the number of directory entries so alice has two + // directory nodes. + for (int d = 1'000'032; d >= 1'000'000; --d) + { + env(offer(alice, USD(1), drops(d))); + } + env.close(); + + std::string const ledgerHash{to_string(env.closed()->info().hash)}; + { + // Exercise ledger_closed along the way. + Json::Value const jrr = env.rpc("ledger_closed")[jss::result]; + BEAST_EXPECT(jrr[jss::ledger_hash] == ledgerHash); + BEAST_EXPECT(jrr[jss::ledger_index] == 5); + } + + std::string const dirRootIndex = + "A33EC6BB85FB5674074C4A3A43373BB17645308F3EAE1933E3E35252162B217D"; + { + // Locate directory by index. 
+ Json::Value jvParams; + jvParams[jss::directory] = dirRootIndex; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + BEAST_EXPECT(jrr[jss::node][sfIndexes.jsonName].size() == 32); + } + { + // Locate directory by directory root. + Json::Value jvParams; + jvParams[jss::directory] = Json::objectValue; + jvParams[jss::directory][jss::dir_root] = dirRootIndex; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + BEAST_EXPECT(jrr[jss::index] == dirRootIndex); + } + { + // Locate directory by owner. + Json::Value jvParams; + jvParams[jss::directory] = Json::objectValue; + jvParams[jss::directory][jss::owner] = alice.human(); + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + BEAST_EXPECT(jrr[jss::index] == dirRootIndex); + } + { + // Locate directory by directory root and sub_index. + Json::Value jvParams; + jvParams[jss::directory] = Json::objectValue; + jvParams[jss::directory][jss::dir_root] = dirRootIndex; + jvParams[jss::directory][jss::sub_index] = 1; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + BEAST_EXPECT(jrr[jss::index] != dirRootIndex); + BEAST_EXPECT(jrr[jss::node][sfIndexes.jsonName].size() == 2); + } + { + // Locate directory by owner and sub_index. + Json::Value jvParams; + jvParams[jss::directory] = Json::objectValue; + jvParams[jss::directory][jss::owner] = alice.human(); + jvParams[jss::directory][jss::sub_index] = 1; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + BEAST_EXPECT(jrr[jss::index] != dirRootIndex); + BEAST_EXPECT(jrr[jss::node][sfIndexes.jsonName].size() == 2); + } + { + // Null directory argument. + Json::Value jvParams; + jvParams[jss::directory] = Json::nullValue; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + { + // Non-integer sub_index. + Json::Value jvParams; + jvParams[jss::directory] = Json::objectValue; + jvParams[jss::directory][jss::dir_root] = dirRootIndex; + jvParams[jss::directory][jss::sub_index] = 1.5; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + { + // Malformed owner entry. + Json::Value jvParams; + jvParams[jss::directory] = Json::objectValue; + + std::string const badAddress = makeBadAddress(alice.human()); + jvParams[jss::directory][jss::owner] = badAddress; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedAddress", ""); + } + { + // Malformed directory object. Specify both dir_root and owner. + Json::Value jvParams; + jvParams[jss::directory] = Json::objectValue; + jvParams[jss::directory][jss::owner] = alice.human(); + jvParams[jss::directory][jss::dir_root] = dirRootIndex; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + { + // Incomplete directory object. Missing both dir_root and owner. 
+ Json::Value jvParams; + jvParams[jss::directory] = Json::objectValue; + jvParams[jss::directory][jss::sub_index] = 1; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + } + + void + testLedgerEntryEscrow() + { + testcase("ledger_entry Request Escrow"); + using namespace test::jtx; + Env env{*this}; + Account const alice{"alice"}; + env.fund(XRP(10000), alice); + env.close(); + + // Lambda to create an escrow. + auto escrowCreate = [](test::jtx::Account const& account, + test::jtx::Account const& to, + STAmount const& amount, + NetClock::time_point const& cancelAfter) { + Json::Value jv; + jv[jss::TransactionType] = jss::EscrowCreate; + jv[jss::Flags] = tfUniversal; + jv[jss::Account] = account.human(); + jv[jss::Destination] = to.human(); + jv[jss::Amount] = amount.getJson(JsonOptions::none); + jv[sfFinishAfter.jsonName] = + cancelAfter.time_since_epoch().count() + 2; + return jv; + }; + + using namespace std::chrono_literals; + env(escrowCreate(alice, alice, XRP(333), env.now() + 2s)); + env.close(); + + std::string const ledgerHash{to_string(env.closed()->info().hash)}; + std::string escrowIndex; + { + // Request the escrow using owner and sequence. + Json::Value jvParams; + jvParams[jss::escrow] = Json::objectValue; + jvParams[jss::escrow][jss::owner] = alice.human(); + jvParams[jss::escrow][jss::seq] = env.seq(alice) - 1; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + BEAST_EXPECT( + jrr[jss::node][jss::Amount] == XRP(333).value().getText()); + escrowIndex = jrr[jss::index].asString(); + } + { + // Request the escrow by index. + Json::Value jvParams; + jvParams[jss::escrow] = escrowIndex; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + BEAST_EXPECT( + jrr[jss::node][jss::Amount] == XRP(333).value().getText()); + } + { + // Malformed owner entry. + Json::Value jvParams; + jvParams[jss::escrow] = Json::objectValue; + + std::string const badAddress = makeBadAddress(alice.human()); + jvParams[jss::escrow][jss::owner] = badAddress; + jvParams[jss::escrow][jss::seq] = env.seq(alice) - 1; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedOwner", ""); + } + { + // Missing owner. + Json::Value jvParams; + jvParams[jss::escrow] = Json::objectValue; + jvParams[jss::escrow][jss::seq] = env.seq(alice) - 1; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + { + // Missing sequence. + Json::Value jvParams; + jvParams[jss::escrow] = Json::objectValue; + jvParams[jss::escrow][jss::owner] = alice.human(); + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + { + // Non-integer sequence. 
+ Json::Value jvParams; + jvParams[jss::escrow] = Json::objectValue; + jvParams[jss::escrow][jss::owner] = alice.human(); + jvParams[jss::escrow][jss::seq] = + std::to_string(env.seq(alice) - 1); + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + } + + void + testLedgerEntryOffer() + { + testcase("ledger_entry Request Offer"); + using namespace test::jtx; + Env env{*this}; + Account const alice{"alice"}; + Account const gw{"gateway"}; + auto const USD = gw["USD"]; + env.fund(XRP(10000), alice, gw); + env.close(); + + env(offer(alice, USD(321), XRP(322))); + env.close(); + + std::string const ledgerHash{to_string(env.closed()->info().hash)}; + std::string offerIndex; + { + // Request the offer using owner and sequence. + Json::Value jvParams; + jvParams[jss::offer] = Json::objectValue; + jvParams[jss::offer][jss::account] = alice.human(); + jvParams[jss::offer][jss::seq] = env.seq(alice) - 1; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + BEAST_EXPECT(jrr[jss::node][jss::TakerGets] == "322000000"); + offerIndex = jrr[jss::index].asString(); + } + { + // Request the offer using its index. + Json::Value jvParams; + jvParams[jss::offer] = offerIndex; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + BEAST_EXPECT(jrr[jss::node][jss::TakerGets] == "322000000"); + } + { + // Malformed account entry. + Json::Value jvParams; + jvParams[jss::offer] = Json::objectValue; + + std::string const badAddress = makeBadAddress(alice.human()); + jvParams[jss::offer][jss::account] = badAddress; + jvParams[jss::offer][jss::seq] = env.seq(alice) - 1; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedAddress", ""); + } + { + // Malformed offer object. Missing account member. + Json::Value jvParams; + jvParams[jss::offer] = Json::objectValue; + jvParams[jss::offer][jss::seq] = env.seq(alice) - 1; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + { + // Malformed offer object. Missing seq member. + Json::Value jvParams; + jvParams[jss::offer] = Json::objectValue; + jvParams[jss::offer][jss::account] = alice.human(); + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + { + // Malformed offer object. Non-integral seq member. + Json::Value jvParams; + jvParams[jss::offer] = Json::objectValue; + jvParams[jss::offer][jss::account] = alice.human(); + jvParams[jss::offer][jss::seq] = std::to_string(env.seq(alice) - 1); + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + } + + void + testLedgerEntryPayChan() + { + testcase("ledger_entry Request Pay Chan"); + using namespace test::jtx; + using namespace std::literals::chrono_literals; + Env env{*this}; + Account const alice{"alice"}; + + env.fund(XRP(10000), alice); + env.close(); + + // Lambda to create a PayChan. 
+ auto payChanCreate = [](test::jtx::Account const& account, + test::jtx::Account const& to, + STAmount const& amount, + NetClock::duration const& settleDelay, + PublicKey const& pk) { + Json::Value jv; + jv[jss::TransactionType] = jss::PaymentChannelCreate; + jv[jss::Account] = account.human(); + jv[jss::Destination] = to.human(); + jv[jss::Amount] = amount.getJson(JsonOptions::none); + jv[sfSettleDelay.jsonName] = settleDelay.count(); + jv[sfPublicKey.jsonName] = strHex(pk.slice()); + return jv; + }; + + env(payChanCreate(alice, env.master, XRP(57), 18s, alice.pk())); + env.close(); + + std::string const ledgerHash{to_string(env.closed()->info().hash)}; + + uint256 const payChanIndex{ + keylet::payChan(alice, env.master, env.seq(alice) - 1).key}; + { + // Request the payment channel using its index. + Json::Value jvParams; + jvParams[jss::payment_channel] = to_string(payChanIndex); + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + BEAST_EXPECT(jrr[jss::node][sfAmount.jsonName] == "57000000"); + BEAST_EXPECT(jrr[jss::node][sfBalance.jsonName] == "0"); + BEAST_EXPECT(jrr[jss::node][sfSettleDelay.jsonName] == 18); + } + { + // Request an index that is not a payment channel. + Json::Value jvParams; + jvParams[jss::payment_channel] = ledgerHash; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "entryNotFound", ""); + } + } + + void + testLedgerEntryRippleState() + { + testcase("ledger_entry Request RippleState"); + using namespace test::jtx; + Env env{*this}; + Account const alice{"alice"}; + Account const gw{"gateway"}; + auto const USD = gw["USD"]; + env.fund(XRP(10000), alice, gw); + env.close(); + + env.trust(USD(999), alice); + env.close(); + + env(pay(gw, alice, USD(97))); + env.close(); + + // check both aliases + for (auto const& fieldName : {jss::ripple_state, jss::state}) + { + std::string const ledgerHash{to_string(env.closed()->info().hash)}; + { + // Request the trust line using the accounts and currency. + Json::Value jvParams; + jvParams[fieldName] = Json::objectValue; + jvParams[fieldName][jss::accounts] = Json::arrayValue; + jvParams[fieldName][jss::accounts][0u] = alice.human(); + jvParams[fieldName][jss::accounts][1u] = gw.human(); + jvParams[fieldName][jss::currency] = "USD"; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + BEAST_EXPECT( + jrr[jss::node][sfBalance.jsonName][jss::value] == "-97"); + BEAST_EXPECT( + jrr[jss::node][sfHighLimit.jsonName][jss::value] == "999"); + } + { + // ripple_state is not an object. + Json::Value jvParams; + jvParams[fieldName] = "ripple_state"; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + { + // ripple_state.currency is missing. + Json::Value jvParams; + jvParams[fieldName] = Json::objectValue; + jvParams[fieldName][jss::accounts] = Json::arrayValue; + jvParams[fieldName][jss::accounts][0u] = alice.human(); + jvParams[fieldName][jss::accounts][1u] = gw.human(); + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + { + // ripple_state accounts is not an array. 
+ Json::Value jvParams; + jvParams[fieldName] = Json::objectValue; + jvParams[fieldName][jss::accounts] = 2; + jvParams[fieldName][jss::currency] = "USD"; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + { + // ripple_state one of the accounts is missing. + Json::Value jvParams; + jvParams[fieldName] = Json::objectValue; + jvParams[fieldName][jss::accounts] = Json::arrayValue; + jvParams[fieldName][jss::accounts][0u] = alice.human(); + jvParams[fieldName][jss::currency] = "USD"; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + { + // ripple_state more than 2 accounts. + Json::Value jvParams; + jvParams[fieldName] = Json::objectValue; + jvParams[fieldName][jss::accounts] = Json::arrayValue; + jvParams[fieldName][jss::accounts][0u] = alice.human(); + jvParams[fieldName][jss::accounts][1u] = gw.human(); + jvParams[fieldName][jss::accounts][2u] = alice.human(); + jvParams[fieldName][jss::currency] = "USD"; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + { + // ripple_state account[0] is not a string. + Json::Value jvParams; + jvParams[fieldName] = Json::objectValue; + jvParams[fieldName][jss::accounts] = Json::arrayValue; + jvParams[fieldName][jss::accounts][0u] = 44; + jvParams[fieldName][jss::accounts][1u] = gw.human(); + jvParams[fieldName][jss::currency] = "USD"; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + { + // ripple_state account[1] is not a string. + Json::Value jvParams; + jvParams[fieldName] = Json::objectValue; + jvParams[fieldName][jss::accounts] = Json::arrayValue; + jvParams[fieldName][jss::accounts][0u] = alice.human(); + jvParams[fieldName][jss::accounts][1u] = 21; + jvParams[fieldName][jss::currency] = "USD"; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + { + // ripple_state account[0] == account[1]. + Json::Value jvParams; + jvParams[fieldName] = Json::objectValue; + jvParams[fieldName][jss::accounts] = Json::arrayValue; + jvParams[fieldName][jss::accounts][0u] = alice.human(); + jvParams[fieldName][jss::accounts][1u] = alice.human(); + jvParams[fieldName][jss::currency] = "USD"; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + { + // ripple_state malformed account[0]. + Json::Value jvParams; + jvParams[fieldName] = Json::objectValue; + jvParams[fieldName][jss::accounts] = Json::arrayValue; + jvParams[fieldName][jss::accounts][0u] = + makeBadAddress(alice.human()); + jvParams[fieldName][jss::accounts][1u] = gw.human(); + jvParams[fieldName][jss::currency] = "USD"; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedAddress", ""); + } + { + // ripple_state malformed account[1]. 
+ Json::Value jvParams; + jvParams[fieldName] = Json::objectValue; + jvParams[fieldName][jss::accounts] = Json::arrayValue; + jvParams[fieldName][jss::accounts][0u] = alice.human(); + jvParams[fieldName][jss::accounts][1u] = + makeBadAddress(gw.human()); + jvParams[fieldName][jss::currency] = "USD"; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedAddress", ""); + } + { + // ripple_state malformed currency. + Json::Value jvParams; + jvParams[fieldName] = Json::objectValue; + jvParams[fieldName][jss::accounts] = Json::arrayValue; + jvParams[fieldName][jss::accounts][0u] = alice.human(); + jvParams[fieldName][jss::accounts][1u] = gw.human(); + jvParams[fieldName][jss::currency] = "USDollars"; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedCurrency", ""); + } + } + } + + void + testLedgerEntryTicket() + { + testcase("ledger_entry Request Ticket"); + using namespace test::jtx; + Env env{*this}; + env.close(); + + // Create two tickets. + std::uint32_t const tkt1{env.seq(env.master) + 1}; + env(ticket::create(env.master, 2)); + env.close(); + + std::string const ledgerHash{to_string(env.closed()->info().hash)}; + // Request four tickets: one before the first one we created, the + // two created tickets, and the ticket that would come after the + // last created ticket. + { + // Not a valid ticket requested by index. + Json::Value jvParams; + jvParams[jss::ticket] = + to_string(getTicketIndex(env.master, tkt1 - 1)); + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "entryNotFound", ""); + } + { + // First real ticket requested by index. + Json::Value jvParams; + jvParams[jss::ticket] = to_string(getTicketIndex(env.master, tkt1)); + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + BEAST_EXPECT( + jrr[jss::node][sfLedgerEntryType.jsonName] == jss::Ticket); + BEAST_EXPECT(jrr[jss::node][sfTicketSequence.jsonName] == tkt1); + } + { + // Second real ticket requested by account and sequence. + Json::Value jvParams; + jvParams[jss::ticket] = Json::objectValue; + jvParams[jss::ticket][jss::account] = env.master.human(); + jvParams[jss::ticket][jss::ticket_seq] = tkt1 + 1; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + BEAST_EXPECT( + jrr[jss::node][jss::index] == + to_string(getTicketIndex(env.master, tkt1 + 1))); + } + { + // Not a valid ticket requested by account and sequence. + Json::Value jvParams; + jvParams[jss::ticket] = Json::objectValue; + jvParams[jss::ticket][jss::account] = env.master.human(); + jvParams[jss::ticket][jss::ticket_seq] = tkt1 + 2; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "entryNotFound", ""); + } + { + // Request a ticket using an account root entry. 
+ Json::Value jvParams; + jvParams[jss::ticket] = to_string(keylet::account(env.master).key); + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "unexpectedLedgerType", ""); + } + { + // Malformed account entry. + Json::Value jvParams; + jvParams[jss::ticket] = Json::objectValue; + + std::string const badAddress = makeBadAddress(env.master.human()); + jvParams[jss::ticket][jss::account] = badAddress; + jvParams[jss::ticket][jss::ticket_seq] = env.seq(env.master) - 1; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedAddress", ""); + } + { + // Malformed ticket object. Missing account member. + Json::Value jvParams; + jvParams[jss::ticket] = Json::objectValue; + jvParams[jss::ticket][jss::ticket_seq] = env.seq(env.master) - 1; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + { + // Malformed ticket object. Missing seq member. + Json::Value jvParams; + jvParams[jss::ticket] = Json::objectValue; + jvParams[jss::ticket][jss::account] = env.master.human(); + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + { + // Malformed ticket object. Non-integral seq member. + Json::Value jvParams; + jvParams[jss::ticket] = Json::objectValue; + jvParams[jss::ticket][jss::account] = env.master.human(); + jvParams[jss::ticket][jss::ticket_seq] = + std::to_string(env.seq(env.master) - 1); + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + } + + void + testLedgerEntryDID() + { + testcase("ledger_entry Request DID"); + using namespace test::jtx; + using namespace std::literals::chrono_literals; + Env env{*this}; + Account const alice{"alice"}; + + env.fund(XRP(10000), alice); + env.close(); + + // Lambda to create a DID. + auto didCreate = [](test::jtx::Account const& account) { + Json::Value jv; + jv[jss::TransactionType] = jss::DIDSet; + jv[jss::Account] = account.human(); + jv[sfDIDDocument.jsonName] = strHex(std::string{"data"}); + jv[sfURI.jsonName] = strHex(std::string{"uri"}); + return jv; + }; + + env(didCreate(alice)); + env.close(); + + std::string const ledgerHash{to_string(env.closed()->info().hash)}; + + { + // Request the DID using its index. + Json::Value jvParams; + jvParams[jss::did] = alice.human(); + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + BEAST_EXPECT( + jrr[jss::node][sfDIDDocument.jsonName] == + strHex(std::string{"data"})); + BEAST_EXPECT( + jrr[jss::node][sfURI.jsonName] == strHex(std::string{"uri"})); + } + { + // Request an index that is not a DID. 
+            Json::Value jvParams;
+            jvParams[jss::did] = env.master.human();
+            jvParams[jss::ledger_hash] = ledgerHash;
+            Json::Value const jrr = env.rpc(
+                "json", "ledger_entry", to_string(jvParams))[jss::result];
+            checkErrorValue(jrr, "entryNotFound", "");
+        }
+    }
+
+    void
+    testLedgerEntryInvalidParams(unsigned int apiVersion)
+    {
+        testcase(
+            "ledger_entry Request With Invalid Parameters v" +
+            std::to_string(apiVersion));
+        using namespace test::jtx;
+        Env env{*this};
+
+        std::string const ledgerHash{to_string(env.closed()->info().hash)};
+
+        auto makeParams = [&apiVersion](std::function<void(Json::Value&)> f) {
+            Json::Value params;
+            params[jss::api_version] = apiVersion;
+            f(params);
+            return params;
+        };
+        // "features" is not an option supported by ledger_entry.
+        {
+            auto const jvParams =
+                makeParams([&ledgerHash](Json::Value& jvParams) {
+                    jvParams[jss::features] = ledgerHash;
+                    jvParams[jss::ledger_hash] = ledgerHash;
+                });
+            Json::Value const jrr = env.rpc(
+                "json", "ledger_entry", to_string(jvParams))[jss::result];
+
+            if (apiVersion < 2u)
+                checkErrorValue(jrr, "unknownOption", "");
+            else
+                checkErrorValue(jrr, "invalidParams", "");
+        }
+        Json::Value const injectObject = []() {
+            Json::Value obj(Json::objectValue);
+            obj[jss::account] = "rhigTLJJyXXSRUyRCQtqi1NoAZZzZnS4KU";
+            obj[jss::ledger_index] = "validated";
+            return obj;
+        }();
+        Json::Value const injectArray = []() {
+            Json::Value arr(Json::arrayValue);
+            arr[0u] = "rhigTLJJyXXSRUyRCQtqi1NoAZZzZnS4KU";
+            arr[1u] = "validated";
+            return arr;
+        }();
+
+        // invalid input for fields that can handle an object, but can't handle
+        // an array
+        for (auto const& field :
+             {jss::directory, jss::escrow, jss::offer, jss::ticket, jss::amm})
+        {
+            auto const jvParams =
+                makeParams([&field, &injectArray](Json::Value& jvParams) {
+                    jvParams[field] = injectArray;
+                });
+
+            Json::Value const jrr = env.rpc(
+                "json", "ledger_entry", to_string(jvParams))[jss::result];
+
+            if (apiVersion < 2u)
+                checkErrorValue(jrr, "internal", "Internal error.");
+            else
+                checkErrorValue(jrr, "invalidParams", "");
+        }
+        // Fields that can handle objects just fine
+        for (auto const& field :
+             {jss::directory, jss::escrow, jss::offer, jss::ticket, jss::amm})
+        {
+            auto const jvParams =
+                makeParams([&field, &injectObject](Json::Value& jvParams) {
+                    jvParams[field] = injectObject;
+                });
+
+            Json::Value const jrr = env.rpc(
+                "json", "ledger_entry", to_string(jvParams))[jss::result];
+
+            checkErrorValue(jrr, "malformedRequest", "");
+        }
+
+        for (auto const& inject : {injectObject, injectArray})
+        {
+            // invalid input for fields that can't handle an object or an array
+            for (auto const& field :
+                 {jss::index,
+                  jss::account_root,
+                  jss::check,
+                  jss::payment_channel})
+            {
+                auto const jvParams =
+                    makeParams([&field, &inject](Json::Value& jvParams) {
+                        jvParams[field] = inject;
+                    });
+
+                Json::Value const jrr = env.rpc(
+                    "json", "ledger_entry", to_string(jvParams))[jss::result];
+
+                if (apiVersion < 2u)
+                    checkErrorValue(jrr, "internal", "Internal error.");
+                else
+                    checkErrorValue(jrr, "invalidParams", "");
+            }
+            // directory sub-fields
+            for (auto const& field : {jss::dir_root, jss::owner})
+            {
+                auto const jvParams =
+                    makeParams([&field, &inject](Json::Value& jvParams) {
+                        jvParams[jss::directory][field] = inject;
+                    });
+
+                Json::Value const jrr = env.rpc(
+                    "json", "ledger_entry", to_string(jvParams))[jss::result];
+
+                if (apiVersion < 2u)
+                    checkErrorValue(jrr, "internal", "Internal error.");
+                else
+                    checkErrorValue(jrr, "invalidParams", "");
+            }
+            // escrow sub-fields
+            {
+                auto const jvParams =
+                    makeParams([&inject](Json::Value& jvParams) {
+                        jvParams[jss::escrow][jss::owner] = inject;
+                        jvParams[jss::escrow][jss::seq] = 99;
+                    });
+
+                Json::Value const jrr = env.rpc(
+                    "json", "ledger_entry", to_string(jvParams))[jss::result];
+
+                if (apiVersion < 2u)
+                    checkErrorValue(jrr, "internal", "Internal error.");
+                else
+                    checkErrorValue(jrr, "invalidParams", "");
+            }
+            // offer sub-fields
+            {
+                auto const jvParams =
+                    makeParams([&inject](Json::Value& jvParams) {
+                        jvParams[jss::offer][jss::account] = inject;
+                        jvParams[jss::offer][jss::seq] = 99;
+                    });
+
+                Json::Value const jrr = env.rpc(
+                    "json", "ledger_entry", to_string(jvParams))[jss::result];
+
+                if (apiVersion < 2u)
+                    checkErrorValue(jrr, "internal", "Internal error.");
+                else
+                    checkErrorValue(jrr, "invalidParams", "");
+            }
+            // ripple_state sub-fields
+            {
+                auto const jvParams =
+                    makeParams([&inject](Json::Value& jvParams) {
+                        Json::Value rs(Json::objectValue);
+                        rs[jss::currency] = "FOO";
+                        rs[jss::accounts] = Json::Value(Json::arrayValue);
+                        rs[jss::accounts][0u] =
+                            "rhigTLJJyXXSRUyRCQtqi1NoAZZzZnS4KU";
+                        rs[jss::accounts][1u] =
+                            "rKssEq6pg1KbqEqAFnua5mFAL6Ggpsh2wv";
+                        rs[jss::currency] = inject;
+                        jvParams[jss::ripple_state] = std::move(rs);
+                    });
+
+                Json::Value const jrr = env.rpc(
+                    "json", "ledger_entry", to_string(jvParams))[jss::result];
+
+                if (apiVersion < 2u)
+                    checkErrorValue(jrr, "internal", "Internal error.");
+                else
+                    checkErrorValue(jrr, "invalidParams", "");
+            }
+            // ticket sub-fields
+            {
+                auto const jvParams =
+                    makeParams([&inject](Json::Value& jvParams) {
+                        jvParams[jss::ticket][jss::account] = inject;
+                        jvParams[jss::ticket][jss::ticket_seq] = 99;
+                    });
+
+                Json::Value const jrr = env.rpc(
+                    "json", "ledger_entry", to_string(jvParams))[jss::result];
+
+                if (apiVersion < 2u)
+                    checkErrorValue(jrr, "internal", "Internal error.");
+                else
+                    checkErrorValue(jrr, "invalidParams", "");
+            }
+
+            // Fields that can handle malformed inputs just fine
+            for (auto const& field : {jss::nft_page, jss::deposit_preauth})
+            {
+                auto const jvParams =
+                    makeParams([&field, &inject](Json::Value& jvParams) {
+                        jvParams[field] = inject;
+                    });
+
+                Json::Value const jrr = env.rpc(
+                    "json", "ledger_entry", to_string(jvParams))[jss::result];
+
+                checkErrorValue(jrr, "malformedRequest", "");
+            }
+            // Subfields of deposit_preauth that can handle malformed inputs
+            // fine
+            for (auto const& field : {jss::owner, jss::authorized})
+            {
+                auto const jvParams =
+                    makeParams([&field, &inject](Json::Value& jvParams) {
+                        auto pa = Json::Value(Json::objectValue);
+                        pa[jss::owner] = "rhigTLJJyXXSRUyRCQtqi1NoAZZzZnS4KU";
+                        pa[jss::authorized] =
+                            "rKssEq6pg1KbqEqAFnua5mFAL6Ggpsh2wv";
+                        pa[field] = inject;
+                        jvParams[jss::deposit_preauth] = std::move(pa);
+                    });
+
+                Json::Value const jrr = env.rpc(
+                    "json", "ledger_entry", to_string(jvParams))[jss::result];
+
+                checkErrorValue(jrr, "malformedRequest", "");
+            }
+        }
+    }
+
+    void
+    testInvalidOracleLedgerEntry()
+    {
+        testcase("Invalid Oracle Ledger Entry");
+        using namespace ripple::test::jtx;
+        using namespace ripple::test::jtx::oracle;
+
+        Env env(*this);
+        Account const owner("owner");
+        env.fund(XRP(1'000), owner);
+        Oracle oracle(
+            env,
+            {.owner = owner,
+             .fee = static_cast<std::uint32_t>(
+                 env.current()->fees().base.drops())});
+
+        // Malformed document id
+        auto res = Oracle::ledgerEntry(env, owner, NoneTag);
+        BEAST_EXPECT(res[jss::error].asString() == "invalidParams");
+        std::vector<AnyValue> invalid = {-1, 1.2, "", "Invalid"};
+        for (auto const& v : invalid)
+        {
+            auto const res =
+                Oracle::ledgerEntry(env, owner, v);
+            BEAST_EXPECT(res[jss::error].asString() == "malformedDocumentID");
+        }
+        // Missing document id
+        res = Oracle::ledgerEntry(env, owner, std::nullopt);
+        BEAST_EXPECT(res[jss::error].asString() == "malformedRequest");
+
+        // Missing account
+        res = Oracle::ledgerEntry(env, std::nullopt, 1);
+        BEAST_EXPECT(res[jss::error].asString() == "malformedRequest");
+
+        // Malformed account
+        std::string malfAccount = to_string(owner.id());
+        malfAccount.replace(10, 1, 1, '!');
+        res = Oracle::ledgerEntry(env, malfAccount, 1);
+        BEAST_EXPECT(res[jss::error].asString() == "malformedAddress");
+    }
+
+    void
+    testOracleLedgerEntry()
+    {
+        testcase("Oracle Ledger Entry");
+        using namespace ripple::test::jtx;
+        using namespace ripple::test::jtx::oracle;
+
+        Env env(*this);
+        auto const baseFee =
+            static_cast<std::uint32_t>(env.current()->fees().base.drops());
+        std::vector<AccountID> accounts;
+        std::vector<std::uint32_t> oracles;
+        for (int i = 0; i < 10; ++i)
+        {
+            Account const owner(std::string("owner") + std::to_string(i));
+            env.fund(XRP(1'000), owner);
+            // different accounts can have the same asset pair
+            Oracle oracle(
+                env, {.owner = owner, .documentID = i, .fee = baseFee});
+            accounts.push_back(owner.id());
+            oracles.push_back(oracle.documentID());
+            // same account can have different asset pair
+            Oracle oracle1(
+                env, {.owner = owner, .documentID = i + 10, .fee = baseFee});
+            accounts.push_back(owner.id());
+            oracles.push_back(oracle1.documentID());
+        }
+        for (int i = 0; i < accounts.size(); ++i)
+        {
+            auto const jv = [&]() {
+                // document id is uint32
+                if (i % 2)
+                    return Oracle::ledgerEntry(env, accounts[i], oracles[i]);
+                // document id is string
+                return Oracle::ledgerEntry(
+                    env, accounts[i], std::to_string(oracles[i]));
+            }();
+            try
+            {
+                BEAST_EXPECT(
+                    jv[jss::node][jss::Owner] == to_string(accounts[i]));
+            }
+            catch (...)
+            {
+                fail();
+            }
+        }
+    }
+
+    void
+    testLedgerEntryMPT()
+    {
+        testcase("ledger_entry Request MPT");
+        using namespace test::jtx;
+        using namespace std::literals::chrono_literals;
+        Env env{*this};
+        Account const alice{"alice"};
+        Account const bob("bob");
+
+        MPTTester mptAlice(env, alice, {.holders = {bob}});
+        mptAlice.create(
+            {.transferFee = 10,
+             .metadata = "123",
+             .ownerCount = 1,
+             .flags = tfMPTCanLock | tfMPTRequireAuth | tfMPTCanEscrow |
+                 tfMPTCanTrade | tfMPTCanTransfer | tfMPTCanClawback});
+        mptAlice.authorize({.account = bob, .holderCount = 1});
+
+        std::string const ledgerHash{to_string(env.closed()->info().hash)};
+
+        std::string const badMptID =
+            "00000193B9DDCAF401B5B3B26875986043F82CD0D13B4315";
+        {
+            // Request the MPTIssuance using its MPTIssuanceID.
+            Json::Value jvParams;
+            jvParams[jss::mpt_issuance] = strHex(mptAlice.issuanceID());
+            jvParams[jss::ledger_hash] = ledgerHash;
+            Json::Value const jrr = env.rpc(
+                "json", "ledger_entry", to_string(jvParams))[jss::result];
+            BEAST_EXPECT(
+                jrr[jss::node][sfMPTokenMetadata.jsonName] ==
+                strHex(std::string{"123"}));
+            BEAST_EXPECT(
+                jrr[jss::node][jss::mpt_issuance_id] ==
+                strHex(mptAlice.issuanceID()));
+        }
+        {
+            // Request an index that is not a MPTIssuance.
+            Json::Value jvParams;
+            jvParams[jss::mpt_issuance] = badMptID;
+            jvParams[jss::ledger_hash] = ledgerHash;
+            Json::Value const jrr = env.rpc(
+                "json", "ledger_entry", to_string(jvParams))[jss::result];
+            checkErrorValue(jrr, "entryNotFound", "");
+        }
+        {
+            // Request the MPToken using its owner + mptIssuanceID.
+ Json::Value jvParams; + jvParams[jss::mptoken] = Json::objectValue; + jvParams[jss::mptoken][jss::account] = bob.human(); + jvParams[jss::mptoken][jss::mpt_issuance_id] = + strHex(mptAlice.issuanceID()); + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + BEAST_EXPECT( + jrr[jss::node][sfMPTokenIssuanceID.jsonName] == + strHex(mptAlice.issuanceID())); + } + { + // Request the MPToken using a bad mptIssuanceID. + Json::Value jvParams; + jvParams[jss::mptoken] = Json::objectValue; + jvParams[jss::mptoken][jss::account] = bob.human(); + jvParams[jss::mptoken][jss::mpt_issuance_id] = badMptID; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "entryNotFound", ""); + } + } + + void + testLedgerEntryPermissionedDomain() + { + testcase("ledger_entry PermissionedDomain"); + + using namespace test::jtx; + + Env env(*this, supported_amendments() | featurePermissionedDomains); + Account const issuer{"issuer"}; + Account const alice{"alice"}; + Account const bob{"bob"}; + + env.fund(XRP(5000), issuer, alice, bob); + env.close(); + + auto const seq = env.seq(alice); + env(pdomain::setTx(alice, {{alice, "first credential"}})); + env.close(); + auto const objects = pdomain::getObjects(alice, env); + if (!BEAST_EXPECT(objects.size() == 1)) + return; + + { + // Succeed + Json::Value params; + params[jss::ledger_index] = jss::validated; + params[jss::permissioned_domain][jss::account] = alice.human(); + params[jss::permissioned_domain][jss::seq] = seq; + auto jv = env.rpc("json", "ledger_entry", to_string(params)); + BEAST_EXPECT( + jv.isObject() && jv.isMember(jss::result) && + !jv[jss::result].isMember(jss::error) && + jv[jss::result].isMember(jss::node) && + jv[jss::result][jss::node].isMember( + sfLedgerEntryType.jsonName) && + jv[jss::result][jss::node][sfLedgerEntryType.jsonName] == + jss::PermissionedDomain); + + std::string const pdIdx = jv[jss::result][jss::index].asString(); + BEAST_EXPECT( + strHex(keylet::permissionedDomain(alice, seq).key) == pdIdx); + + params.clear(); + params[jss::ledger_index] = jss::validated; + params[jss::permissioned_domain] = pdIdx; + jv = env.rpc("json", "ledger_entry", to_string(params)); + BEAST_EXPECT( + jv.isObject() && jv.isMember(jss::result) && + !jv[jss::result].isMember(jss::error) && + jv[jss::result].isMember(jss::node) && + jv[jss::result][jss::node].isMember( + sfLedgerEntryType.jsonName) && + jv[jss::result][jss::node][sfLedgerEntryType.jsonName] == + jss::PermissionedDomain); + } + + { + // Fail, invalid permissioned domain index + Json::Value params; + params[jss::ledger_index] = jss::validated; + params[jss::permissioned_domain] = + "12F1F1F1F180D67377B2FAB292A31C922470326268D2B9B74CD1E582645B9A" + "DE"; + auto const jrr = env.rpc("json", "ledger_entry", to_string(params)); + checkErrorValue(jrr[jss::result], "entryNotFound", ""); + } + + { + // Fail, invalid permissioned domain index + Json::Value params; + params[jss::ledger_index] = jss::validated; + params[jss::permissioned_domain] = "NotAHexString"; + auto const jrr = env.rpc("json", "ledger_entry", to_string(params)); + checkErrorValue(jrr[jss::result], "malformedRequest", ""); + } + + { + // Fail, permissioned domain is not an object + Json::Value params; + params[jss::ledger_index] = jss::validated; + params[jss::permissioned_domain] = 10; + auto const jrr = env.rpc("json", "ledger_entry", 
to_string(params)); + checkErrorValue(jrr[jss::result], "malformedRequest", ""); + } + + { + // Fail, invalid account + Json::Value params; + params[jss::ledger_index] = jss::validated; + params[jss::permissioned_domain][jss::account] = 1; + params[jss::permissioned_domain][jss::seq] = seq; + auto const jrr = env.rpc("json", "ledger_entry", to_string(params)); + checkErrorValue(jrr[jss::result], "malformedAddress", ""); + } + + { + // Fail, account is an object + Json::Value params; + params[jss::ledger_index] = jss::validated; + params[jss::permissioned_domain][jss::account] = + Json::Value{Json::ValueType::objectValue}; + params[jss::permissioned_domain][jss::seq] = seq; + auto const jrr = env.rpc("json", "ledger_entry", to_string(params)); + checkErrorValue(jrr[jss::result], "malformedAddress", ""); + } + + { + // Fail, no account + Json::Value params; + params[jss::ledger_index] = jss::validated; + params[jss::permissioned_domain][jss::account] = ""; + params[jss::permissioned_domain][jss::seq] = seq; + auto const jrr = env.rpc("json", "ledger_entry", to_string(params)); + checkErrorValue(jrr[jss::result], "malformedAddress", ""); + } + + { + // Fail, invalid sequence + Json::Value params; + params[jss::ledger_index] = jss::validated; + params[jss::permissioned_domain][jss::account] = alice.human(); + params[jss::permissioned_domain][jss::seq] = "12g"; + auto const jrr = env.rpc("json", "ledger_entry", to_string(params)); + checkErrorValue(jrr[jss::result], "malformedRequest", ""); + } + } + + void + testLedgerEntryCLI() + { + testcase("ledger_entry command-line"); + using namespace test::jtx; + + Env env{*this}; + Account const alice{"alice"}; + env.fund(XRP(10000), alice); + env.close(); + + auto const checkId = keylet::check(env.master, env.seq(env.master)); + + env(check::create(env.master, alice, XRP(100))); + env.close(); + + std::string const ledgerHash{to_string(env.closed()->info().hash)}; + { + // Request a check. 
+ Json::Value const jrr = + env.rpc("ledger_entry", to_string(checkId.key))[jss::result]; + BEAST_EXPECT( + jrr[jss::node][sfLedgerEntryType.jsonName] == jss::Check); + BEAST_EXPECT(jrr[jss::node][sfSendMax.jsonName] == "100000000"); + } + } + +public: + void + run() override + { + testLedgerEntryInvalid(); + testLedgerEntryAccountRoot(); + testLedgerEntryCheck(); + testLedgerEntryCredentials(); + testLedgerEntryDepositPreauth(); + testLedgerEntryDepositPreauthCred(); + testLedgerEntryDirectory(); + testLedgerEntryEscrow(); + testLedgerEntryOffer(); + testLedgerEntryPayChan(); + testLedgerEntryRippleState(); + testLedgerEntryTicket(); + testLedgerEntryDID(); + testInvalidOracleLedgerEntry(); + testOracleLedgerEntry(); + testLedgerEntryMPT(); + testLedgerEntryPermissionedDomain(); + testLedgerEntryCLI(); + + forAllApiVersions(std::bind_front( + &LedgerEntry_test::testLedgerEntryInvalidParams, this)); + } +}; + +class LedgerEntry_XChain_test : public beast::unit_test::suite, + public test::jtx::XChainBridgeObjects +{ + void + checkErrorValue( + Json::Value const& jv, + std::string const& err, + std::string const& msg) + { + if (BEAST_EXPECT(jv.isMember(jss::status))) + BEAST_EXPECT(jv[jss::status] == "error"); + if (BEAST_EXPECT(jv.isMember(jss::error))) + BEAST_EXPECT(jv[jss::error] == err); + if (msg.empty()) + { + BEAST_EXPECT( + jv[jss::error_message] == Json::nullValue || + jv[jss::error_message] == ""); + } + else if (BEAST_EXPECT(jv.isMember(jss::error_message))) + BEAST_EXPECT(jv[jss::error_message] == msg); + } + + void + testLedgerEntryBridge() + { + testcase("ledger_entry: bridge"); + using namespace test::jtx; + + Env mcEnv{*this, features}; + Env scEnv(*this, envconfig(), features); + + createBridgeObjects(mcEnv, scEnv); + + std::string const ledgerHash{to_string(mcEnv.closed()->info().hash)}; + std::string bridge_index; + Json::Value mcBridge; + { + // request the bridge via RPC + Json::Value jvParams; + jvParams[jss::bridge_account] = mcDoor.human(); + jvParams[jss::bridge] = jvb; + Json::Value const jrr = mcEnv.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + + BEAST_EXPECT(jrr.isMember(jss::node)); + auto r = jrr[jss::node]; + // std::cout << to_string(r) << '\n'; + + BEAST_EXPECT(r.isMember(jss::Account)); + BEAST_EXPECT(r[jss::Account] == mcDoor.human()); + + BEAST_EXPECT(r.isMember(jss::Flags)); + + BEAST_EXPECT(r.isMember(sfLedgerEntryType.jsonName)); + BEAST_EXPECT(r[sfLedgerEntryType.jsonName] == jss::Bridge); + + // we not created an account yet + BEAST_EXPECT(r.isMember(sfXChainAccountCreateCount.jsonName)); + BEAST_EXPECT(r[sfXChainAccountCreateCount.jsonName].asInt() == 0); + + // we have not claimed a locking chain tx yet + BEAST_EXPECT(r.isMember(sfXChainAccountClaimCount.jsonName)); + BEAST_EXPECT(r[sfXChainAccountClaimCount.jsonName].asInt() == 0); + + BEAST_EXPECT(r.isMember(jss::index)); + bridge_index = r[jss::index].asString(); + mcBridge = r; + } + { + // request the bridge via RPC by index + Json::Value jvParams; + jvParams[jss::index] = bridge_index; + Json::Value const jrr = mcEnv.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + + BEAST_EXPECT(jrr.isMember(jss::node)); + BEAST_EXPECT(jrr[jss::node] == mcBridge); + } + { + // swap door accounts and make sure we get an error value + Json::Value jvParams; + // Sidechain door account is "master", not scDoor + jvParams[jss::bridge_account] = Account::master.human(); + jvParams[jss::bridge] = jvb; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = 
mcEnv.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + + checkErrorValue(jrr, "entryNotFound", ""); + } + { + // create two claim ids and verify that the bridge counter was + // incremented + mcEnv(xchain_create_claim_id(mcAlice, jvb, reward, scAlice)); + mcEnv.close(); + mcEnv(xchain_create_claim_id(mcBob, jvb, reward, scBob)); + mcEnv.close(); + + // request the bridge via RPC + Json::Value jvParams; + jvParams[jss::bridge_account] = mcDoor.human(); + jvParams[jss::bridge] = jvb; + // std::cout << to_string(jvParams) << '\n'; + Json::Value const jrr = mcEnv.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + + BEAST_EXPECT(jrr.isMember(jss::node)); + auto r = jrr[jss::node]; + + // we executed two create claim id txs + BEAST_EXPECT(r.isMember(sfXChainClaimID.jsonName)); + BEAST_EXPECT(r[sfXChainClaimID.jsonName].asInt() == 2); + } + } + + void + testLedgerEntryClaimID() + { + testcase("ledger_entry: xchain_claim_id"); + using namespace test::jtx; + + Env mcEnv{*this, features}; + Env scEnv(*this, envconfig(), features); + + createBridgeObjects(mcEnv, scEnv); + + scEnv(xchain_create_claim_id(scAlice, jvb, reward, mcAlice)); + scEnv.close(); + scEnv(xchain_create_claim_id(scBob, jvb, reward, mcBob)); + scEnv.close(); + + std::string bridge_index; + { + // request the xchain_claim_id via RPC + Json::Value jvParams; + jvParams[jss::xchain_owned_claim_id] = jvXRPBridgeRPC; + jvParams[jss::xchain_owned_claim_id][jss::xchain_owned_claim_id] = + 1; + // std::cout << to_string(jvParams) << '\n'; + Json::Value const jrr = scEnv.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + + BEAST_EXPECT(jrr.isMember(jss::node)); + auto r = jrr[jss::node]; + // std::cout << to_string(r) << '\n'; + + BEAST_EXPECT(r.isMember(jss::Account)); + BEAST_EXPECT(r[jss::Account] == scAlice.human()); + BEAST_EXPECT( + r[sfLedgerEntryType.jsonName] == jss::XChainOwnedClaimID); + BEAST_EXPECT(r[sfXChainClaimID.jsonName].asInt() == 1); + BEAST_EXPECT(r[sfOwnerNode.jsonName].asInt() == 0); + } + + { + // request the xchain_claim_id via RPC + Json::Value jvParams; + jvParams[jss::xchain_owned_claim_id] = jvXRPBridgeRPC; + jvParams[jss::xchain_owned_claim_id][jss::xchain_owned_claim_id] = + 2; + Json::Value const jrr = scEnv.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + + BEAST_EXPECT(jrr.isMember(jss::node)); + auto r = jrr[jss::node]; + // std::cout << to_string(r) << '\n'; + + BEAST_EXPECT(r.isMember(jss::Account)); + BEAST_EXPECT(r[jss::Account] == scBob.human()); + BEAST_EXPECT( + r[sfLedgerEntryType.jsonName] == jss::XChainOwnedClaimID); + BEAST_EXPECT(r[sfXChainClaimID.jsonName].asInt() == 2); + BEAST_EXPECT(r[sfOwnerNode.jsonName].asInt() == 0); + } + } + + void + testLedgerEntryCreateAccountClaimID() + { + testcase("ledger_entry: xchain_create_account_claim_id"); + using namespace test::jtx; + + Env mcEnv{*this, features}; + Env scEnv(*this, envconfig(), features); + + // note: signers.size() and quorum are both 5 in createBridgeObjects + createBridgeObjects(mcEnv, scEnv); + + auto scCarol = + Account("scCarol"); // Don't fund it - it will be created with the + // xchain transaction + auto const amt = XRP(1000); + mcEnv(sidechain_xchain_account_create( + mcAlice, jvb, scCarol, amt, reward)); + mcEnv.close(); + + // send less than quorum of attestations (otherwise funds are + // immediately transferred and no "claim" object is created) + size_t constexpr num_attest = 3; + auto attestations = create_account_attestations( + scAttester, + jvb, + 
mcAlice, + amt, + reward, + payee, + /*wasLockingChainSend*/ true, + 1, + scCarol, + signers, + UT_XCHAIN_DEFAULT_NUM_SIGNERS); + for (size_t i = 0; i < num_attest; ++i) + { + scEnv(attestations[i]); + } + scEnv.close(); + + { + // request the create account claim_id via RPC + Json::Value jvParams; + jvParams[jss::xchain_owned_create_account_claim_id] = + jvXRPBridgeRPC; + jvParams[jss::xchain_owned_create_account_claim_id] + [jss::xchain_owned_create_account_claim_id] = 1; + // std::cout << to_string(jvParams) << '\n'; + Json::Value const jrr = scEnv.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + // std::cout << to_string(jrr) << '\n'; + + BEAST_EXPECT(jrr.isMember(jss::node)); + auto r = jrr[jss::node]; + + BEAST_EXPECT(r.isMember(jss::Account)); + BEAST_EXPECT(r[jss::Account] == Account::master.human()); + + BEAST_EXPECT(r.isMember(sfXChainAccountCreateCount.jsonName)); + BEAST_EXPECT(r[sfXChainAccountCreateCount.jsonName].asInt() == 1); + + BEAST_EXPECT( + r.isMember(sfXChainCreateAccountAttestations.jsonName)); + auto attest = r[sfXChainCreateAccountAttestations.jsonName]; + BEAST_EXPECT(attest.isArray()); + BEAST_EXPECT(attest.size() == 3); + BEAST_EXPECT(attest[Json::Value::UInt(0)].isMember( + sfXChainCreateAccountProofSig.jsonName)); + Json::Value a[num_attest]; + for (size_t i = 0; i < num_attest; ++i) + { + a[i] = attest[Json::Value::UInt(0)] + [sfXChainCreateAccountProofSig.jsonName]; + BEAST_EXPECT( + a[i].isMember(jss::Amount) && + a[i][jss::Amount].asInt() == 1000 * drop_per_xrp); + BEAST_EXPECT( + a[i].isMember(jss::Destination) && + a[i][jss::Destination] == scCarol.human()); + BEAST_EXPECT( + a[i].isMember(sfAttestationSignerAccount.jsonName) && + std::any_of( + signers.begin(), signers.end(), [&](signer const& s) { + return a[i][sfAttestationSignerAccount.jsonName] == + s.account.human(); + })); + BEAST_EXPECT( + a[i].isMember(sfAttestationRewardAccount.jsonName) && + std::any_of( + payee.begin(), + payee.end(), + [&](Account const& account) { + return a[i][sfAttestationRewardAccount.jsonName] == + account.human(); + })); + BEAST_EXPECT( + a[i].isMember(sfWasLockingChainSend.jsonName) && + a[i][sfWasLockingChainSend.jsonName] == 1); + BEAST_EXPECT( + a[i].isMember(sfSignatureReward.jsonName) && + a[i][sfSignatureReward.jsonName].asInt() == + 1 * drop_per_xrp); + } + } + + // complete attestations quorum - CreateAccountClaimID should not be + // present anymore + for (size_t i = num_attest; i < UT_XCHAIN_DEFAULT_NUM_SIGNERS; ++i) + { + scEnv(attestations[i]); + } + scEnv.close(); + { + // request the create account claim_id via RPC + Json::Value jvParams; + jvParams[jss::xchain_owned_create_account_claim_id] = + jvXRPBridgeRPC; + jvParams[jss::xchain_owned_create_account_claim_id] + [jss::xchain_owned_create_account_claim_id] = 1; + // std::cout << to_string(jvParams) << '\n'; + Json::Value const jrr = scEnv.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "entryNotFound", ""); + } + } + +public: + void + run() override + { + testLedgerEntryBridge(); + testLedgerEntryClaimID(); + testLedgerEntryCreateAccountClaimID(); + } +}; + +BEAST_DEFINE_TESTSUITE(LedgerEntry, app, ripple); +BEAST_DEFINE_TESTSUITE(LedgerEntry_XChain, app, ripple); + +} // namespace test +} // namespace ripple diff --git a/src/test/rpc/LedgerRPC_test.cpp b/src/test/rpc/LedgerRPC_test.cpp index f086a84de1..78caecb945 100644 --- a/src/test/rpc/LedgerRPC_test.cpp +++ b/src/test/rpc/LedgerRPC_test.cpp @@ -27,325 +27,12 @@ #include #include 
-#include #include #include namespace ripple { -class LedgerRPC_XChain_test : public beast::unit_test::suite, - public test::jtx::XChainBridgeObjects -{ - void - checkErrorValue( - Json::Value const& jv, - std::string const& err, - std::string const& msg) - { - if (BEAST_EXPECT(jv.isMember(jss::status))) - BEAST_EXPECT(jv[jss::status] == "error"); - if (BEAST_EXPECT(jv.isMember(jss::error))) - BEAST_EXPECT(jv[jss::error] == err); - if (msg.empty()) - { - BEAST_EXPECT( - jv[jss::error_message] == Json::nullValue || - jv[jss::error_message] == ""); - } - else if (BEAST_EXPECT(jv.isMember(jss::error_message))) - BEAST_EXPECT(jv[jss::error_message] == msg); - } - - void - testLedgerEntryBridge() - { - testcase("ledger_entry: bridge"); - using namespace test::jtx; - - Env mcEnv{*this, features}; - Env scEnv(*this, envconfig(), features); - - createBridgeObjects(mcEnv, scEnv); - - std::string const ledgerHash{to_string(mcEnv.closed()->info().hash)}; - std::string bridge_index; - Json::Value mcBridge; - { - // request the bridge via RPC - Json::Value jvParams; - jvParams[jss::bridge_account] = mcDoor.human(); - jvParams[jss::bridge] = jvb; - Json::Value const jrr = mcEnv.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - BEAST_EXPECT(jrr.isMember(jss::node)); - auto r = jrr[jss::node]; - // std::cout << to_string(r) << '\n'; - - BEAST_EXPECT(r.isMember(jss::Account)); - BEAST_EXPECT(r[jss::Account] == mcDoor.human()); - - BEAST_EXPECT(r.isMember(jss::Flags)); - - BEAST_EXPECT(r.isMember(sfLedgerEntryType.jsonName)); - BEAST_EXPECT(r[sfLedgerEntryType.jsonName] == jss::Bridge); - - // we not created an account yet - BEAST_EXPECT(r.isMember(sfXChainAccountCreateCount.jsonName)); - BEAST_EXPECT(r[sfXChainAccountCreateCount.jsonName].asInt() == 0); - - // we have not claimed a locking chain tx yet - BEAST_EXPECT(r.isMember(sfXChainAccountClaimCount.jsonName)); - BEAST_EXPECT(r[sfXChainAccountClaimCount.jsonName].asInt() == 0); - - BEAST_EXPECT(r.isMember(jss::index)); - bridge_index = r[jss::index].asString(); - mcBridge = r; - } - { - // request the bridge via RPC by index - Json::Value jvParams; - jvParams[jss::index] = bridge_index; - Json::Value const jrr = mcEnv.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - BEAST_EXPECT(jrr.isMember(jss::node)); - BEAST_EXPECT(jrr[jss::node] == mcBridge); - } - { - // swap door accounts and make sure we get an error value - Json::Value jvParams; - // Sidechain door account is "master", not scDoor - jvParams[jss::bridge_account] = Account::master.human(); - jvParams[jss::bridge] = jvb; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = mcEnv.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - checkErrorValue(jrr, "entryNotFound", ""); - } - { - // create two claim ids and verify that the bridge counter was - // incremented - mcEnv(xchain_create_claim_id(mcAlice, jvb, reward, scAlice)); - mcEnv.close(); - mcEnv(xchain_create_claim_id(mcBob, jvb, reward, scBob)); - mcEnv.close(); - - // request the bridge via RPC - Json::Value jvParams; - jvParams[jss::bridge_account] = mcDoor.human(); - jvParams[jss::bridge] = jvb; - // std::cout << to_string(jvParams) << '\n'; - Json::Value const jrr = mcEnv.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - BEAST_EXPECT(jrr.isMember(jss::node)); - auto r = jrr[jss::node]; - - // we executed two create claim id txs - BEAST_EXPECT(r.isMember(sfXChainClaimID.jsonName)); - BEAST_EXPECT(r[sfXChainClaimID.jsonName].asInt() == 2); - } - } 
- - void - testLedgerEntryClaimID() - { - testcase("ledger_entry: xchain_claim_id"); - using namespace test::jtx; - - Env mcEnv{*this, features}; - Env scEnv(*this, envconfig(), features); - - createBridgeObjects(mcEnv, scEnv); - - scEnv(xchain_create_claim_id(scAlice, jvb, reward, mcAlice)); - scEnv.close(); - scEnv(xchain_create_claim_id(scBob, jvb, reward, mcBob)); - scEnv.close(); - - std::string bridge_index; - { - // request the xchain_claim_id via RPC - Json::Value jvParams; - jvParams[jss::xchain_owned_claim_id] = jvXRPBridgeRPC; - jvParams[jss::xchain_owned_claim_id][jss::xchain_owned_claim_id] = - 1; - // std::cout << to_string(jvParams) << '\n'; - Json::Value const jrr = scEnv.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - BEAST_EXPECT(jrr.isMember(jss::node)); - auto r = jrr[jss::node]; - // std::cout << to_string(r) << '\n'; - - BEAST_EXPECT(r.isMember(jss::Account)); - BEAST_EXPECT(r[jss::Account] == scAlice.human()); - BEAST_EXPECT( - r[sfLedgerEntryType.jsonName] == jss::XChainOwnedClaimID); - BEAST_EXPECT(r[sfXChainClaimID.jsonName].asInt() == 1); - BEAST_EXPECT(r[sfOwnerNode.jsonName].asInt() == 0); - } - - { - // request the xchain_claim_id via RPC - Json::Value jvParams; - jvParams[jss::xchain_owned_claim_id] = jvXRPBridgeRPC; - jvParams[jss::xchain_owned_claim_id][jss::xchain_owned_claim_id] = - 2; - Json::Value const jrr = scEnv.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - BEAST_EXPECT(jrr.isMember(jss::node)); - auto r = jrr[jss::node]; - // std::cout << to_string(r) << '\n'; - - BEAST_EXPECT(r.isMember(jss::Account)); - BEAST_EXPECT(r[jss::Account] == scBob.human()); - BEAST_EXPECT( - r[sfLedgerEntryType.jsonName] == jss::XChainOwnedClaimID); - BEAST_EXPECT(r[sfXChainClaimID.jsonName].asInt() == 2); - BEAST_EXPECT(r[sfOwnerNode.jsonName].asInt() == 0); - } - } - - void - testLedgerEntryCreateAccountClaimID() - { - testcase("ledger_entry: xchain_create_account_claim_id"); - using namespace test::jtx; - - Env mcEnv{*this, features}; - Env scEnv(*this, envconfig(), features); - - // note: signers.size() and quorum are both 5 in createBridgeObjects - createBridgeObjects(mcEnv, scEnv); - - auto scCarol = - Account("scCarol"); // Don't fund it - it will be created with the - // xchain transaction - auto const amt = XRP(1000); - mcEnv(sidechain_xchain_account_create( - mcAlice, jvb, scCarol, amt, reward)); - mcEnv.close(); - - // send less than quorum of attestations (otherwise funds are - // immediately transferred and no "claim" object is created) - size_t constexpr num_attest = 3; - auto attestations = create_account_attestations( - scAttester, - jvb, - mcAlice, - amt, - reward, - payee, - /*wasLockingChainSend*/ true, - 1, - scCarol, - signers, - UT_XCHAIN_DEFAULT_NUM_SIGNERS); - for (size_t i = 0; i < num_attest; ++i) - { - scEnv(attestations[i]); - } - scEnv.close(); - - { - // request the create account claim_id via RPC - Json::Value jvParams; - jvParams[jss::xchain_owned_create_account_claim_id] = - jvXRPBridgeRPC; - jvParams[jss::xchain_owned_create_account_claim_id] - [jss::xchain_owned_create_account_claim_id] = 1; - // std::cout << to_string(jvParams) << '\n'; - Json::Value const jrr = scEnv.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - // std::cout << to_string(jrr) << '\n'; - - BEAST_EXPECT(jrr.isMember(jss::node)); - auto r = jrr[jss::node]; - - BEAST_EXPECT(r.isMember(jss::Account)); - BEAST_EXPECT(r[jss::Account] == Account::master.human()); - - 
BEAST_EXPECT(r.isMember(sfXChainAccountCreateCount.jsonName)); - BEAST_EXPECT(r[sfXChainAccountCreateCount.jsonName].asInt() == 1); - - BEAST_EXPECT( - r.isMember(sfXChainCreateAccountAttestations.jsonName)); - auto attest = r[sfXChainCreateAccountAttestations.jsonName]; - BEAST_EXPECT(attest.isArray()); - BEAST_EXPECT(attest.size() == 3); - BEAST_EXPECT(attest[Json::Value::UInt(0)].isMember( - sfXChainCreateAccountProofSig.jsonName)); - Json::Value a[num_attest]; - for (size_t i = 0; i < num_attest; ++i) - { - a[i] = attest[Json::Value::UInt(0)] - [sfXChainCreateAccountProofSig.jsonName]; - BEAST_EXPECT( - a[i].isMember(jss::Amount) && - a[i][jss::Amount].asInt() == 1000 * drop_per_xrp); - BEAST_EXPECT( - a[i].isMember(jss::Destination) && - a[i][jss::Destination] == scCarol.human()); - BEAST_EXPECT( - a[i].isMember(sfAttestationSignerAccount.jsonName) && - std::any_of( - signers.begin(), signers.end(), [&](signer const& s) { - return a[i][sfAttestationSignerAccount.jsonName] == - s.account.human(); - })); - BEAST_EXPECT( - a[i].isMember(sfAttestationRewardAccount.jsonName) && - std::any_of( - payee.begin(), - payee.end(), - [&](Account const& account) { - return a[i][sfAttestationRewardAccount.jsonName] == - account.human(); - })); - BEAST_EXPECT( - a[i].isMember(sfWasLockingChainSend.jsonName) && - a[i][sfWasLockingChainSend.jsonName] == 1); - BEAST_EXPECT( - a[i].isMember(sfSignatureReward.jsonName) && - a[i][sfSignatureReward.jsonName].asInt() == - 1 * drop_per_xrp); - } - } - - // complete attestations quorum - CreateAccountClaimID should not be - // present anymore - for (size_t i = num_attest; i < UT_XCHAIN_DEFAULT_NUM_SIGNERS; ++i) - { - scEnv(attestations[i]); - } - scEnv.close(); - { - // request the create account claim_id via RPC - Json::Value jvParams; - jvParams[jss::xchain_owned_create_account_claim_id] = - jvXRPBridgeRPC; - jvParams[jss::xchain_owned_create_account_claim_id] - [jss::xchain_owned_create_account_claim_id] = 1; - // std::cout << to_string(jvParams) << '\n'; - Json::Value const jrr = scEnv.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "entryNotFound", ""); - } - } - -public: - void - run() override - { - testLedgerEntryBridge(); - testLedgerEntryClaimID(); - testLedgerEntryCreateAccountClaimID(); - } -}; +namespace test { class LedgerRPC_test : public beast::unit_test::suite { @@ -493,18 +180,6 @@ class LedgerRPC_test : public beast::unit_test::suite "json", "ledger", "{ \"ledger_index\" : 1000000000000000 }"); checkErrorValue(ret, "invalidParams", "Invalid parameters."); } - - { - // ask for an zero index - Json::Value jvParams; - jvParams[jss::ledger_index] = "validated"; - jvParams[jss::index] = - "00000000000000000000000000000000000000000000000000000000000000" - "0000"; - auto const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } } void @@ -525,25 +200,6 @@ class LedgerRPC_test : public beast::unit_test::suite } } - void - testMissingLedgerEntryLedgerHash() - { - testcase("Missing ledger_entry ledger_hash"); - using namespace test::jtx; - Env env{*this}; - Account const alice{"alice"}; - env.fund(XRP(10000), alice); - env.close(); - - Json::Value jvParams; - jvParams[jss::account_root] = alice.human(); - jvParams[jss::ledger_hash] = - "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"; - auto const jrr = - env.rpc("json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "lgrNotFound", 
"ledgerNotFound"); - } - void testLedgerFull() { @@ -603,1847 +259,6 @@ class LedgerRPC_test : public beast::unit_test::suite BEAST_EXPECT(jrr[jss::ledger][jss::accountState].size() == 3u); } - void - testLedgerEntryAccountRoot() - { - testcase("ledger_entry Request AccountRoot"); - using namespace test::jtx; - - auto cfg = envconfig(); - cfg->FEES.reference_fee = 10; - Env env{*this, std::move(cfg)}; - Account const alice{"alice"}; - env.fund(XRP(10000), alice); - env.close(); - - std::string const ledgerHash{to_string(env.closed()->info().hash)}; - { - // Exercise ledger_closed along the way. - Json::Value const jrr = env.rpc("ledger_closed")[jss::result]; - BEAST_EXPECT(jrr[jss::ledger_hash] == ledgerHash); - BEAST_EXPECT(jrr[jss::ledger_index] == 3); - } - - std::string accountRootIndex; - { - // Request alice's account root. - Json::Value jvParams; - jvParams[jss::account_root] = alice.human(); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - BEAST_EXPECT(jrr.isMember(jss::node)); - BEAST_EXPECT(jrr[jss::node][jss::Account] == alice.human()); - BEAST_EXPECT(jrr[jss::node][sfBalance.jsonName] == "10000000000"); - accountRootIndex = jrr[jss::index].asString(); - } - { - constexpr char alicesAcctRootBinary[]{ - "1100612200800000240000000425000000032D00000000559CE54C3B934E4" - "73A995B477E92EC229F99CED5B62BF4D2ACE4DC42719103AE2F6240000002" - "540BE4008114AE123A8556F3CF91154711376AFB0F894F832B3D"}; - - // Request alice's account root, but with binary == true; - Json::Value jvParams; - jvParams[jss::account_root] = alice.human(); - jvParams[jss::binary] = 1; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - BEAST_EXPECT(jrr.isMember(jss::node_binary)); - BEAST_EXPECT(jrr[jss::node_binary] == alicesAcctRootBinary); - } - { - // Request alice's account root using the index. - Json::Value jvParams; - jvParams[jss::index] = accountRootIndex; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - BEAST_EXPECT(!jrr.isMember(jss::node_binary)); - BEAST_EXPECT(jrr.isMember(jss::node)); - BEAST_EXPECT(jrr[jss::node][jss::Account] == alice.human()); - BEAST_EXPECT(jrr[jss::node][sfBalance.jsonName] == "10000000000"); - } - { - // Request alice's account root by index, but with binary == false. - Json::Value jvParams; - jvParams[jss::index] = accountRootIndex; - jvParams[jss::binary] = 0; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - BEAST_EXPECT(jrr.isMember(jss::node)); - BEAST_EXPECT(jrr[jss::node][jss::Account] == alice.human()); - BEAST_EXPECT(jrr[jss::node][sfBalance.jsonName] == "10000000000"); - } - { - // Request using a corrupted AccountID. - Json::Value jvParams; - jvParams[jss::account_root] = makeBadAddress(alice.human()); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedAddress", ""); - } - { - // Request an account that is not in the ledger. 
- Json::Value jvParams; - jvParams[jss::account_root] = Account("bob").human(); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "entryNotFound", ""); - } - } - - void - testLedgerEntryCheck() - { - testcase("ledger_entry Request Check"); - using namespace test::jtx; - Env env{*this}; - Account const alice{"alice"}; - env.fund(XRP(10000), alice); - env.close(); - - auto const checkId = keylet::check(env.master, env.seq(env.master)); - - env(check::create(env.master, alice, XRP(100))); - env.close(); - - std::string const ledgerHash{to_string(env.closed()->info().hash)}; - { - // Request a check. - Json::Value jvParams; - jvParams[jss::check] = to_string(checkId.key); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - BEAST_EXPECT( - jrr[jss::node][sfLedgerEntryType.jsonName] == jss::Check); - BEAST_EXPECT(jrr[jss::node][sfSendMax.jsonName] == "100000000"); - } - { - // Request an index that is not a check. We'll use alice's - // account root index. - std::string accountRootIndex; - { - Json::Value jvParams; - jvParams[jss::account_root] = alice.human(); - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - accountRootIndex = jrr[jss::index].asString(); - } - Json::Value jvParams; - jvParams[jss::check] = accountRootIndex; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "unexpectedLedgerType", ""); - } - } - - void - testLedgerEntryCredentials() - { - testcase("ledger_entry credentials"); - - using namespace test::jtx; - - Env env(*this); - Account const issuer{"issuer"}; - Account const alice{"alice"}; - Account const bob{"bob"}; - const char credType[] = "abcde"; - - env.fund(XRP(5000), issuer, alice, bob); - env.close(); - - // Setup credentials with DepositAuth object for Alice and Bob - env(credentials::create(alice, issuer, credType)); - env.close(); - - { - // Succeed - auto jv = credentials::ledgerEntry(env, alice, issuer, credType); - BEAST_EXPECT( - jv.isObject() && jv.isMember(jss::result) && - !jv[jss::result].isMember(jss::error) && - jv[jss::result].isMember(jss::node) && - jv[jss::result][jss::node].isMember( - sfLedgerEntryType.jsonName) && - jv[jss::result][jss::node][sfLedgerEntryType.jsonName] == - jss::Credential); - - std::string const credIdx = jv[jss::result][jss::index].asString(); - - jv = credentials::ledgerEntry(env, credIdx); - BEAST_EXPECT( - jv.isObject() && jv.isMember(jss::result) && - !jv[jss::result].isMember(jss::error) && - jv[jss::result].isMember(jss::node) && - jv[jss::result][jss::node].isMember( - sfLedgerEntryType.jsonName) && - jv[jss::result][jss::node][sfLedgerEntryType.jsonName] == - jss::Credential); - } - - { - // Fail, index not a hash - auto const jv = credentials::ledgerEntry(env, ""); - checkErrorValue(jv[jss::result], "malformedRequest", ""); - } - - { - // Fail, credential doesn't exist - auto const jv = credentials::ledgerEntry( - env, - "48004829F915654A81B11C4AB8218D96FED67F209B58328A72314FB6EA288B" - "E4"); - checkErrorValue(jv[jss::result], "entryNotFound", ""); - } - - { - // Fail, invalid subject - Json::Value jv; - jv[jss::ledger_index] = jss::validated; - jv[jss::credential][jss::subject] = 42; - jv[jss::credential][jss::issuer] = issuer.human(); - 
jv[jss::credential][jss::credential_type] = - strHex(std::string_view(credType)); - auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - - { - // Fail, invalid issuer - Json::Value jv; - jv[jss::ledger_index] = jss::validated; - jv[jss::credential][jss::subject] = alice.human(); - jv[jss::credential][jss::issuer] = 42; - jv[jss::credential][jss::credential_type] = - strHex(std::string_view(credType)); - auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - - { - // Fail, invalid credentials type - Json::Value jv; - jv[jss::ledger_index] = jss::validated; - jv[jss::credential][jss::subject] = alice.human(); - jv[jss::credential][jss::issuer] = issuer.human(); - jv[jss::credential][jss::credential_type] = 42; - auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - - { - // Fail, empty subject - Json::Value jv; - jv[jss::ledger_index] = jss::validated; - jv[jss::credential][jss::subject] = ""; - jv[jss::credential][jss::issuer] = issuer.human(); - jv[jss::credential][jss::credential_type] = - strHex(std::string_view(credType)); - auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - - { - // Fail, empty issuer - Json::Value jv; - jv[jss::ledger_index] = jss::validated; - jv[jss::credential][jss::subject] = alice.human(); - jv[jss::credential][jss::issuer] = ""; - jv[jss::credential][jss::credential_type] = - strHex(std::string_view(credType)); - auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - - { - // Fail, empty credentials type - Json::Value jv; - jv[jss::ledger_index] = jss::validated; - jv[jss::credential][jss::subject] = alice.human(); - jv[jss::credential][jss::issuer] = issuer.human(); - jv[jss::credential][jss::credential_type] = ""; - auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - - { - // Fail, no subject - Json::Value jv; - jv[jss::ledger_index] = jss::validated; - jv[jss::credential][jss::issuer] = issuer.human(); - jv[jss::credential][jss::credential_type] = - strHex(std::string_view(credType)); - auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - - { - // Fail, no issuer - Json::Value jv; - jv[jss::ledger_index] = jss::validated; - jv[jss::credential][jss::subject] = alice.human(); - jv[jss::credential][jss::credential_type] = - strHex(std::string_view(credType)); - auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - - { - // Fail, no credentials type - Json::Value jv; - jv[jss::ledger_index] = jss::validated; - jv[jss::credential][jss::subject] = alice.human(); - jv[jss::credential][jss::issuer] = issuer.human(); - auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - - { - // Fail, not AccountID subject - Json::Value jv; - jv[jss::ledger_index] = jss::validated; - jv[jss::credential][jss::subject] = "wehsdbvasbdfvj"; - jv[jss::credential][jss::issuer] = issuer.human(); - jv[jss::credential][jss::credential_type] = - strHex(std::string_view(credType)); - auto const jrr = 
env.rpc("json", "ledger_entry", to_string(jv)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - - { - // Fail, not AccountID issuer - Json::Value jv; - jv[jss::ledger_index] = jss::validated; - jv[jss::credential][jss::subject] = alice.human(); - jv[jss::credential][jss::issuer] = "c4p93ugndfbsiu"; - jv[jss::credential][jss::credential_type] = - strHex(std::string_view(credType)); - auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - - { - // Fail, credentials type isn't hex encoded - Json::Value jv; - jv[jss::ledger_index] = jss::validated; - jv[jss::credential][jss::subject] = alice.human(); - jv[jss::credential][jss::issuer] = issuer.human(); - jv[jss::credential][jss::credential_type] = "12KK"; - auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - } - - void - testLedgerEntryDepositPreauth() - { - testcase("ledger_entry Deposit Preauth"); - - using namespace test::jtx; - - Env env{*this}; - Account const alice{"alice"}; - Account const becky{"becky"}; - - env.fund(XRP(10000), alice, becky); - env.close(); - - env(deposit::auth(alice, becky)); - env.close(); - - std::string const ledgerHash{to_string(env.closed()->info().hash)}; - std::string depositPreauthIndex; - { - // Request a depositPreauth by owner and authorized. - Json::Value jvParams; - jvParams[jss::deposit_preauth][jss::owner] = alice.human(); - jvParams[jss::deposit_preauth][jss::authorized] = becky.human(); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - BEAST_EXPECT( - jrr[jss::node][sfLedgerEntryType.jsonName] == - jss::DepositPreauth); - BEAST_EXPECT(jrr[jss::node][sfAccount.jsonName] == alice.human()); - BEAST_EXPECT(jrr[jss::node][sfAuthorize.jsonName] == becky.human()); - depositPreauthIndex = jrr[jss::node][jss::index].asString(); - } - { - // Request a depositPreauth by index. - Json::Value jvParams; - jvParams[jss::deposit_preauth] = depositPreauthIndex; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - BEAST_EXPECT( - jrr[jss::node][sfLedgerEntryType.jsonName] == - jss::DepositPreauth); - BEAST_EXPECT(jrr[jss::node][sfAccount.jsonName] == alice.human()); - BEAST_EXPECT(jrr[jss::node][sfAuthorize.jsonName] == becky.human()); - } - { - // Malformed request: deposit_preauth neither object nor string. - Json::Value jvParams; - jvParams[jss::deposit_preauth] = -5; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // Malformed request: deposit_preauth not hex string. 
- Json::Value jvParams; - jvParams[jss::deposit_preauth] = "0123456789ABCDEFG"; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // Malformed request: missing [jss::deposit_preauth][jss::owner] - Json::Value jvParams; - jvParams[jss::deposit_preauth][jss::authorized] = becky.human(); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // Malformed request: [jss::deposit_preauth][jss::owner] not string. - Json::Value jvParams; - jvParams[jss::deposit_preauth][jss::owner] = 7; - jvParams[jss::deposit_preauth][jss::authorized] = becky.human(); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // Malformed: missing [jss::deposit_preauth][jss::authorized] - Json::Value jvParams; - jvParams[jss::deposit_preauth][jss::owner] = alice.human(); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // Malformed: [jss::deposit_preauth][jss::authorized] not string. - Json::Value jvParams; - jvParams[jss::deposit_preauth][jss::owner] = alice.human(); - jvParams[jss::deposit_preauth][jss::authorized] = 47; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // Malformed: [jss::deposit_preauth][jss::owner] is malformed. - Json::Value jvParams; - jvParams[jss::deposit_preauth][jss::owner] = - "rP6P9ypfAmc!pw8SZHNwM4nvZHFXDraQas"; - - jvParams[jss::deposit_preauth][jss::authorized] = becky.human(); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedOwner", ""); - } - { - // Malformed: [jss::deposit_preauth][jss::authorized] is malformed. 
- Json::Value jvParams; - jvParams[jss::deposit_preauth][jss::owner] = alice.human(); - jvParams[jss::deposit_preauth][jss::authorized] = - "rP6P9ypfAmc!pw8SZHNwM4nvZHFXDraQas"; - - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedAuthorized", ""); - } - } - - void - testLedgerEntryDepositPreauthCred() - { - testcase("ledger_entry Deposit Preauth with credentials"); - - using namespace test::jtx; - - Env env(*this); - Account const issuer{"issuer"}; - Account const alice{"alice"}; - Account const bob{"bob"}; - const char credType[] = "abcde"; - - env.fund(XRP(5000), issuer, alice, bob); - env.close(); - - { - // Setup Bob with DepositAuth - env(fset(bob, asfDepositAuth)); - env.close(); - env(deposit::authCredentials(bob, {{issuer, credType}})); - env.close(); - } - - { - // Succeed - Json::Value jvParams; - jvParams[jss::ledger_index] = jss::validated; - jvParams[jss::deposit_preauth][jss::owner] = bob.human(); - - jvParams[jss::deposit_preauth][jss::authorized_credentials] = - Json::arrayValue; - auto& arr( - jvParams[jss::deposit_preauth][jss::authorized_credentials]); - - Json::Value jo; - jo[jss::issuer] = issuer.human(); - jo[jss::credential_type] = strHex(std::string_view(credType)); - arr.append(std::move(jo)); - auto const jrr = - env.rpc("json", "ledger_entry", to_string(jvParams)); - - BEAST_EXPECT( - jrr.isObject() && jrr.isMember(jss::result) && - !jrr[jss::result].isMember(jss::error) && - jrr[jss::result].isMember(jss::node) && - jrr[jss::result][jss::node].isMember( - sfLedgerEntryType.jsonName) && - jrr[jss::result][jss::node][sfLedgerEntryType.jsonName] == - jss::DepositPreauth); - } - - { - // Failed, invalid account - Json::Value jvParams; - jvParams[jss::ledger_index] = jss::validated; - jvParams[jss::deposit_preauth][jss::owner] = bob.human(); - - jvParams[jss::deposit_preauth][jss::authorized_credentials] = - Json::arrayValue; - auto& arr( - jvParams[jss::deposit_preauth][jss::authorized_credentials]); - - Json::Value jo; - jo[jss::issuer] = to_string(xrpAccount()); - jo[jss::credential_type] = strHex(std::string_view(credType)); - arr.append(std::move(jo)); - auto const jrr = - env.rpc("json", "ledger_entry", to_string(jvParams)); - checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); - } - - { - // Failed, duplicates in credentials - Json::Value jvParams; - jvParams[jss::ledger_index] = jss::validated; - jvParams[jss::deposit_preauth][jss::owner] = bob.human(); - - jvParams[jss::deposit_preauth][jss::authorized_credentials] = - Json::arrayValue; - auto& arr( - jvParams[jss::deposit_preauth][jss::authorized_credentials]); - - Json::Value jo; - jo[jss::issuer] = issuer.human(); - jo[jss::credential_type] = strHex(std::string_view(credType)); - arr.append(jo); - arr.append(std::move(jo)); - auto const jrr = - env.rpc("json", "ledger_entry", to_string(jvParams)); - checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); - } - - { - // Failed, invalid credential_type - Json::Value jvParams; - jvParams[jss::ledger_index] = jss::validated; - jvParams[jss::deposit_preauth][jss::owner] = bob.human(); - - jvParams[jss::deposit_preauth][jss::authorized_credentials] = - Json::arrayValue; - auto& arr( - jvParams[jss::deposit_preauth][jss::authorized_credentials]); - - Json::Value jo; - jo[jss::issuer] = issuer.human(); - jo[jss::credential_type] = ""; - arr.append(std::move(jo)); - - auto const jrr = - 
env.rpc("json", "ledger_entry", to_string(jvParams)); - checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); - } - - { - // Failed, authorized and authorized_credentials both present - Json::Value jvParams; - jvParams[jss::ledger_index] = jss::validated; - jvParams[jss::deposit_preauth][jss::owner] = bob.human(); - jvParams[jss::deposit_preauth][jss::authorized] = alice.human(); - - jvParams[jss::deposit_preauth][jss::authorized_credentials] = - Json::arrayValue; - auto& arr( - jvParams[jss::deposit_preauth][jss::authorized_credentials]); - - Json::Value jo; - jo[jss::issuer] = issuer.human(); - jo[jss::credential_type] = strHex(std::string_view(credType)); - arr.append(std::move(jo)); - - auto const jrr = - env.rpc("json", "ledger_entry", to_string(jvParams)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - - { - // Failed, authorized_credentials is not an array - Json::Value jvParams; - jvParams[jss::ledger_index] = jss::validated; - jvParams[jss::deposit_preauth][jss::owner] = bob.human(); - jvParams[jss::deposit_preauth][jss::authorized_credentials] = 42; - - auto const jrr = - env.rpc("json", "ledger_entry", to_string(jvParams)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - - { - // Failed, authorized_credentials contains string data - Json::Value jvParams; - jvParams[jss::ledger_index] = jss::validated; - jvParams[jss::deposit_preauth][jss::owner] = bob.human(); - jvParams[jss::deposit_preauth][jss::authorized_credentials] = - Json::arrayValue; - auto& arr( - jvParams[jss::deposit_preauth][jss::authorized_credentials]); - arr.append("foobar"); - - auto const jrr = - env.rpc("json", "ledger_entry", to_string(jvParams)); - checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); - } - - { - // Failed, authorized_credentials contains arrays - Json::Value jvParams; - jvParams[jss::ledger_index] = jss::validated; - jvParams[jss::deposit_preauth][jss::owner] = bob.human(); - jvParams[jss::deposit_preauth][jss::authorized_credentials] = - Json::arrayValue; - auto& arr( - jvParams[jss::deposit_preauth][jss::authorized_credentials]); - Json::Value payload = Json::arrayValue; - payload.append(42); - arr.append(std::move(payload)); - - auto const jrr = - env.rpc("json", "ledger_entry", to_string(jvParams)); - checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); - } - - { - // Failed, authorized_credentials is empty array - Json::Value jvParams; - jvParams[jss::ledger_index] = jss::validated; - jvParams[jss::deposit_preauth][jss::owner] = bob.human(); - jvParams[jss::deposit_preauth][jss::authorized_credentials] = - Json::arrayValue; - - auto const jrr = - env.rpc("json", "ledger_entry", to_string(jvParams)); - checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); - } - - { - // Failed, authorized_credentials is too long - - static const std::string_view credTypes[] = { - "cred1", - "cred2", - "cred3", - "cred4", - "cred5", - "cred6", - "cred7", - "cred8", - "cred9"}; - static_assert( - sizeof(credTypes) / sizeof(credTypes[0]) > - maxCredentialsArraySize); - - Json::Value jvParams; - jvParams[jss::ledger_index] = jss::validated; - jvParams[jss::deposit_preauth][jss::owner] = bob.human(); - jvParams[jss::deposit_preauth][jss::authorized_credentials] = - Json::arrayValue; - - auto& arr( - jvParams[jss::deposit_preauth][jss::authorized_credentials]); - - for (unsigned i = 0; i < sizeof(credTypes) / sizeof(credTypes[0]); - ++i) - { - Json::Value jo; - jo[jss::issuer] = 
issuer.human(); - jo[jss::credential_type] = - strHex(std::string_view(credTypes[i])); - arr.append(std::move(jo)); - } - - auto const jrr = - env.rpc("json", "ledger_entry", to_string(jvParams)); - checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); - } - - { - // Failed, issuer is not set - Json::Value jvParams; - jvParams[jss::ledger_index] = jss::validated; - jvParams[jss::deposit_preauth][jss::owner] = bob.human(); - - jvParams[jss::deposit_preauth][jss::authorized_credentials] = - Json::arrayValue; - auto& arr( - jvParams[jss::deposit_preauth][jss::authorized_credentials]); - - Json::Value jo; - jo[jss::credential_type] = strHex(std::string_view(credType)); - arr.append(std::move(jo)); - - auto const jrr = - env.rpc("json", "ledger_entry", to_string(jvParams)); - checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); - } - - { - // Failed, issuer isn't string - Json::Value jvParams; - jvParams[jss::ledger_index] = jss::validated; - jvParams[jss::deposit_preauth][jss::owner] = bob.human(); - - jvParams[jss::deposit_preauth][jss::authorized_credentials] = - Json::arrayValue; - auto& arr( - jvParams[jss::deposit_preauth][jss::authorized_credentials]); - - Json::Value jo; - jo[jss::issuer] = 42; - jo[jss::credential_type] = strHex(std::string_view(credType)); - arr.append(std::move(jo)); - - auto const jrr = - env.rpc("json", "ledger_entry", to_string(jvParams)); - checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); - } - - { - // Failed, issuer is an array - Json::Value jvParams; - jvParams[jss::ledger_index] = jss::validated; - jvParams[jss::deposit_preauth][jss::owner] = bob.human(); - - jvParams[jss::deposit_preauth][jss::authorized_credentials] = - Json::arrayValue; - auto& arr( - jvParams[jss::deposit_preauth][jss::authorized_credentials]); - - Json::Value jo; - Json::Value payload = Json::arrayValue; - payload.append(42); - jo[jss::issuer] = std::move(payload); - jo[jss::credential_type] = strHex(std::string_view(credType)); - arr.append(std::move(jo)); - - auto const jrr = - env.rpc("json", "ledger_entry", to_string(jvParams)); - checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); - } - - { - // Failed, issuer isn't valid encoded account - Json::Value jvParams; - jvParams[jss::ledger_index] = jss::validated; - jvParams[jss::deposit_preauth][jss::owner] = bob.human(); - - jvParams[jss::deposit_preauth][jss::authorized_credentials] = - Json::arrayValue; - auto& arr( - jvParams[jss::deposit_preauth][jss::authorized_credentials]); - - Json::Value jo; - jo[jss::issuer] = "invalid_account"; - jo[jss::credential_type] = strHex(std::string_view(credType)); - arr.append(std::move(jo)); - - auto const jrr = - env.rpc("json", "ledger_entry", to_string(jvParams)); - checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); - } - - { - // Failed, credential_type is not set - Json::Value jvParams; - jvParams[jss::ledger_index] = jss::validated; - jvParams[jss::deposit_preauth][jss::owner] = bob.human(); - - jvParams[jss::deposit_preauth][jss::authorized_credentials] = - Json::arrayValue; - auto& arr( - jvParams[jss::deposit_preauth][jss::authorized_credentials]); - - Json::Value jo; - jo[jss::issuer] = issuer.human(); - arr.append(std::move(jo)); - - auto const jrr = - env.rpc("json", "ledger_entry", to_string(jvParams)); - checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); - } - - { - // Failed, credential_type isn't string - Json::Value jvParams; - 
jvParams[jss::ledger_index] = jss::validated; - jvParams[jss::deposit_preauth][jss::owner] = bob.human(); - - jvParams[jss::deposit_preauth][jss::authorized_credentials] = - Json::arrayValue; - auto& arr( - jvParams[jss::deposit_preauth][jss::authorized_credentials]); - - Json::Value jo; - jo[jss::issuer] = issuer.human(); - jo[jss::credential_type] = 42; - arr.append(std::move(jo)); - - auto const jrr = - env.rpc("json", "ledger_entry", to_string(jvParams)); - checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); - } - - { - // Failed, credential_type is an array - Json::Value jvParams; - jvParams[jss::ledger_index] = jss::validated; - jvParams[jss::deposit_preauth][jss::owner] = bob.human(); - - jvParams[jss::deposit_preauth][jss::authorized_credentials] = - Json::arrayValue; - auto& arr( - jvParams[jss::deposit_preauth][jss::authorized_credentials]); - - Json::Value jo; - jo[jss::issuer] = issuer.human(); - Json::Value payload = Json::arrayValue; - payload.append(42); - jo[jss::credential_type] = std::move(payload); - arr.append(std::move(jo)); - - auto const jrr = - env.rpc("json", "ledger_entry", to_string(jvParams)); - checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); - } - - { - // Failed, credential_type isn't hex encoded - Json::Value jvParams; - jvParams[jss::ledger_index] = jss::validated; - jvParams[jss::deposit_preauth][jss::owner] = bob.human(); - - jvParams[jss::deposit_preauth][jss::authorized_credentials] = - Json::arrayValue; - auto& arr( - jvParams[jss::deposit_preauth][jss::authorized_credentials]); - - Json::Value jo; - jo[jss::issuer] = issuer.human(); - jo[jss::credential_type] = "12KK"; - arr.append(std::move(jo)); - - auto const jrr = - env.rpc("json", "ledger_entry", to_string(jvParams)); - checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); - } - } - - void - testLedgerEntryDirectory() - { - testcase("ledger_entry Request Directory"); - using namespace test::jtx; - Env env{*this}; - Account const alice{"alice"}; - Account const gw{"gateway"}; - auto const USD = gw["USD"]; - env.fund(XRP(10000), alice, gw); - env.close(); - - env.trust(USD(1000), alice); - env.close(); - - // Run up the number of directory entries so alice has two - // directory nodes. - for (int d = 1'000'032; d >= 1'000'000; --d) - { - env(offer(alice, USD(1), drops(d))); - } - env.close(); - - std::string const ledgerHash{to_string(env.closed()->info().hash)}; - { - // Exercise ledger_closed along the way. - Json::Value const jrr = env.rpc("ledger_closed")[jss::result]; - BEAST_EXPECT(jrr[jss::ledger_hash] == ledgerHash); - BEAST_EXPECT(jrr[jss::ledger_index] == 5); - } - - std::string const dirRootIndex = - "A33EC6BB85FB5674074C4A3A43373BB17645308F3EAE1933E3E35252162B217D"; - { - // Locate directory by index. - Json::Value jvParams; - jvParams[jss::directory] = dirRootIndex; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - BEAST_EXPECT(jrr[jss::node][sfIndexes.jsonName].size() == 32); - } - { - // Locate directory by directory root. - Json::Value jvParams; - jvParams[jss::directory] = Json::objectValue; - jvParams[jss::directory][jss::dir_root] = dirRootIndex; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - BEAST_EXPECT(jrr[jss::index] == dirRootIndex); - } - { - // Locate directory by owner. 
- Json::Value jvParams; - jvParams[jss::directory] = Json::objectValue; - jvParams[jss::directory][jss::owner] = alice.human(); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - BEAST_EXPECT(jrr[jss::index] == dirRootIndex); - } - { - // Locate directory by directory root and sub_index. - Json::Value jvParams; - jvParams[jss::directory] = Json::objectValue; - jvParams[jss::directory][jss::dir_root] = dirRootIndex; - jvParams[jss::directory][jss::sub_index] = 1; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - BEAST_EXPECT(jrr[jss::index] != dirRootIndex); - BEAST_EXPECT(jrr[jss::node][sfIndexes.jsonName].size() == 2); - } - { - // Locate directory by owner and sub_index. - Json::Value jvParams; - jvParams[jss::directory] = Json::objectValue; - jvParams[jss::directory][jss::owner] = alice.human(); - jvParams[jss::directory][jss::sub_index] = 1; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - BEAST_EXPECT(jrr[jss::index] != dirRootIndex); - BEAST_EXPECT(jrr[jss::node][sfIndexes.jsonName].size() == 2); - } - { - // Null directory argument. - Json::Value jvParams; - jvParams[jss::directory] = Json::nullValue; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // Non-integer sub_index. - Json::Value jvParams; - jvParams[jss::directory] = Json::objectValue; - jvParams[jss::directory][jss::dir_root] = dirRootIndex; - jvParams[jss::directory][jss::sub_index] = 1.5; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // Malformed owner entry. - Json::Value jvParams; - jvParams[jss::directory] = Json::objectValue; - - std::string const badAddress = makeBadAddress(alice.human()); - jvParams[jss::directory][jss::owner] = badAddress; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedAddress", ""); - } - { - // Malformed directory object. Specify both dir_root and owner. - Json::Value jvParams; - jvParams[jss::directory] = Json::objectValue; - jvParams[jss::directory][jss::owner] = alice.human(); - jvParams[jss::directory][jss::dir_root] = dirRootIndex; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // Incomplete directory object. Missing both dir_root and owner. - Json::Value jvParams; - jvParams[jss::directory] = Json::objectValue; - jvParams[jss::directory][jss::sub_index] = 1; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - } - - void - testLedgerEntryEscrow() - { - testcase("ledger_entry Request Escrow"); - using namespace test::jtx; - Env env{*this}; - Account const alice{"alice"}; - env.fund(XRP(10000), alice); - env.close(); - - // Lambda to create an escrow. 
- auto escrowCreate = [](test::jtx::Account const& account, - test::jtx::Account const& to, - STAmount const& amount, - NetClock::time_point const& cancelAfter) { - Json::Value jv; - jv[jss::TransactionType] = jss::EscrowCreate; - jv[jss::Flags] = tfUniversal; - jv[jss::Account] = account.human(); - jv[jss::Destination] = to.human(); - jv[jss::Amount] = amount.getJson(JsonOptions::none); - jv[sfFinishAfter.jsonName] = - cancelAfter.time_since_epoch().count() + 2; - return jv; - }; - - using namespace std::chrono_literals; - env(escrowCreate(alice, alice, XRP(333), env.now() + 2s)); - env.close(); - - std::string const ledgerHash{to_string(env.closed()->info().hash)}; - std::string escrowIndex; - { - // Request the escrow using owner and sequence. - Json::Value jvParams; - jvParams[jss::escrow] = Json::objectValue; - jvParams[jss::escrow][jss::owner] = alice.human(); - jvParams[jss::escrow][jss::seq] = env.seq(alice) - 1; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - BEAST_EXPECT( - jrr[jss::node][jss::Amount] == XRP(333).value().getText()); - escrowIndex = jrr[jss::index].asString(); - } - { - // Request the escrow by index. - Json::Value jvParams; - jvParams[jss::escrow] = escrowIndex; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - BEAST_EXPECT( - jrr[jss::node][jss::Amount] == XRP(333).value().getText()); - } - { - // Malformed owner entry. - Json::Value jvParams; - jvParams[jss::escrow] = Json::objectValue; - - std::string const badAddress = makeBadAddress(alice.human()); - jvParams[jss::escrow][jss::owner] = badAddress; - jvParams[jss::escrow][jss::seq] = env.seq(alice) - 1; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedOwner", ""); - } - { - // Missing owner. - Json::Value jvParams; - jvParams[jss::escrow] = Json::objectValue; - jvParams[jss::escrow][jss::seq] = env.seq(alice) - 1; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // Missing sequence. - Json::Value jvParams; - jvParams[jss::escrow] = Json::objectValue; - jvParams[jss::escrow][jss::owner] = alice.human(); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // Non-integer sequence. - Json::Value jvParams; - jvParams[jss::escrow] = Json::objectValue; - jvParams[jss::escrow][jss::owner] = alice.human(); - jvParams[jss::escrow][jss::seq] = - std::to_string(env.seq(alice) - 1); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - } - - void - testLedgerEntryOffer() - { - testcase("ledger_entry Request Offer"); - using namespace test::jtx; - Env env{*this}; - Account const alice{"alice"}; - Account const gw{"gateway"}; - auto const USD = gw["USD"]; - env.fund(XRP(10000), alice, gw); - env.close(); - - env(offer(alice, USD(321), XRP(322))); - env.close(); - - std::string const ledgerHash{to_string(env.closed()->info().hash)}; - std::string offerIndex; - { - // Request the offer using owner and sequence. 
- Json::Value jvParams; - jvParams[jss::offer] = Json::objectValue; - jvParams[jss::offer][jss::account] = alice.human(); - jvParams[jss::offer][jss::seq] = env.seq(alice) - 1; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - BEAST_EXPECT(jrr[jss::node][jss::TakerGets] == "322000000"); - offerIndex = jrr[jss::index].asString(); - } - { - // Request the offer using its index. - Json::Value jvParams; - jvParams[jss::offer] = offerIndex; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - BEAST_EXPECT(jrr[jss::node][jss::TakerGets] == "322000000"); - } - { - // Malformed account entry. - Json::Value jvParams; - jvParams[jss::offer] = Json::objectValue; - - std::string const badAddress = makeBadAddress(alice.human()); - jvParams[jss::offer][jss::account] = badAddress; - jvParams[jss::offer][jss::seq] = env.seq(alice) - 1; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedAddress", ""); - } - { - // Malformed offer object. Missing account member. - Json::Value jvParams; - jvParams[jss::offer] = Json::objectValue; - jvParams[jss::offer][jss::seq] = env.seq(alice) - 1; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // Malformed offer object. Missing seq member. - Json::Value jvParams; - jvParams[jss::offer] = Json::objectValue; - jvParams[jss::offer][jss::account] = alice.human(); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // Malformed offer object. Non-integral seq member. - Json::Value jvParams; - jvParams[jss::offer] = Json::objectValue; - jvParams[jss::offer][jss::account] = alice.human(); - jvParams[jss::offer][jss::seq] = std::to_string(env.seq(alice) - 1); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - } - - void - testLedgerEntryPayChan() - { - testcase("ledger_entry Request Pay Chan"); - using namespace test::jtx; - using namespace std::literals::chrono_literals; - Env env{*this}; - Account const alice{"alice"}; - - env.fund(XRP(10000), alice); - env.close(); - - // Lambda to create a PayChan. - auto payChanCreate = [](test::jtx::Account const& account, - test::jtx::Account const& to, - STAmount const& amount, - NetClock::duration const& settleDelay, - PublicKey const& pk) { - Json::Value jv; - jv[jss::TransactionType] = jss::PaymentChannelCreate; - jv[jss::Account] = account.human(); - jv[jss::Destination] = to.human(); - jv[jss::Amount] = amount.getJson(JsonOptions::none); - jv[sfSettleDelay.jsonName] = settleDelay.count(); - jv[sfPublicKey.jsonName] = strHex(pk.slice()); - return jv; - }; - - env(payChanCreate(alice, env.master, XRP(57), 18s, alice.pk())); - env.close(); - - std::string const ledgerHash{to_string(env.closed()->info().hash)}; - - uint256 const payChanIndex{ - keylet::payChan(alice, env.master, env.seq(alice) - 1).key}; - { - // Request the payment channel using its index. 
- Json::Value jvParams; - jvParams[jss::payment_channel] = to_string(payChanIndex); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - BEAST_EXPECT(jrr[jss::node][sfAmount.jsonName] == "57000000"); - BEAST_EXPECT(jrr[jss::node][sfBalance.jsonName] == "0"); - BEAST_EXPECT(jrr[jss::node][sfSettleDelay.jsonName] == 18); - } - { - // Request an index that is not a payment channel. - Json::Value jvParams; - jvParams[jss::payment_channel] = ledgerHash; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "entryNotFound", ""); - } - } - - void - testLedgerEntryRippleState() - { - testcase("ledger_entry Request RippleState"); - using namespace test::jtx; - Env env{*this}; - Account const alice{"alice"}; - Account const gw{"gateway"}; - auto const USD = gw["USD"]; - env.fund(XRP(10000), alice, gw); - env.close(); - - env.trust(USD(999), alice); - env.close(); - - env(pay(gw, alice, USD(97))); - env.close(); - - // check both aliases - for (auto const& fieldName : {jss::ripple_state, jss::state}) - { - std::string const ledgerHash{to_string(env.closed()->info().hash)}; - { - // Request the trust line using the accounts and currency. - Json::Value jvParams; - jvParams[fieldName] = Json::objectValue; - jvParams[fieldName][jss::accounts] = Json::arrayValue; - jvParams[fieldName][jss::accounts][0u] = alice.human(); - jvParams[fieldName][jss::accounts][1u] = gw.human(); - jvParams[fieldName][jss::currency] = "USD"; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - BEAST_EXPECT( - jrr[jss::node][sfBalance.jsonName][jss::value] == "-97"); - BEAST_EXPECT( - jrr[jss::node][sfHighLimit.jsonName][jss::value] == "999"); - } - { - // ripple_state is not an object. - Json::Value jvParams; - jvParams[fieldName] = "ripple_state"; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // ripple_state.currency is missing. - Json::Value jvParams; - jvParams[fieldName] = Json::objectValue; - jvParams[fieldName][jss::accounts] = Json::arrayValue; - jvParams[fieldName][jss::accounts][0u] = alice.human(); - jvParams[fieldName][jss::accounts][1u] = gw.human(); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // ripple_state accounts is not an array. - Json::Value jvParams; - jvParams[fieldName] = Json::objectValue; - jvParams[fieldName][jss::accounts] = 2; - jvParams[fieldName][jss::currency] = "USD"; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // ripple_state one of the accounts is missing. 
- Json::Value jvParams; - jvParams[fieldName] = Json::objectValue; - jvParams[fieldName][jss::accounts] = Json::arrayValue; - jvParams[fieldName][jss::accounts][0u] = alice.human(); - jvParams[fieldName][jss::currency] = "USD"; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // ripple_state more than 2 accounts. - Json::Value jvParams; - jvParams[fieldName] = Json::objectValue; - jvParams[fieldName][jss::accounts] = Json::arrayValue; - jvParams[fieldName][jss::accounts][0u] = alice.human(); - jvParams[fieldName][jss::accounts][1u] = gw.human(); - jvParams[fieldName][jss::accounts][2u] = alice.human(); - jvParams[fieldName][jss::currency] = "USD"; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // ripple_state account[0] is not a string. - Json::Value jvParams; - jvParams[fieldName] = Json::objectValue; - jvParams[fieldName][jss::accounts] = Json::arrayValue; - jvParams[fieldName][jss::accounts][0u] = 44; - jvParams[fieldName][jss::accounts][1u] = gw.human(); - jvParams[fieldName][jss::currency] = "USD"; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // ripple_state account[1] is not a string. - Json::Value jvParams; - jvParams[fieldName] = Json::objectValue; - jvParams[fieldName][jss::accounts] = Json::arrayValue; - jvParams[fieldName][jss::accounts][0u] = alice.human(); - jvParams[fieldName][jss::accounts][1u] = 21; - jvParams[fieldName][jss::currency] = "USD"; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // ripple_state account[0] == account[1]. - Json::Value jvParams; - jvParams[fieldName] = Json::objectValue; - jvParams[fieldName][jss::accounts] = Json::arrayValue; - jvParams[fieldName][jss::accounts][0u] = alice.human(); - jvParams[fieldName][jss::accounts][1u] = alice.human(); - jvParams[fieldName][jss::currency] = "USD"; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // ripple_state malformed account[0]. - Json::Value jvParams; - jvParams[fieldName] = Json::objectValue; - jvParams[fieldName][jss::accounts] = Json::arrayValue; - jvParams[fieldName][jss::accounts][0u] = - makeBadAddress(alice.human()); - jvParams[fieldName][jss::accounts][1u] = gw.human(); - jvParams[fieldName][jss::currency] = "USD"; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedAddress", ""); - } - { - // ripple_state malformed account[1]. 
- Json::Value jvParams; - jvParams[fieldName] = Json::objectValue; - jvParams[fieldName][jss::accounts] = Json::arrayValue; - jvParams[fieldName][jss::accounts][0u] = alice.human(); - jvParams[fieldName][jss::accounts][1u] = - makeBadAddress(gw.human()); - jvParams[fieldName][jss::currency] = "USD"; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedAddress", ""); - } - { - // ripple_state malformed currency. - Json::Value jvParams; - jvParams[fieldName] = Json::objectValue; - jvParams[fieldName][jss::accounts] = Json::arrayValue; - jvParams[fieldName][jss::accounts][0u] = alice.human(); - jvParams[fieldName][jss::accounts][1u] = gw.human(); - jvParams[fieldName][jss::currency] = "USDollars"; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedCurrency", ""); - } - } - } - - void - testLedgerEntryTicket() - { - testcase("ledger_entry Request Ticket"); - using namespace test::jtx; - Env env{*this}; - env.close(); - - // Create two tickets. - std::uint32_t const tkt1{env.seq(env.master) + 1}; - env(ticket::create(env.master, 2)); - env.close(); - - std::string const ledgerHash{to_string(env.closed()->info().hash)}; - // Request four tickets: one before the first one we created, the - // two created tickets, and the ticket that would come after the - // last created ticket. - { - // Not a valid ticket requested by index. - Json::Value jvParams; - jvParams[jss::ticket] = - to_string(getTicketIndex(env.master, tkt1 - 1)); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "entryNotFound", ""); - } - { - // First real ticket requested by index. - Json::Value jvParams; - jvParams[jss::ticket] = to_string(getTicketIndex(env.master, tkt1)); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - BEAST_EXPECT( - jrr[jss::node][sfLedgerEntryType.jsonName] == jss::Ticket); - BEAST_EXPECT(jrr[jss::node][sfTicketSequence.jsonName] == tkt1); - } - { - // Second real ticket requested by account and sequence. - Json::Value jvParams; - jvParams[jss::ticket] = Json::objectValue; - jvParams[jss::ticket][jss::account] = env.master.human(); - jvParams[jss::ticket][jss::ticket_seq] = tkt1 + 1; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - BEAST_EXPECT( - jrr[jss::node][jss::index] == - to_string(getTicketIndex(env.master, tkt1 + 1))); - } - { - // Not a valid ticket requested by account and sequence. - Json::Value jvParams; - jvParams[jss::ticket] = Json::objectValue; - jvParams[jss::ticket][jss::account] = env.master.human(); - jvParams[jss::ticket][jss::ticket_seq] = tkt1 + 2; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "entryNotFound", ""); - } - { - // Request a ticket using an account root entry. 
- Json::Value jvParams; - jvParams[jss::ticket] = to_string(keylet::account(env.master).key); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "unexpectedLedgerType", ""); - } - { - // Malformed account entry. - Json::Value jvParams; - jvParams[jss::ticket] = Json::objectValue; - - std::string const badAddress = makeBadAddress(env.master.human()); - jvParams[jss::ticket][jss::account] = badAddress; - jvParams[jss::ticket][jss::ticket_seq] = env.seq(env.master) - 1; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedAddress", ""); - } - { - // Malformed ticket object. Missing account member. - Json::Value jvParams; - jvParams[jss::ticket] = Json::objectValue; - jvParams[jss::ticket][jss::ticket_seq] = env.seq(env.master) - 1; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // Malformed ticket object. Missing seq member. - Json::Value jvParams; - jvParams[jss::ticket] = Json::objectValue; - jvParams[jss::ticket][jss::account] = env.master.human(); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // Malformed ticket object. Non-integral seq member. - Json::Value jvParams; - jvParams[jss::ticket] = Json::objectValue; - jvParams[jss::ticket][jss::account] = env.master.human(); - jvParams[jss::ticket][jss::ticket_seq] = - std::to_string(env.seq(env.master) - 1); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - } - - void - testLedgerEntryDID() - { - testcase("ledger_entry Request DID"); - using namespace test::jtx; - using namespace std::literals::chrono_literals; - Env env{*this}; - Account const alice{"alice"}; - - env.fund(XRP(10000), alice); - env.close(); - - // Lambda to create a DID. - auto didCreate = [](test::jtx::Account const& account) { - Json::Value jv; - jv[jss::TransactionType] = jss::DIDSet; - jv[jss::Account] = account.human(); - jv[sfDIDDocument.jsonName] = strHex(std::string{"data"}); - jv[sfURI.jsonName] = strHex(std::string{"uri"}); - return jv; - }; - - env(didCreate(alice)); - env.close(); - - std::string const ledgerHash{to_string(env.closed()->info().hash)}; - - { - // Request the DID using its index. - Json::Value jvParams; - jvParams[jss::did] = alice.human(); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - BEAST_EXPECT( - jrr[jss::node][sfDIDDocument.jsonName] == - strHex(std::string{"data"})); - BEAST_EXPECT( - jrr[jss::node][sfURI.jsonName] == strHex(std::string{"uri"})); - } - { - // Request an index that is not a DID. 
- Json::Value jvParams; - jvParams[jss::did] = env.master.human(); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "entryNotFound", ""); - } - } - - void - testLedgerEntryInvalidParams(unsigned int apiVersion) - { - testcase( - "ledger_entry Request With Invalid Parameters v" + - std::to_string(apiVersion)); - using namespace test::jtx; - Env env{*this}; - - std::string const ledgerHash{to_string(env.closed()->info().hash)}; - - auto makeParams = [&apiVersion](std::function f) { - Json::Value params; - params[jss::api_version] = apiVersion; - f(params); - return params; - }; - // "features" is not an option supported by ledger_entry. - { - auto const jvParams = - makeParams([&ledgerHash](Json::Value& jvParams) { - jvParams[jss::features] = ledgerHash; - jvParams[jss::ledger_hash] = ledgerHash; - }); - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - if (apiVersion < 2u) - checkErrorValue(jrr, "unknownOption", ""); - else - checkErrorValue(jrr, "invalidParams", ""); - } - Json::Value const injectObject = []() { - Json::Value obj(Json::objectValue); - obj[jss::account] = "rhigTLJJyXXSRUyRCQtqi1NoAZZzZnS4KU"; - obj[jss::ledger_index] = "validated"; - return obj; - }(); - Json::Value const injectArray = []() { - Json::Value arr(Json::arrayValue); - arr[0u] = "rhigTLJJyXXSRUyRCQtqi1NoAZZzZnS4KU"; - arr[1u] = "validated"; - return arr; - }(); - - // invalid input for fields that can handle an object, but can't handle - // an array - for (auto const& field : - {jss::directory, jss::escrow, jss::offer, jss::ticket, jss::amm}) - { - auto const jvParams = - makeParams([&field, &injectArray](Json::Value& jvParams) { - jvParams[field] = injectArray; - }); - - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - if (apiVersion < 2u) - checkErrorValue(jrr, "internal", "Internal error."); - else - checkErrorValue(jrr, "invalidParams", ""); - } - // Fields that can handle objects just fine - for (auto const& field : - {jss::directory, jss::escrow, jss::offer, jss::ticket, jss::amm}) - { - auto const jvParams = - makeParams([&field, &injectObject](Json::Value& jvParams) { - jvParams[field] = injectObject; - }); - - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - checkErrorValue(jrr, "malformedRequest", ""); - } - - for (auto const& inject : {injectObject, injectArray}) - { - // invalid input for fields that can't handle an object or an array - for (auto const& field : - {jss::index, - jss::account_root, - jss::check, - jss::payment_channel}) - { - auto const jvParams = - makeParams([&field, &inject](Json::Value& jvParams) { - jvParams[field] = inject; - }); - - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - if (apiVersion < 2u) - checkErrorValue(jrr, "internal", "Internal error."); - else - checkErrorValue(jrr, "invalidParams", ""); - } - // directory sub-fields - for (auto const& field : {jss::dir_root, jss::owner}) - { - auto const jvParams = - makeParams([&field, &inject](Json::Value& jvParams) { - jvParams[jss::directory][field] = inject; - }); - - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - if (apiVersion < 2u) - checkErrorValue(jrr, "internal", "Internal error."); - else - checkErrorValue(jrr, "invalidParams", ""); - } - // escrow sub-fields - 
{ - auto const jvParams = - makeParams([&inject](Json::Value& jvParams) { - jvParams[jss::escrow][jss::owner] = inject; - jvParams[jss::escrow][jss::seq] = 99; - }); - - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - if (apiVersion < 2u) - checkErrorValue(jrr, "internal", "Internal error."); - else - checkErrorValue(jrr, "invalidParams", ""); - } - // offer sub-fields - { - auto const jvParams = - makeParams([&inject](Json::Value& jvParams) { - jvParams[jss::offer][jss::account] = inject; - jvParams[jss::offer][jss::seq] = 99; - }); - - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - if (apiVersion < 2u) - checkErrorValue(jrr, "internal", "Internal error."); - else - checkErrorValue(jrr, "invalidParams", ""); - } - // ripple_state sub-fields - { - auto const jvParams = - makeParams([&inject](Json::Value& jvParams) { - Json::Value rs(Json::objectValue); - rs[jss::currency] = "FOO"; - rs[jss::accounts] = Json::Value(Json::arrayValue); - rs[jss::accounts][0u] = - "rhigTLJJyXXSRUyRCQtqi1NoAZZzZnS4KU"; - rs[jss::accounts][1u] = - "rKssEq6pg1KbqEqAFnua5mFAL6Ggpsh2wv"; - rs[jss::currency] = inject; - jvParams[jss::ripple_state] = std::move(rs); - }); - - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - if (apiVersion < 2u) - checkErrorValue(jrr, "internal", "Internal error."); - else - checkErrorValue(jrr, "invalidParams", ""); - } - // ticket sub-fields - { - auto const jvParams = - makeParams([&inject](Json::Value& jvParams) { - jvParams[jss::ticket][jss::account] = inject; - jvParams[jss::ticket][jss::ticket_seq] = 99; - }); - - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - if (apiVersion < 2u) - checkErrorValue(jrr, "internal", "Internal error."); - else - checkErrorValue(jrr, "invalidParams", ""); - } - - // Fields that can handle malformed inputs just fine - for (auto const& field : {jss::nft_page, jss::deposit_preauth}) - { - auto const jvParams = - makeParams([&field, &inject](Json::Value& jvParams) { - jvParams[field] = inject; - }); - - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - checkErrorValue(jrr, "malformedRequest", ""); - } - // Subfields of deposit_preauth that can handle malformed inputs - // fine - for (auto const& field : {jss::owner, jss::authorized}) - { - auto const jvParams = - makeParams([&field, &inject](Json::Value& jvParams) { - auto pa = Json::Value(Json::objectValue); - pa[jss::owner] = "rhigTLJJyXXSRUyRCQtqi1NoAZZzZnS4KU"; - pa[jss::authorized] = - "rKssEq6pg1KbqEqAFnua5mFAL6Ggpsh2wv"; - pa[field] = inject; - jvParams[jss::deposit_preauth] = std::move(pa); - }); - - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - checkErrorValue(jrr, "malformedRequest", ""); - } - } - } - /// @brief ledger RPC requests as a way to drive /// input options to lookupLedger. 
The point of this test is /// coverage for lookupLedger, not so much the ledger @@ -2926,321 +741,6 @@ class LedgerRPC_test : public beast::unit_test::suite } } - void - testInvalidOracleLedgerEntry() - { - testcase("Invalid Oracle Ledger Entry"); - using namespace ripple::test::jtx; - using namespace ripple::test::jtx::oracle; - - Env env(*this); - Account const owner("owner"); - env.fund(XRP(1'000), owner); - Oracle oracle( - env, - {.owner = owner, - .fee = static_cast(env.current()->fees().base.drops())}); - - // Malformed document id - auto res = Oracle::ledgerEntry(env, owner, NoneTag); - BEAST_EXPECT(res[jss::error].asString() == "invalidParams"); - std::vector invalid = {-1, 1.2, "", "Invalid"}; - for (auto const& v : invalid) - { - auto const res = Oracle::ledgerEntry(env, owner, v); - BEAST_EXPECT(res[jss::error].asString() == "malformedDocumentID"); - } - // Missing document id - res = Oracle::ledgerEntry(env, owner, std::nullopt); - BEAST_EXPECT(res[jss::error].asString() == "malformedRequest"); - - // Missing account - res = Oracle::ledgerEntry(env, std::nullopt, 1); - BEAST_EXPECT(res[jss::error].asString() == "malformedRequest"); - - // Malformed account - std::string malfAccount = to_string(owner.id()); - malfAccount.replace(10, 1, 1, '!'); - res = Oracle::ledgerEntry(env, malfAccount, 1); - BEAST_EXPECT(res[jss::error].asString() == "malformedAddress"); - } - - void - testOracleLedgerEntry() - { - testcase("Oracle Ledger Entry"); - using namespace ripple::test::jtx; - using namespace ripple::test::jtx::oracle; - - Env env(*this); - auto const baseFee = - static_cast(env.current()->fees().base.drops()); - std::vector accounts; - std::vector oracles; - for (int i = 0; i < 10; ++i) - { - Account const owner(std::string("owner") + std::to_string(i)); - env.fund(XRP(1'000), owner); - // different accounts can have the same asset pair - Oracle oracle( - env, {.owner = owner, .documentID = i, .fee = baseFee}); - accounts.push_back(owner.id()); - oracles.push_back(oracle.documentID()); - // same account can have different asset pair - Oracle oracle1( - env, {.owner = owner, .documentID = i + 10, .fee = baseFee}); - accounts.push_back(owner.id()); - oracles.push_back(oracle1.documentID()); - } - for (int i = 0; i < accounts.size(); ++i) - { - auto const jv = [&]() { - // document id is uint32 - if (i % 2) - return Oracle::ledgerEntry(env, accounts[i], oracles[i]); - // document id is string - return Oracle::ledgerEntry( - env, accounts[i], std::to_string(oracles[i])); - }(); - try - { - BEAST_EXPECT( - jv[jss::node][jss::Owner] == to_string(accounts[i])); - } - catch (...) - { - fail(); - } - } - } - - void - testLedgerEntryMPT() - { - testcase("ledger_entry Request MPT"); - using namespace test::jtx; - using namespace std::literals::chrono_literals; - Env env{*this}; - Account const alice{"alice"}; - Account const bob("bob"); - - MPTTester mptAlice(env, alice, {.holders = {bob}}); - mptAlice.create( - {.transferFee = 10, - .metadata = "123", - .ownerCount = 1, - .flags = tfMPTCanLock | tfMPTRequireAuth | tfMPTCanEscrow | - tfMPTCanTrade | tfMPTCanTransfer | tfMPTCanClawback}); - mptAlice.authorize({.account = bob, .holderCount = 1}); - - std::string const ledgerHash{to_string(env.closed()->info().hash)}; - - std::string const badMptID = - "00000193B9DDCAF401B5B3B26875986043F82CD0D13B4315"; - { - // Request the MPTIssuance using its MPTIssuanceID. 
- Json::Value jvParams; - jvParams[jss::mpt_issuance] = strHex(mptAlice.issuanceID()); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - BEAST_EXPECT( - jrr[jss::node][sfMPTokenMetadata.jsonName] == - strHex(std::string{"123"})); - BEAST_EXPECT( - jrr[jss::node][jss::mpt_issuance_id] == - strHex(mptAlice.issuanceID())); - } - { - // Request an index that is not a MPTIssuance. - Json::Value jvParams; - jvParams[jss::mpt_issuance] = badMptID; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "entryNotFound", ""); - } - { - // Request the MPToken using its owner + mptIssuanceID. - Json::Value jvParams; - jvParams[jss::mptoken] = Json::objectValue; - jvParams[jss::mptoken][jss::account] = bob.human(); - jvParams[jss::mptoken][jss::mpt_issuance_id] = - strHex(mptAlice.issuanceID()); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - BEAST_EXPECT( - jrr[jss::node][sfMPTokenIssuanceID.jsonName] == - strHex(mptAlice.issuanceID())); - } - { - // Request the MPToken using a bad mptIssuanceID. - Json::Value jvParams; - jvParams[jss::mptoken] = Json::objectValue; - jvParams[jss::mptoken][jss::account] = bob.human(); - jvParams[jss::mptoken][jss::mpt_issuance_id] = badMptID; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "entryNotFound", ""); - } - } - - void - testLedgerEntryCLI() - { - testcase("ledger_entry command-line"); - using namespace test::jtx; - - Env env{*this}; - Account const alice{"alice"}; - env.fund(XRP(10000), alice); - env.close(); - - auto const checkId = keylet::check(env.master, env.seq(env.master)); - - env(check::create(env.master, alice, XRP(100))); - env.close(); - - std::string const ledgerHash{to_string(env.closed()->info().hash)}; - { - // Request a check. 
- Json::Value const jrr = - env.rpc("ledger_entry", to_string(checkId.key))[jss::result]; - BEAST_EXPECT( - jrr[jss::node][sfLedgerEntryType.jsonName] == jss::Check); - BEAST_EXPECT(jrr[jss::node][sfSendMax.jsonName] == "100000000"); - } - } - - void - testLedgerEntryPermissionedDomain() - { - testcase("ledger_entry PermissionedDomain"); - - using namespace test::jtx; - - Env env(*this, supported_amendments() | featurePermissionedDomains); - Account const issuer{"issuer"}; - Account const alice{"alice"}; - Account const bob{"bob"}; - - env.fund(XRP(5000), issuer, alice, bob); - env.close(); - - auto const seq = env.seq(alice); - env(pdomain::setTx(alice, {{alice, "first credential"}})); - env.close(); - auto const objects = pdomain::getObjects(alice, env); - if (!BEAST_EXPECT(objects.size() == 1)) - return; - - { - // Succeed - Json::Value params; - params[jss::ledger_index] = jss::validated; - params[jss::permissioned_domain][jss::account] = alice.human(); - params[jss::permissioned_domain][jss::seq] = seq; - auto jv = env.rpc("json", "ledger_entry", to_string(params)); - BEAST_EXPECT( - jv.isObject() && jv.isMember(jss::result) && - !jv[jss::result].isMember(jss::error) && - jv[jss::result].isMember(jss::node) && - jv[jss::result][jss::node].isMember( - sfLedgerEntryType.jsonName) && - jv[jss::result][jss::node][sfLedgerEntryType.jsonName] == - jss::PermissionedDomain); - - std::string const pdIdx = jv[jss::result][jss::index].asString(); - BEAST_EXPECT( - strHex(keylet::permissionedDomain(alice, seq).key) == pdIdx); - - params.clear(); - params[jss::ledger_index] = jss::validated; - params[jss::permissioned_domain] = pdIdx; - jv = env.rpc("json", "ledger_entry", to_string(params)); - BEAST_EXPECT( - jv.isObject() && jv.isMember(jss::result) && - !jv[jss::result].isMember(jss::error) && - jv[jss::result].isMember(jss::node) && - jv[jss::result][jss::node].isMember( - sfLedgerEntryType.jsonName) && - jv[jss::result][jss::node][sfLedgerEntryType.jsonName] == - jss::PermissionedDomain); - } - - { - // Fail, invalid permissioned domain index - Json::Value params; - params[jss::ledger_index] = jss::validated; - params[jss::permissioned_domain] = - "12F1F1F1F180D67377B2FAB292A31C922470326268D2B9B74CD1E582645B9A" - "DE"; - auto const jrr = env.rpc("json", "ledger_entry", to_string(params)); - checkErrorValue(jrr[jss::result], "entryNotFound", ""); - } - - { - // Fail, invalid permissioned domain index - Json::Value params; - params[jss::ledger_index] = jss::validated; - params[jss::permissioned_domain] = "NotAHexString"; - auto const jrr = env.rpc("json", "ledger_entry", to_string(params)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - - { - // Fail, permissioned domain is not an object - Json::Value params; - params[jss::ledger_index] = jss::validated; - params[jss::permissioned_domain] = 10; - auto const jrr = env.rpc("json", "ledger_entry", to_string(params)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - - { - // Fail, invalid account - Json::Value params; - params[jss::ledger_index] = jss::validated; - params[jss::permissioned_domain][jss::account] = 1; - params[jss::permissioned_domain][jss::seq] = seq; - auto const jrr = env.rpc("json", "ledger_entry", to_string(params)); - checkErrorValue(jrr[jss::result], "malformedAddress", ""); - } - - { - // Fail, account is an object - Json::Value params; - params[jss::ledger_index] = jss::validated; - params[jss::permissioned_domain][jss::account] = - Json::Value{Json::ValueType::objectValue}; - 
params[jss::permissioned_domain][jss::seq] = seq; - auto const jrr = env.rpc("json", "ledger_entry", to_string(params)); - checkErrorValue(jrr[jss::result], "malformedAddress", ""); - } - - { - // Fail, no account - Json::Value params; - params[jss::ledger_index] = jss::validated; - params[jss::permissioned_domain][jss::account] = ""; - params[jss::permissioned_domain][jss::seq] = seq; - auto const jrr = env.rpc("json", "ledger_entry", to_string(params)); - checkErrorValue(jrr[jss::result], "malformedAddress", ""); - } - - { - // Fail, invalid sequence - Json::Value params; - params[jss::ledger_index] = jss::validated; - params[jss::permissioned_domain][jss::account] = alice.human(); - params[jss::permissioned_domain][jss::seq] = "12g"; - auto const jrr = env.rpc("json", "ledger_entry", to_string(params)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - } - public: void run() override @@ -3248,38 +748,17 @@ public: testLedgerRequest(); testBadInput(); testLedgerCurrent(); - testMissingLedgerEntryLedgerHash(); testLedgerFull(); testLedgerFullNonAdmin(); testLedgerAccounts(); - testLedgerEntryAccountRoot(); - testLedgerEntryCheck(); - testLedgerEntryCredentials(); - testLedgerEntryDepositPreauth(); - testLedgerEntryDepositPreauthCred(); - testLedgerEntryDirectory(); - testLedgerEntryEscrow(); - testLedgerEntryOffer(); - testLedgerEntryPayChan(); - testLedgerEntryRippleState(); - testLedgerEntryTicket(); testLookupLedger(); testNoQueue(); testQueue(); testLedgerAccountsOption(); - testLedgerEntryDID(); - testInvalidOracleLedgerEntry(); - testOracleLedgerEntry(); - testLedgerEntryMPT(); - testLedgerEntryCLI(); - testLedgerEntryPermissionedDomain(); - - forAllApiVersions(std::bind_front( - &LedgerRPC_test::testLedgerEntryInvalidParams, this)); } }; BEAST_DEFINE_TESTSUITE(LedgerRPC, app, ripple); -BEAST_DEFINE_TESTSUITE(LedgerRPC_XChain, app, ripple); +} // namespace test } // namespace ripple diff --git a/src/xrpld/rpc/handlers/LedgerEntry.cpp b/src/xrpld/rpc/handlers/LedgerEntry.cpp index 7da99b3a76..1d15825786 100644 --- a/src/xrpld/rpc/handlers/LedgerEntry.cpp +++ b/src/xrpld/rpc/handlers/LedgerEntry.cpp @@ -37,6 +37,168 @@ namespace ripple { +static std::optional +parseIndex(Json::Value const& params, Json::Value& jvResult) +{ + uint256 uNodeIndex; + if (!uNodeIndex.parseHex(params.asString())) + { + jvResult[jss::error] = "malformedRequest"; + return std::nullopt; + } + + return uNodeIndex; +} + +static std::optional +parseAccountRoot(Json::Value const& params, Json::Value& jvResult) +{ + auto const account = parseBase58(params.asString()); + if (!account || account->isZero()) + { + jvResult[jss::error] = "malformedAddress"; + return std::nullopt; + } + + return keylet::account(*account).key; +} + +static std::optional +parseAMM(Json::Value const& params, Json::Value& jvResult) +{ + if (!params.isObject()) + { + uint256 uNodeIndex; + if (!uNodeIndex.parseHex(params.asString())) + { + jvResult[jss::error] = "malformedRequest"; + return std::nullopt; + } + return uNodeIndex; + } + + if (!params.isMember(jss::asset) || !params.isMember(jss::asset2)) + { + jvResult[jss::error] = "malformedRequest"; + return std::nullopt; + } + + try + { + auto const issue = issueFromJson(params[jss::asset]); + auto const issue2 = issueFromJson(params[jss::asset2]); + return keylet::amm(issue, issue2).key; + } + catch (std::runtime_error const&) + { + jvResult[jss::error] = "malformedRequest"; + return std::nullopt; + } +} + +static std::optional +parseBridge(Json::Value const& params, 
Json::Value& jvResult) +{ + // return the keylet for the specified bridge or nullopt if the + // request is malformed + auto const maybeKeylet = [&]() -> std::optional { + try + { + if (!params.isMember(jss::bridge_account)) + return std::nullopt; + + auto const& jsBridgeAccount = params[jss::bridge_account]; + if (!jsBridgeAccount.isString()) + { + return std::nullopt; + } + + auto const account = + parseBase58(jsBridgeAccount.asString()); + if (!account || account->isZero()) + { + return std::nullopt; + } + + // This may throw and is the reason for the `try` block. The + // try block has a larger scope so the `bridge` variable + // doesn't need to be an optional. + STXChainBridge const bridge(params[jss::bridge]); + STXChainBridge::ChainType const chainType = + STXChainBridge::srcChain(account == bridge.lockingChainDoor()); + + if (account != bridge.door(chainType)) + return std::nullopt; + + return keylet::bridge(bridge, chainType); + } + catch (...) + { + return std::nullopt; + } + }(); + + if (maybeKeylet) + { + return maybeKeylet->key; + } + + jvResult[jss::error] = "malformedRequest"; + return std::nullopt; +} + +static std::optional +parseCheck(Json::Value const& params, Json::Value& jvResult) +{ + uint256 uNodeIndex; + if (!uNodeIndex.parseHex(params.asString())) + { + jvResult[jss::error] = "malformedRequest"; + return std::nullopt; + } + + return uNodeIndex; +} + +static std::optional +parseCredential(Json::Value const& cred, Json::Value& jvResult) +{ + if (cred.isString()) + { + uint256 uNodeIndex; + if (!uNodeIndex.parseHex(cred.asString())) + { + jvResult[jss::error] = "malformedRequest"; + return std::nullopt; + } + return uNodeIndex; + } + + if ((!cred.isMember(jss::subject) || !cred[jss::subject].isString()) || + (!cred.isMember(jss::issuer) || !cred[jss::issuer].isString()) || + (!cred.isMember(jss::credential_type) || + !cred[jss::credential_type].isString())) + { + jvResult[jss::error] = "malformedRequest"; + return std::nullopt; + } + + auto const subject = parseBase58(cred[jss::subject].asString()); + auto const issuer = parseBase58(cred[jss::issuer].asString()); + auto const credType = strUnHex(cred[jss::credential_type].asString()); + + if (!subject || subject->isZero() || !issuer || issuer->isZero() || + !credType || credType->empty()) + { + jvResult[jss::error] = "malformedRequest"; + return std::nullopt; + } + + return keylet::credential( + *subject, *issuer, Slice(credType->data(), credType->size())) + .key; +} + static STArray parseAuthorizeCredentials(Json::Value const& jv) { @@ -68,45 +230,6 @@ parseAuthorizeCredentials(Json::Value const& jv) return arr; } -static std::optional -parseIndex(Json::Value const& params, Json::Value& jvResult) -{ - uint256 uNodeIndex; - if (!uNodeIndex.parseHex(params.asString())) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - - return uNodeIndex; -} - -static std::optional -parseAccountRoot(Json::Value const& params, Json::Value& jvResult) -{ - auto const account = parseBase58(params.asString()); - if (!account || account->isZero()) - { - jvResult[jss::error] = "malformedAddress"; - return std::nullopt; - } - - return keylet::account(*account).key; -} - -static std::optional -parseCheck(Json::Value const& params, Json::Value& jvResult) -{ - uint256 uNodeIndex; - if (!uNodeIndex.parseHex(params.asString())) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - - return uNodeIndex; -} - static std::optional parseDepositPreauth(Json::Value const& dp, Json::Value& jvResult) { @@ 
-172,6 +295,19 @@ parseDepositPreauth(Json::Value const& dp, Json::Value& jvResult) return keylet::depositPreauth(*owner, sorted).key; } +static std::optional +parseDID(Json::Value const& params, Json::Value& jvResult) +{ + auto const account = parseBase58(params.asString()); + if (!account || account->isZero()) + { + jvResult[jss::error] = "malformedAddress"; + return std::nullopt; + } + + return keylet::did(*account).key; +} + static std::optional parseDirectory(Json::Value const& params, Json::Value& jvResult) { @@ -271,6 +407,92 @@ parseEscrow(Json::Value const& params, Json::Value& jvResult) return keylet::escrow(*id, params[jss::seq].asUInt()).key; } +static std::optional +parseMPToken(Json::Value const& mptJson, Json::Value& jvResult) +{ + if (!mptJson.isObject()) + { + uint256 uNodeIndex; + if (!uNodeIndex.parseHex(mptJson.asString())) + { + jvResult[jss::error] = "malformedRequest"; + return std::nullopt; + } + return uNodeIndex; + } + + if (!mptJson.isMember(jss::mpt_issuance_id) || + !mptJson.isMember(jss::account)) + { + jvResult[jss::error] = "malformedRequest"; + return std::nullopt; + } + + try + { + auto const mptIssuanceIdStr = mptJson[jss::mpt_issuance_id].asString(); + + uint192 mptIssuanceID; + if (!mptIssuanceID.parseHex(mptIssuanceIdStr)) + Throw("Cannot parse mpt_issuance_id"); + + auto const account = + parseBase58(mptJson[jss::account].asString()); + + if (!account || account->isZero()) + { + jvResult[jss::error] = "malformedAddress"; + return std::nullopt; + } + + return keylet::mptoken(mptIssuanceID, *account).key; + } + catch (std::runtime_error const&) + { + jvResult[jss::error] = "malformedRequest"; + return std::nullopt; + } +} + +static std::optional +parseMPTokenIssuance( + Json::Value const& unparsedMPTIssuanceID, + Json::Value& jvResult) +{ + if (unparsedMPTIssuanceID.isString()) + { + uint192 mptIssuanceID; + if (!mptIssuanceID.parseHex(unparsedMPTIssuanceID.asString())) + { + jvResult[jss::error] = "malformedRequest"; + return std::nullopt; + } + + return keylet::mptIssuance(mptIssuanceID).key; + } + + jvResult[jss::error] = "malformedRequest"; + return std::nullopt; +} + +static std::optional +parseNFTokenPage(Json::Value const& params, Json::Value& jvResult) +{ + if (params.isString()) + { + uint256 uNodeIndex; + if (!uNodeIndex.parseHex(params.asString())) + { + jvResult[jss::error] = "malformedRequest"; + return std::nullopt; + } + return uNodeIndex; + } + + jvResult[jss::error] = "malformedRequest"; + return std::nullopt; +} + static std::optional parseOffer(Json::Value const& params, Json::Value& jvResult) { @@ -302,6 +524,60 @@ parseOffer(Json::Value const& params, Json::Value& jvResult) return keylet::offer(*id, params[jss::seq].asUInt()).key; } +static std::optional +parseOracle(Json::Value const& params, Json::Value& jvResult) +{ + if (!params.isObject()) + { + uint256 uNodeIndex; + if (!uNodeIndex.parseHex(params.asString())) + { + jvResult[jss::error] = "malformedRequest"; + return std::nullopt; + } + return uNodeIndex; + } + + if (!params.isMember(jss::oracle_document_id) || + !params.isMember(jss::account)) + { + jvResult[jss::error] = "malformedRequest"; + return std::nullopt; + } + + auto const& oracle = params; + auto const documentID = [&]() -> std::optional { + auto const id = oracle[jss::oracle_document_id]; + if (id.isUInt() || (id.isInt() && id.asInt() >= 0)) + return std::make_optional(id.asUInt()); + + if (id.isString()) + { + std::uint32_t v; + if (beast::lexicalCastChecked(v, id.asString())) + return std::make_optional(v); + } 
+ + return std::nullopt; + }(); + + auto const account = + parseBase58(oracle[jss::account].asString()); + if (!account || account->isZero()) + { + jvResult[jss::error] = "malformedAddress"; + return std::nullopt; + } + + if (!documentID) + { + jvResult[jss::error] = "malformedDocumentID"; + return std::nullopt; + } + + return keylet::oracle(*account, *documentID).key; +} + static std::optional parsePaymentChannel(Json::Value const& params, Json::Value& jvResult) { @@ -315,6 +591,51 @@ parsePaymentChannel(Json::Value const& params, Json::Value& jvResult) return uNodeIndex; } +static std::optional +parsePermissionedDomains(Json::Value const& pd, Json::Value& jvResult) +{ + if (pd.isString()) + { + auto const index = parseIndex(pd, jvResult); + return index; + } + + if (!pd.isObject()) + { + jvResult[jss::error] = "malformedRequest"; + return std::nullopt; + } + + if (!pd.isMember(jss::account)) + { + jvResult[jss::error] = "malformedRequest"; + return std::nullopt; + } + + if (!pd[jss::account].isString()) + { + jvResult[jss::error] = "malformedAddress"; + return std::nullopt; + } + + if (!pd.isMember(jss::seq) || + (pd[jss::seq].isInt() && pd[jss::seq].asInt() < 0) || + (!pd[jss::seq].isInt() && !pd[jss::seq].isUInt())) + { + jvResult[jss::error] = "malformedRequest"; + return std::nullopt; + } + + auto const account = parseBase58(pd[jss::account].asString()); + if (!account) + { + jvResult[jss::error] = "malformedAddress"; + return std::nullopt; + } + + return keylet::permissionedDomain(*account, pd[jss::seq].asUInt()).key; +} + static std::optional parseRippleState(Json::Value const& jvRippleState, Json::Value& jvResult) { @@ -383,108 +704,6 @@ parseTicket(Json::Value const& params, Json::Value& jvResult) return getTicketIndex(*id, params[jss::ticket_seq].asUInt()); } -static std::optional -parseNFTokenPage(Json::Value const& params, Json::Value& jvResult) -{ - if (params.isString()) - { - uint256 uNodeIndex; - if (!uNodeIndex.parseHex(params.asString())) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - return uNodeIndex; - } - - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; -} - -static std::optional -parseAMM(Json::Value const& params, Json::Value& jvResult) -{ - if (!params.isObject()) - { - uint256 uNodeIndex; - if (!uNodeIndex.parseHex(params.asString())) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - return uNodeIndex; - } - - if (!params.isMember(jss::asset) || !params.isMember(jss::asset2)) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - - try - { - auto const issue = issueFromJson(params[jss::asset]); - auto const issue2 = issueFromJson(params[jss::asset2]); - return keylet::amm(issue, issue2).key; - } - catch (std::runtime_error const&) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } -} - -static std::optional -parseBridge(Json::Value const& params, Json::Value& jvResult) -{ - // return the keylet for the specified bridge or nullopt if the - // request is malformed - auto const maybeKeylet = [&]() -> std::optional { - try - { - if (!params.isMember(jss::bridge_account)) - return std::nullopt; - - auto const& jsBridgeAccount = params[jss::bridge_account]; - if (!jsBridgeAccount.isString()) - { - return std::nullopt; - } - - auto const account = - parseBase58(jsBridgeAccount.asString()); - if (!account || account->isZero()) - { - return std::nullopt; - } - - // This may throw and is the reason for the `try` block. 
The - // try block has a larger scope so the `bridge` variable - // doesn't need to be an optional. - STXChainBridge const bridge(params[jss::bridge]); - STXChainBridge::ChainType const chainType = - STXChainBridge::srcChain(account == bridge.lockingChainDoor()); - - if (account != bridge.door(chainType)) - return std::nullopt; - - return keylet::bridge(bridge, chainType); - } - catch (...) - { - return std::nullopt; - } - }(); - - if (maybeKeylet) - { - return maybeKeylet->key; - } - - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; -} - static std::optional parseXChainOwnedClaimID(Json::Value const& claim_id, Json::Value& jvResult) { @@ -633,225 +852,6 @@ parseXChainOwnedCreateAccountClaimID( return std::nullopt; } -static std::optional -parseDID(Json::Value const& params, Json::Value& jvResult) -{ - auto const account = parseBase58(params.asString()); - if (!account || account->isZero()) - { - jvResult[jss::error] = "malformedAddress"; - return std::nullopt; - } - - return keylet::did(*account).key; -} - -static std::optional -parseOracle(Json::Value const& params, Json::Value& jvResult) -{ - if (!params.isObject()) - { - uint256 uNodeIndex; - if (!uNodeIndex.parseHex(params.asString())) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - return uNodeIndex; - } - - if (!params.isMember(jss::oracle_document_id) || - !params.isMember(jss::account)) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - - auto const& oracle = params; - auto const documentID = [&]() -> std::optional { - auto const id = oracle[jss::oracle_document_id]; - if (id.isUInt() || (id.isInt() && id.asInt() >= 0)) - return std::make_optional(id.asUInt()); - - if (id.isString()) - { - std::uint32_t v; - if (beast::lexicalCastChecked(v, id.asString())) - return std::make_optional(v); - } - - return std::nullopt; - }(); - - auto const account = - parseBase58(oracle[jss::account].asString()); - if (!account || account->isZero()) - { - jvResult[jss::error] = "malformedAddress"; - return std::nullopt; - } - - if (!documentID) - { - jvResult[jss::error] = "malformedDocumentID"; - return std::nullopt; - } - - return keylet::oracle(*account, *documentID).key; -} - -static std::optional -parseCredential(Json::Value const& cred, Json::Value& jvResult) -{ - if (cred.isString()) - { - uint256 uNodeIndex; - if (!uNodeIndex.parseHex(cred.asString())) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - return uNodeIndex; - } - - if ((!cred.isMember(jss::subject) || !cred[jss::subject].isString()) || - (!cred.isMember(jss::issuer) || !cred[jss::issuer].isString()) || - (!cred.isMember(jss::credential_type) || - !cred[jss::credential_type].isString())) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - - auto const subject = parseBase58(cred[jss::subject].asString()); - auto const issuer = parseBase58(cred[jss::issuer].asString()); - auto const credType = strUnHex(cred[jss::credential_type].asString()); - - if (!subject || subject->isZero() || !issuer || issuer->isZero() || - !credType || credType->empty()) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - - return keylet::credential( - *subject, *issuer, Slice(credType->data(), credType->size())) - .key; -} - -static std::optional -parseMPTokenIssuance( - Json::Value const& unparsedMPTIssuanceID, - Json::Value& jvResult) -{ - if (unparsedMPTIssuanceID.isString()) - { - uint192 mptIssuanceID; - if 
(!mptIssuanceID.parseHex(unparsedMPTIssuanceID.asString())) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - - return keylet::mptIssuance(mptIssuanceID).key; - } - - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; -} - -static std::optional<uint256> -parseMPToken(Json::Value const& mptJson, Json::Value& jvResult) -{ - if (!mptJson.isObject()) - { - uint256 uNodeIndex; - if (!uNodeIndex.parseHex(mptJson.asString())) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - return uNodeIndex; - } - - if (!mptJson.isMember(jss::mpt_issuance_id) || - !mptJson.isMember(jss::account)) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - - try - { - auto const mptIssuanceIdStr = mptJson[jss::mpt_issuance_id].asString(); - - uint192 mptIssuanceID; - if (!mptIssuanceID.parseHex(mptIssuanceIdStr)) - Throw<std::runtime_error>("Cannot parse mpt_issuance_id"); - - auto const account = - parseBase58<AccountID>(mptJson[jss::account].asString()); - - if (!account || account->isZero()) - { - jvResult[jss::error] = "malformedAddress"; - return std::nullopt; - } - - return keylet::mptoken(mptIssuanceID, *account).key; - } - catch (std::runtime_error const&) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } -} - -static std::optional<uint256> -parsePermissionedDomains(Json::Value const& pd, Json::Value& jvResult) -{ - if (pd.isString()) - { - auto const index = parseIndex(pd, jvResult); - return index; - } - - if (!pd.isObject()) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - - if (!pd.isMember(jss::account)) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - - if (!pd[jss::account].isString()) - { - jvResult[jss::error] = "malformedAddress"; - return std::nullopt; - } - - if (!pd.isMember(jss::seq) || - (pd[jss::seq].isInt() && pd[jss::seq].asInt() < 0) || - (!pd[jss::seq].isInt() && !pd[jss::seq].isUInt())) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - - auto const account = parseBase58<AccountID>(pd[jss::account].asString()); - if (!account) - { - jvResult[jss::error] = "malformedAddress"; - return std::nullopt; - } - - return keylet::permissionedDomain(*account, pd[jss::seq].asUInt()).key; -} - using FunctionType = std::function<std::optional<uint256>(Json::Value const&, Json::Value&)>; From a574ec60232922a96b15089cee979e3296ce7699 Mon Sep 17 00:00:00 2001 From: Denis Angell Date: Thu, 10 Apr 2025 00:08:44 +0200 Subject: [PATCH 008/244] fix: `fixPayChanV1` (#4717) This change introduces a new fix amendment (`fixPayChanV1`) that prevents the creation of a new `PaymentChannelCreate` transaction with a `CancelAfter` time earlier than the current ledger time. It piggybacks on fix1571. Once the amendment is activated, a `PaymentChannelCreate` transaction that specifies `CancelAfter` must set it to a value greater than or equal to the current ledger time. Currently, users can create a payment channel whose `CancelAfter` time is before the current ledger time, which results in the channel being closed immediately by the next PaymentChannel transaction.
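For context on the boundary behavior the new tests pin down (a `CancelAfter` strictly before the parent close time now fails with `tecEXPIRED`, while a value equal to it is still accepted): the rule reduces to the strict `after()` comparison that this patch hoists out of `Escrow.cpp` into `View.h`/`View.cpp`, gated on the amendment the diff registers as `fixPayChanCancelAfter`. The following is a minimal standalone sketch of that comparison only, not rippled code: plain `std::uint32_t` seconds stand in for `NetClock::time_point` and the ledger's parent close time, and `wouldExpire` is an illustrative helper rather than an actual API.

#include <cstdint>
#include <iostream>

// Same shape as the helper the patch moves into View: true only if
// `now` is strictly after `mark`.
static bool after(std::uint32_t now, std::uint32_t mark)
{
    return now > mark;
}

// Illustrative only: would a PaymentChannelCreate carrying this
// CancelAfter be rejected (tecEXPIRED) under the new amendment?
static bool wouldExpire(std::uint32_t parentCloseTime, std::uint32_t cancelAfter)
{
    return after(parentCloseTime, cancelAfter);
}

int main()
{
    std::uint32_t const parentCloseTime = 1000;
    std::cout << wouldExpire(parentCloseTime, 999) << '\n';   // 1: CancelAfter already passed
    std::cout << wouldExpire(parentCloseTime, 1000) << '\n';  // 0: equal to close time is allowed
    std::cout << wouldExpire(parentCloseTime, 1001) << '\n';  // 0: future CancelAfter is allowed
}

Because the comparison is strict, a channel whose `CancelAfter` equals the parent close time is still created, which is what the second new test case in `PayChan_test.cpp` asserts.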
--- include/xrpl/protocol/detail/features.macro | 1 + src/test/app/PayChan_test.cpp | 46 +++++++++++++++++++++ src/xrpld/app/tx/detail/Escrow.cpp | 12 ------ src/xrpld/app/tx/detail/PayChan.cpp | 7 ++++ src/xrpld/ledger/View.h | 9 ++++ src/xrpld/ledger/detail/View.cpp | 6 +++ 6 files changed, 69 insertions(+), 12 deletions(-) diff --git a/include/xrpl/protocol/detail/features.macro b/include/xrpl/protocol/detail/features.macro index f54fb9e974..ac393eae98 100644 --- a/include/xrpl/protocol/detail/features.macro +++ b/include/xrpl/protocol/detail/features.macro @@ -32,6 +32,7 @@ // If you add an amendment here, then do not forget to increment `numFeatures` // in include/xrpl/protocol/Feature.h. +XRPL_FIX (PayChanCancelAfter, Supported::yes, VoteBehavior::DefaultNo) // Check flags in Credential transactions XRPL_FIX (InvalidTxFlags, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (FrozenLPTokenTransfer, Supported::yes, VoteBehavior::DefaultNo) diff --git a/src/test/app/PayChan_test.cpp b/src/test/app/PayChan_test.cpp index 4fd5f0bb26..f2fcf344da 100644 --- a/src/test/app/PayChan_test.cpp +++ b/src/test/app/PayChan_test.cpp @@ -402,6 +402,52 @@ struct PayChan_test : public beast::unit_test::suite BEAST_EXPECT(!channelExists(*env.current(), chan)); BEAST_EXPECT(env.balance(alice) == preAlice + channelFunds); } + // fixPayChanCancelAfter + // CancelAfter should be greater than close time + { + for (bool const withFixPayChan : {true, false}) + { + auto const amend = withFixPayChan + ? features + : features - fixPayChanCancelAfter; + Env env{*this, amend}; + env.fund(XRP(10000), alice, bob); + env.close(); + + auto const pk = alice.pk(); + auto const settleDelay = 100s; + auto const channelFunds = XRP(1000); + NetClock::time_point const cancelAfter = + env.current()->info().parentCloseTime - 1s; + auto const txResult = + withFixPayChan ? ter(tecEXPIRED) : ter(tesSUCCESS); + env(create( + alice, bob, channelFunds, settleDelay, pk, cancelAfter), + txResult); + } + } + // fixPayChanCancelAfter + // CancelAfter can be equal to the close time + { + for (bool const withFixPayChan : {true, false}) + { + auto const amend = withFixPayChan + ? features + : features - fixPayChanCancelAfter; + Env env{*this, amend}; + env.fund(XRP(10000), alice, bob); + env.close(); + + auto const pk = alice.pk(); + auto const settleDelay = 100s; + auto const channelFunds = XRP(1000); + NetClock::time_point const cancelAfter = + env.current()->info().parentCloseTime; + env(create( + alice, bob, channelFunds, settleDelay, pk, cancelAfter), + ter(tesSUCCESS)); + } + } } void diff --git a/src/xrpld/app/tx/detail/Escrow.cpp b/src/xrpld/app/tx/detail/Escrow.cpp index 5cf5e8740b..bc9ad0a11f 100644 --- a/src/xrpld/app/tx/detail/Escrow.cpp +++ b/src/xrpld/app/tx/detail/Escrow.cpp @@ -76,18 +76,6 @@ namespace ripple { //------------------------------------------------------------------------------ -/** Has the specified time passed? - - @param now the current time - @param mark the cutoff point - @return true if \a now refers to a time strictly after \a mark, else false. 
-*/ -static inline bool -after(NetClock::time_point now, std::uint32_t mark) -{ - return now.time_since_epoch().count() > mark; -} - TxConsequences EscrowCreate::makeTxConsequences(PreflightContext const& ctx) { diff --git a/src/xrpld/app/tx/detail/PayChan.cpp b/src/xrpld/app/tx/detail/PayChan.cpp index fa5d44fb00..25cdd0e69a 100644 --- a/src/xrpld/app/tx/detail/PayChan.cpp +++ b/src/xrpld/app/tx/detail/PayChan.cpp @@ -252,6 +252,13 @@ PayChanCreate::doApply() if (!sle) return tefINTERNAL; + if (ctx_.view().rules().enabled(fixPayChanCancelAfter)) + { + auto const closeTime = ctx_.view().info().parentCloseTime; + if (ctx_.tx[~sfCancelAfter] && after(closeTime, ctx_.tx[sfCancelAfter])) + return tecEXPIRED; + } + auto const dst = ctx_.tx[sfDestination]; // Create PayChan in ledger. diff --git a/src/xrpld/ledger/View.h b/src/xrpld/ledger/View.h index 85aa02f1b4..bb04fa8b87 100644 --- a/src/xrpld/ledger/View.h +++ b/src/xrpld/ledger/View.h @@ -592,6 +592,15 @@ deleteAMMTrustLine( std::optional const& ammAccountID, beast::Journal j); +/** Has the specified time passed? + + @param now the current time + @param mark the cutoff point + @return true if \a now refers to a time strictly after \a mark, else false. +*/ +bool +after(NetClock::time_point now, std::uint32_t mark); + } // namespace ripple #endif diff --git a/src/xrpld/ledger/detail/View.cpp b/src/xrpld/ledger/detail/View.cpp index 1860f61445..2a5224ebf1 100644 --- a/src/xrpld/ledger/detail/View.cpp +++ b/src/xrpld/ledger/detail/View.cpp @@ -2123,4 +2123,10 @@ rippleCredit( saAmount.asset().value()); } +bool +after(NetClock::time_point now, std::uint32_t mark) +{ + return now.time_since_epoch().count() > mark; +} + } // namespace ripple From aafd2d85251d760bb00862945b9c10556181231b Mon Sep 17 00:00:00 2001 From: Wietse Wind Date: Thu, 10 Apr 2025 08:37:24 +0200 Subject: [PATCH 009/244] Fix: admin RPC webhook queue limit removal and timeout reduction (#5163) When using subscribe at admin RPC port to send webhooks for the transaction stream to a backend, on large(r) ledgers the endpoint receives fewer HTTP POSTs with TX information than the amount of transactions in a ledger. This change removes the hardcoded queue length to avoid dropping TX notifications for the admin-only command. In addition, the per-request TTL for outgoing RPC HTTP calls has been reduced from 10 minutes to 30 seconds. --- src/xrpld/net/detail/RPCCall.cpp | 4 ++-- src/xrpld/net/detail/RPCSub.cpp | 9 --------- 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/src/xrpld/net/detail/RPCCall.cpp b/src/xrpld/net/detail/RPCCall.cpp index 814ea362e4..92f48f8812 100644 --- a/src/xrpld/net/detail/RPCCall.cpp +++ b/src/xrpld/net/detail/RPCCall.cpp @@ -1663,7 +1663,7 @@ fromNetwork( constexpr auto RPC_REPLY_MAX_BYTES = megabytes(256); using namespace std::chrono_literals; - auto constexpr RPC_NOTIFY = 10min; + auto constexpr RPC_WEBHOOK_TIMEOUT = 30s; HTTPClient::request( bSSL, @@ -1680,7 +1680,7 @@ fromNetwork( std::placeholders::_2, j), RPC_REPLY_MAX_BYTES, - RPC_NOTIFY, + RPC_WEBHOOK_TIMEOUT, std::bind( &RPCCallImp::onResponse, callbackFuncP, diff --git a/src/xrpld/net/detail/RPCSub.cpp b/src/xrpld/net/detail/RPCSub.cpp index 96f91cdcf5..994292e7b8 100644 --- a/src/xrpld/net/detail/RPCSub.cpp +++ b/src/xrpld/net/detail/RPCSub.cpp @@ -80,13 +80,6 @@ public: { std::lock_guard sl(mLock); - if (mDeque.size() >= eventQueueMax) - { - // Drop the previous event. - JLOG(j_.warn()) << "RPCCall::fromNetwork drop"; - mDeque.pop_back(); - } - auto jm = broadcast ? 
j_.debug() : j_.info(); JLOG(jm) << "RPCCall::fromNetwork push: " << jvObj; @@ -184,8 +177,6 @@ private: } private: - enum { eventQueueMax = 32 }; - boost::asio::io_service& m_io_service; JobQueue& m_jobQueue; From c4308b216f0cc7b1a33a720b4d38bbc850c4a2f1 Mon Sep 17 00:00:00 2001 From: Denis Angell Date: Thu, 10 Apr 2025 14:38:52 +0200 Subject: [PATCH 010/244] fix: Adds CTID to RPC tx and updates error (#4738) This change fixes a number of issues involved with CTID: * CTID is not present on all RPC tx transactions. * rpcWRONG_NETWORK is missing in the ErrorCodes.cpp --- src/libxrpl/protocol/ErrorCodes.cpp | 1 + src/test/rpc/Transaction_test.cpp | 94 +++++++++++++------ src/xrpld/app/ledger/TransactionMaster.h | 6 +- .../app/ledger/detail/TransactionMaster.cpp | 8 +- src/xrpld/app/misc/NetworkOPs.cpp | 15 +++ src/xrpld/app/misc/Transaction.h | 8 +- src/xrpld/app/misc/detail/AccountTxPaging.cpp | 13 ++- src/xrpld/app/misc/detail/Transaction.cpp | 25 ++++- src/xrpld/app/rdb/backend/detail/Node.cpp | 6 +- src/xrpld/rpc/CTID.h | 20 ++-- src/xrpld/rpc/handlers/Tx.cpp | 16 ++-- 11 files changed, 163 insertions(+), 49 deletions(-) diff --git a/src/libxrpl/protocol/ErrorCodes.cpp b/src/libxrpl/protocol/ErrorCodes.cpp index ca853c690e..93e30f24be 100644 --- a/src/libxrpl/protocol/ErrorCodes.cpp +++ b/src/libxrpl/protocol/ErrorCodes.cpp @@ -96,6 +96,7 @@ constexpr static ErrorInfo unorderedErrorInfos[]{ {rpcNOT_SYNCED, "notSynced", "Not synced to the network.", 503}, {rpcNO_EVENTS, "noEvents", "Current transport does not support events.", 405}, {rpcNO_NETWORK, "noNetwork", "Not synced to the network.", 503}, + {rpcWRONG_NETWORK, "wrongNetwork", "Wrong network.", 503}, {rpcNO_PERMISSION, "noPermission", "You don't have permission for this command.", 401}, {rpcNO_PF_REQUEST, "noPathRequest", "No pathfinding request in progress.", 404}, {rpcOBJECT_NOT_FOUND, "objectNotFound", "The requested object was not found.", 404}, diff --git a/src/test/rpc/Transaction_test.cpp b/src/test/rpc/Transaction_test.cpp index 75604225ba..0a5821499a 100644 --- a/src/test/rpc/Transaction_test.cpp +++ b/src/test/rpc/Transaction_test.cpp @@ -300,7 +300,7 @@ class Transaction_test : public beast::unit_test::suite void testRangeCTIDRequest(FeatureBitset features) { - testcase("ctid_range"); + testcase("CTID Range Request"); using namespace test::jtx; using std::to_string; @@ -548,7 +548,7 @@ class Transaction_test : public beast::unit_test::suite void testCTIDValidation(FeatureBitset features) { - testcase("ctid_validation"); + testcase("CTID Validation"); using namespace test::jtx; using std::to_string; @@ -570,20 +570,10 @@ class Transaction_test : public beast::unit_test::suite BEAST_EXPECT(!RPC::encodeCTID(0x1000'0000UL, 0xFFFFU, 0xFFFFU)); // Test case 3: txn_index greater than 0xFFFF - // this test case is impossible in c++ due to the type, left in for - // completeness - auto const expected3 = std::optional("CFFFFFFF0000FFFF"); - BEAST_EXPECT( - RPC::encodeCTID(0x0FFF'FFFF, (uint16_t)0x10000, 0xFFFF) == - expected3); + BEAST_EXPECT(!RPC::encodeCTID(0x0FFF'FFFF, 0x1'0000, 0xFFFF)); // Test case 4: network_id greater than 0xFFFF - // this test case is impossible in c++ due to the type, left in for - // completeness - auto const expected4 = std::optional("CFFFFFFFFFFF0000"); - BEAST_EXPECT( - RPC::encodeCTID(0x0FFF'FFFFUL, 0xFFFFU, (uint16_t)0x1000'0U) == - expected4); + BEAST_EXPECT(!RPC::encodeCTID(0x0FFF'FFFFUL, 0xFFFFU, 0x1'0000U)); // Test case 5: Valid input values auto const expected51 = @@ -647,14 +637,15 @@ class 
Transaction_test : public beast::unit_test::suite void testCTIDRPC(FeatureBitset features) { - testcase("ctid_rpc"); + testcase("CTID RPC"); using namespace test::jtx; - // test that the ctid AND the hash are in the response + // Use a Concise Transaction Identifier to request a transaction. + for (uint32_t netID : {11111, 65535, 65536}) { - Env env{*this, makeNetworkConfig(11111)}; - uint32_t netID = env.app().config().NETWORK_ID; + Env env{*this, makeNetworkConfig(netID)}; + BEAST_EXPECT(netID == env.app().config().NETWORK_ID); auto const alice = Account("alice"); auto const bob = Account("bob"); @@ -664,14 +655,22 @@ class Transaction_test : public beast::unit_test::suite env(pay(alice, bob, XRP(10))); env.close(); - auto const ctid = *RPC::encodeCTID(startLegSeq, 0, netID); + auto const ctid = RPC::encodeCTID(startLegSeq, 0, netID); + if (netID > 0xFFFF) + { + // Concise transaction IDs do not support a network ID > 0xFFFF. + BEAST_EXPECT(ctid == std::nullopt); + continue; + } + Json::Value jsonTx; jsonTx[jss::binary] = false; - jsonTx[jss::ctid] = ctid; + jsonTx[jss::ctid] = *ctid; jsonTx[jss::id] = 1; - auto jrr = env.rpc("json", "tx", to_string(jsonTx))[jss::result]; + auto const jrr = + env.rpc("json", "tx", to_string(jsonTx))[jss::result]; BEAST_EXPECT(jrr[jss::ctid] == ctid); - BEAST_EXPECT(jrr[jss::hash]); + BEAST_EXPECT(jrr.isMember(jss::hash)); } // test querying with mixed case ctid @@ -716,8 +715,44 @@ class Transaction_test : public beast::unit_test::suite } // test that if the network is 65535 the ctid is not in the response + // Using a hash to request the transaction, test the network ID + // boundary where the CTID is (not) in the response. + for (uint32_t netID : {2, 1024, 65535, 65536}) { - Env env{*this, makeNetworkConfig(65535)}; + Env env{*this, makeNetworkConfig(netID)}; + BEAST_EXPECT(netID == env.app().config().NETWORK_ID); + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + + env.fund(XRP(10000), alice, bob); + env(pay(alice, bob, XRP(10))); + env.close(); + + auto const ledgerSeq = env.current()->info().seq; + + env(noop(alice), ter(tesSUCCESS)); + env.close(); + + Json::Value params; + params[jss::id] = 1; + auto const hash = env.tx()->getJson(JsonOptions::none)[jss::hash]; + params[jss::transaction] = hash; + auto const jrr = + env.rpc("json", "tx", to_string(params))[jss::result]; + BEAST_EXPECT(jrr[jss::hash] == hash); + + BEAST_EXPECT(jrr.isMember(jss::ctid) == (netID <= 0xFFFF)); + if (jrr.isMember(jss::ctid)) + { + auto const ctid = RPC::encodeCTID(ledgerSeq, 0, netID); + BEAST_EXPECT(jrr[jss::ctid] == *ctid); + } + } + + // test the wrong network ID was submitted + { + Env env{*this, makeNetworkConfig(21337)}; uint32_t netID = env.app().config().NETWORK_ID; auto const alice = Account("alice"); @@ -728,14 +763,19 @@ class Transaction_test : public beast::unit_test::suite env(pay(alice, bob, XRP(10))); env.close(); - auto const ctid = *RPC::encodeCTID(startLegSeq, 0, netID); + auto const ctid = *RPC::encodeCTID(startLegSeq, 0, netID + 1); Json::Value jsonTx; jsonTx[jss::binary] = false; jsonTx[jss::ctid] = ctid; jsonTx[jss::id] = 1; - auto jrr = env.rpc("json", "tx", to_string(jsonTx))[jss::result]; - BEAST_EXPECT(!jrr[jss::ctid]); - BEAST_EXPECT(jrr[jss::hash]); + auto const jrr = + env.rpc("json", "tx", to_string(jsonTx))[jss::result]; + BEAST_EXPECT(jrr[jss::error] == "wrongNetwork"); + BEAST_EXPECT(jrr[jss::error_code] == rpcWRONG_NETWORK); + BEAST_EXPECT( + jrr[jss::error_message] == + "Wrong network. 
You should submit this request to a node " + "running on NetworkID: 21338"); } } diff --git a/src/xrpld/app/ledger/TransactionMaster.h b/src/xrpld/app/ledger/TransactionMaster.h index ffbbe4ae09..f6993dc0e8 100644 --- a/src/xrpld/app/ledger/TransactionMaster.h +++ b/src/xrpld/app/ledger/TransactionMaster.h @@ -76,7 +76,11 @@ public: // return value: true = we had the transaction already bool - inLedger(uint256 const& hash, std::uint32_t ledger); + inLedger( + uint256 const& hash, + std::uint32_t ledger, + std::optional tseq, + std::optional netID); void canonicalize(std::shared_ptr* pTransaction); diff --git a/src/xrpld/app/ledger/detail/TransactionMaster.cpp b/src/xrpld/app/ledger/detail/TransactionMaster.cpp index ea13ad53e4..1f5ab7e5b0 100644 --- a/src/xrpld/app/ledger/detail/TransactionMaster.cpp +++ b/src/xrpld/app/ledger/detail/TransactionMaster.cpp @@ -39,14 +39,18 @@ TransactionMaster::TransactionMaster(Application& app) } bool -TransactionMaster::inLedger(uint256 const& hash, std::uint32_t ledger) +TransactionMaster::inLedger( + uint256 const& hash, + std::uint32_t ledger, + std::optional tseq, + std::optional netID) { auto txn = mCache.fetch(hash); if (!txn) return false; - txn->setStatus(COMMITTED, ledger); + txn->setStatus(COMMITTED, ledger, tseq, netID); return true; } diff --git a/src/xrpld/app/misc/NetworkOPs.cpp b/src/xrpld/app/misc/NetworkOPs.cpp index b72963aa81..abd6ff7da7 100644 --- a/src/xrpld/app/misc/NetworkOPs.cpp +++ b/src/xrpld/app/misc/NetworkOPs.cpp @@ -47,6 +47,7 @@ #include #include #include +#include #include #include #include @@ -3080,6 +3081,20 @@ NetworkOPsImp::transJson( jvObj[jss::meta], transaction, meta->get()); } + // add CTID where the needed data for it exists + if (auto const& lookup = ledger->txRead(transaction->getTransactionID()); + lookup.second && lookup.second->isFieldPresent(sfTransactionIndex)) + { + uint32_t const txnSeq = lookup.second->getFieldU32(sfTransactionIndex); + uint32_t netID = app_.config().NETWORK_ID; + if (transaction->isFieldPresent(sfNetworkID)) + netID = transaction->getFieldU32(sfNetworkID); + + if (std::optional ctid = + RPC::encodeCTID(ledger->info().seq, txnSeq, netID); + ctid) + jvObj[jss::ctid] = *ctid; + } if (!ledger->open()) jvObj[jss::ledger_hash] = to_string(ledger->info().hash); diff --git a/src/xrpld/app/misc/Transaction.h b/src/xrpld/app/misc/Transaction.h index a2ef496dff..82e5b55bf6 100644 --- a/src/xrpld/app/misc/Transaction.h +++ b/src/xrpld/app/misc/Transaction.h @@ -128,7 +128,11 @@ public: } void - setStatus(TransStatus status, std::uint32_t ledgerSeq); + setStatus( + TransStatus status, + std::uint32_t ledgerSeq, + std::optional transactionSeq = std::nullopt, + std::optional networkID = std::nullopt); void setStatus(TransStatus status) @@ -388,6 +392,8 @@ private: uint256 mTransactionID; LedgerIndex mLedgerIndex = 0; + std::optional mTxnSeq; + std::optional mNetworkID; TransStatus mStatus = INVALID; TER mResult = temUNCERTAIN; bool mApplying = false; diff --git a/src/xrpld/app/misc/detail/AccountTxPaging.cpp b/src/xrpld/app/misc/detail/AccountTxPaging.cpp index 278680581e..243d2d4d53 100644 --- a/src/xrpld/app/misc/detail/AccountTxPaging.cpp +++ b/src/xrpld/app/misc/detail/AccountTxPaging.cpp @@ -41,12 +41,19 @@ convertBlobsToTxResult( auto tr = std::make_shared(txn, reason, app); - tr->setStatus(Transaction::sqlTransactionStatus(status)); - tr->setLedger(ledger_index); - auto metaset = std::make_shared(tr->getID(), tr->getLedger(), rawMeta); + // if properly formed meta is available we can use it to 
generate ctid + if (metaset->getAsObject().isFieldPresent(sfTransactionIndex)) + tr->setStatus( + Transaction::sqlTransactionStatus(status), + ledger_index, + metaset->getAsObject().getFieldU32(sfTransactionIndex), + app.config().NETWORK_ID); + else + tr->setStatus(Transaction::sqlTransactionStatus(status), ledger_index); + to.emplace_back(std::move(tr), metaset); }; diff --git a/src/xrpld/app/misc/detail/Transaction.cpp b/src/xrpld/app/misc/detail/Transaction.cpp index 89bf1a7202..cc38a77d72 100644 --- a/src/xrpld/app/misc/detail/Transaction.cpp +++ b/src/xrpld/app/misc/detail/Transaction.cpp @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -54,10 +55,18 @@ Transaction::Transaction( // void -Transaction::setStatus(TransStatus ts, std::uint32_t lseq) +Transaction::setStatus( + TransStatus ts, + std::uint32_t lseq, + std::optional tseq, + std::optional netID) { mStatus = ts; mLedgerIndex = lseq; + if (tseq) + mTxnSeq = tseq; + if (netID) + mNetworkID = netID; } TransStatus @@ -174,6 +183,20 @@ Transaction::getJson(JsonOptions options, bool binary) const if (ct) ret[jss::date] = ct->time_since_epoch().count(); } + + // compute outgoing CTID + // override local network id if it's explicitly in the txn + std::optional netID = mNetworkID; + if (mTransaction->isFieldPresent(sfNetworkID)) + netID = mTransaction->getFieldU32(sfNetworkID); + + if (mTxnSeq && netID) + { + std::optional const ctid = + RPC::encodeCTID(mLedgerIndex, *mTxnSeq, *netID); + if (ctid) + ret[jss::ctid] = *ctid; + } } return ret; diff --git a/src/xrpld/app/rdb/backend/detail/Node.cpp b/src/xrpld/app/rdb/backend/detail/Node.cpp index a230eac6e3..019d00ed36 100644 --- a/src/xrpld/app/rdb/backend/detail/Node.cpp +++ b/src/xrpld/app/rdb/backend/detail/Node.cpp @@ -339,7 +339,11 @@ saveValidatedLedger( seq, acceptedLedgerTx->getEscMeta()) + ";"); - app.getMasterTransaction().inLedger(transactionID, seq); + app.getMasterTransaction().inLedger( + transactionID, + seq, + acceptedLedgerTx->getTxnSeq(), + app.config().NETWORK_ID); } tr.commit(); diff --git a/src/xrpld/rpc/CTID.h b/src/xrpld/rpc/CTID.h index 8407d51526..042b79b527 100644 --- a/src/xrpld/rpc/CTID.h +++ b/src/xrpld/rpc/CTID.h @@ -30,18 +30,24 @@ namespace ripple { namespace RPC { +// CTID stands for Concise Transaction ID. +// +// The CTID comes from XLS-15d: Concise Transaction Identifier #34 +// +// https://github.com/XRPLF/XRPL-Standards/discussions/34 +// +// The Concise Transaction ID provides a way to identify a transaction +// that includes which network the transaction was submitted to. 
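+//
+// Encoding (see encodeCTID below): a CTID is a 64-bit value rendered as 16
+// uppercase hex digits. The high 32 bits are 0xC0000000 plus the ledger
+// sequence (which must fit in 28 bits), the next 16 bits are the index of
+// the transaction within that ledger, and the low 16 bits are the network ID.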
+ inline std::optional -encodeCTID( - uint32_t ledger_seq, - uint16_t txn_index, - uint16_t network_id) noexcept +encodeCTID(uint32_t ledgerSeq, uint32_t txnIndex, uint32_t networkID) noexcept { - if (ledger_seq > 0x0FFF'FFFF) + if (ledgerSeq > 0x0FFF'FFFF || txnIndex > 0xFFFF || networkID > 0xFFFF) return {}; uint64_t ctidValue = - ((0xC000'0000ULL + static_cast(ledger_seq)) << 32) + - (static_cast(txn_index) << 16) + network_id; + ((0xC000'0000ULL + static_cast(ledgerSeq)) << 32) + + (static_cast(txnIndex) << 16) + networkID; std::stringstream buffer; buffer << std::hex << std::uppercase << std::setfill('0') << std::setw(16) diff --git a/src/xrpld/rpc/handlers/Tx.cpp b/src/xrpld/rpc/handlers/Tx.cpp index c3b3305af7..3db71d9002 100644 --- a/src/xrpld/rpc/handlers/Tx.cpp +++ b/src/xrpld/rpc/handlers/Tx.cpp @@ -169,13 +169,17 @@ doTxHelp(RPC::Context& context, TxArgs args) context.ledgerMaster.getCloseTimeBySeq(txn->getLedger()); // compute outgoing CTID - uint32_t lgrSeq = ledger->info().seq; - uint32_t txnIdx = meta->getAsObject().getFieldU32(sfTransactionIndex); - uint32_t netID = context.app.config().NETWORK_ID; + if (meta->getAsObject().isFieldPresent(sfTransactionIndex)) + { + uint32_t lgrSeq = ledger->info().seq; + uint32_t txnIdx = + meta->getAsObject().getFieldU32(sfTransactionIndex); + uint32_t netID = context.app.config().NETWORK_ID; - if (txnIdx <= 0xFFFFU && netID < 0xFFFFU && lgrSeq < 0x0FFF'FFFFUL) - result.ctid = - RPC::encodeCTID(lgrSeq, (uint16_t)txnIdx, (uint16_t)netID); + if (txnIdx <= 0xFFFFU && netID < 0xFFFFU && lgrSeq < 0x0FFF'FFFFUL) + result.ctid = + RPC::encodeCTID(lgrSeq, (uint32_t)txnIdx, (uint32_t)netID); + } } return {result, rpcSUCCESS}; From 1c99ea23d1784323f1413403d44a88af73ea091e Mon Sep 17 00:00:00 2001 From: Valentin Balaschenko <13349202+vlntb@users.noreply.github.com> Date: Thu, 10 Apr 2025 20:58:29 +0100 Subject: [PATCH 011/244] Temporary disable automatic triggering macOS pipeline (#5397) We temporarily disable running unit tests on macOS on the CI pipeline while we are investigating the delays. --- .github/workflows/macos.yml | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 566e3550e0..955441da0a 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -87,8 +87,9 @@ jobs: generator: ${{ matrix.generator }} configuration: ${{ matrix.configuration }} cmake-args: "-Dassert=TRUE -Dwerr=TRUE ${{ matrix.cmake-args }}" - - name: test - run: | - n=$(nproc) - echo "Using $n test jobs" - ${build_dir}/rippled --unittest --unittest-jobs $n + # TODO: Temporary disabled tests + # - name: test + # run: | + # n=$(nproc) + # echo "Using $n test jobs" + # ${build_dir}/rippled --unittest --unittest-jobs $n From cba512068bfc113291f6bf33434a77902cd1bd41 Mon Sep 17 00:00:00 2001 From: Mayukha Vadari Date: Fri, 11 Apr 2025 05:07:42 -0400 Subject: [PATCH 012/244] refactor: Clean up test logging to make it easier to search (#5396) This PR replaces the word `failed` with `failure` in any test names and renames some test files to fix MSVC warnings, so that it is easier to search through the test output to find tests that failed. 
--- src/test/app/AMM_test.cpp | 9 +++++++-- src/test/app/DepositAuth_test.cpp | 8 ++++---- src/test/jtx/Env.h | 6 ++++-- src/test/jtx/impl/{credentials.cpp => creds.cpp} | 0 src/test/jtx/impl/{did.cpp => dids.cpp} | 0 .../{ledgerStateFix.cpp => ledgerStateFixes.cpp} | 0 src/test/rpc/DepositAuthorized_test.cpp | 12 ++++++++---- src/test/unit_test/multi_runner.cpp | 10 ++++++++++ .../rpc/handlers/{Manifest.cpp => DoManifest.cpp} | 0 9 files changed, 33 insertions(+), 12 deletions(-) rename src/test/jtx/impl/{credentials.cpp => creds.cpp} (100%) rename src/test/jtx/impl/{did.cpp => dids.cpp} (100%) rename src/test/jtx/impl/{ledgerStateFix.cpp => ledgerStateFixes.cpp} (100%) rename src/xrpld/rpc/handlers/{Manifest.cpp => DoManifest.cpp} (100%) diff --git a/src/test/app/AMM_test.cpp b/src/test/app/AMM_test.cpp index e20102f224..a0be79913b 100644 --- a/src/test/app/AMM_test.cpp +++ b/src/test/app/AMM_test.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -6079,6 +6080,8 @@ private: testcase("Fix changeSpotPriceQuality"); using namespace jtx; + std::string logs; + enum class Status { SucceedShouldSucceedResize, // Succeed in pre-fix because // error allowance, succeed post-fix @@ -6161,7 +6164,7 @@ private: boost::smatch match; // tests that succeed should have the same amounts pre-fix and post-fix std::vector> successAmounts; - Env env(*this, features); + Env env(*this, features, std::make_unique(&logs)); auto rules = env.current()->rules(); CurrentTransactionRulesGuard rg(rules); for (auto const& t : tests) @@ -6355,6 +6358,8 @@ private: using namespace std::chrono; FeatureBitset const all{features}; + std::string logs; + Account const gatehub{"gatehub"}; Account const bitstamp{"bitstamp"}; Account const trader{"trader"}; @@ -6583,7 +6588,7 @@ private: for (auto const& features : {all - fixAMMOverflowOffer, all | fixAMMOverflowOffer}) { - Env env(*this, features); + Env env(*this, features, std::make_unique(&logs)); env.fund(XRP(5'000), gatehub, bitstamp, trader); env.close(); diff --git a/src/test/app/DepositAuth_test.cpp b/src/test/app/DepositAuth_test.cpp index 6d9a3ac914..18f7b410b7 100644 --- a/src/test/app/DepositAuth_test.cpp +++ b/src/test/app/DepositAuth_test.cpp @@ -828,7 +828,7 @@ struct DepositPreauth_test : public beast::unit_test::suite Account const john{"john"}; { - testcase("Payment failed with disabled credentials rule."); + testcase("Payment failure with disabled credentials rule."); Env env(*this, supported_amendments() - featureCredentials); @@ -930,7 +930,7 @@ struct DepositPreauth_test : public beast::unit_test::suite } { - testcase("Payment failed with invalid credentials."); + testcase("Payment failure with invalid credentials."); Env env(*this); @@ -1206,7 +1206,7 @@ struct DepositPreauth_test : public beast::unit_test::suite Account const zelda{"zelda"}; { - testcase("Payment failed with expired credentials."); + testcase("Payment failure with expired credentials."); Env env(*this); @@ -1353,7 +1353,7 @@ struct DepositPreauth_test : public beast::unit_test::suite { using namespace std::chrono; - testcase("Escrow failed with expired credentials."); + testcase("Escrow failure with expired credentials."); Env env(*this); diff --git a/src/test/jtx/Env.h b/src/test/jtx/Env.h index 4e8bb64f59..399b176677 100644 --- a/src/test/jtx/Env.h +++ b/src/test/jtx/Env.h @@ -210,8 +210,10 @@ public: * @param args collection of features * */ - Env(beast::unit_test::suite& suite_, FeatureBitset features) - : Env(suite_, envconfig(), features) + 
Env(beast::unit_test::suite& suite_, + FeatureBitset features, + std::unique_ptr logs = nullptr) + : Env(suite_, envconfig(), features, std::move(logs)) { } diff --git a/src/test/jtx/impl/credentials.cpp b/src/test/jtx/impl/creds.cpp similarity index 100% rename from src/test/jtx/impl/credentials.cpp rename to src/test/jtx/impl/creds.cpp diff --git a/src/test/jtx/impl/did.cpp b/src/test/jtx/impl/dids.cpp similarity index 100% rename from src/test/jtx/impl/did.cpp rename to src/test/jtx/impl/dids.cpp diff --git a/src/test/jtx/impl/ledgerStateFix.cpp b/src/test/jtx/impl/ledgerStateFixes.cpp similarity index 100% rename from src/test/jtx/impl/ledgerStateFix.cpp rename to src/test/jtx/impl/ledgerStateFixes.cpp diff --git a/src/test/rpc/DepositAuthorized_test.cpp b/src/test/rpc/DepositAuthorized_test.cpp index 2ccad9d779..8162528ec2 100644 --- a/src/test/rpc/DepositAuthorized_test.cpp +++ b/src/test/rpc/DepositAuthorized_test.cpp @@ -65,6 +65,7 @@ public: void testValid() { + testcase("Valid"); using namespace jtx; Account const alice{"alice"}; Account const becky{"becky"}; @@ -162,6 +163,7 @@ public: void testErrors() { + testcase("Errors"); using namespace jtx; Account const alice{"alice"}; Account const becky{"becky"}; @@ -333,6 +335,8 @@ public: void testCredentials() { + testcase("Credentials"); + using namespace jtx; const char credType[] = "abcde"; @@ -363,7 +367,7 @@ public: { testcase( - "deposit_authorized with credentials failed: empty array."); + "deposit_authorized with credentials failure: empty array."); auto args = depositAuthArgs(alice, becky, "validated"); args[jss::credentials] = Json::arrayValue; @@ -376,7 +380,7 @@ public: { testcase( - "deposit_authorized with credentials failed: not a string " + "deposit_authorized with credentials failure: not a string " "credentials"); auto args = depositAuthArgs(alice, becky, "validated"); @@ -392,7 +396,7 @@ public: { testcase( - "deposit_authorized with credentials failed: not a hex string " + "deposit_authorized with credentials failure: not a hex string " "credentials"); auto args = depositAuthArgs(alice, becky, "validated"); @@ -412,7 +416,7 @@ public: { testcase( - "deposit_authorized with credentials failed: not a credential " + "deposit_authorized with credentials failure: not a credential " "index"); auto args = depositAuthArgs( diff --git a/src/test/unit_test/multi_runner.cpp b/src/test/unit_test/multi_runner.cpp index 0b7d08c5ae..087e37dac2 100644 --- a/src/test/unit_test/multi_runner.cpp +++ b/src/test/unit_test/multi_runner.cpp @@ -577,6 +577,16 @@ multi_runner_child::on_suite_begin(beast::unit_test::suite_info const& info) void multi_runner_child::on_suite_end() { + if (print_log_ || suite_results_.failed > 0) + { + std::stringstream s; + if (num_jobs_ > 1) + s << job_index_ << "> "; + s << (suite_results_.failed > 0 ? "failed: " : "") + << suite_results_.name << " had " << suite_results_.failed + << " failures." 
<< std::endl; + message_queue_send(MessageType::log, s.str()); + } results_.add(suite_results_); message_queue_send(MessageType::test_end, suite_results_.name); } diff --git a/src/xrpld/rpc/handlers/Manifest.cpp b/src/xrpld/rpc/handlers/DoManifest.cpp similarity index 100% rename from src/xrpld/rpc/handlers/Manifest.cpp rename to src/xrpld/rpc/handlers/DoManifest.cpp From 405f4613d8d3af5e4443aaa8f952c557a7c7a54b Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Fri, 11 Apr 2025 18:20:59 -0400 Subject: [PATCH 013/244] chore: Run CI on PRs that are Ready or have the "DraftRunCI" label (#5400) - Avoids costly overhead for idle PRs where the CI results don't add any value. --- .github/workflows/clang-format.yml | 6 +++++- .github/workflows/levelization.yml | 6 +++++- .github/workflows/libxrpl.yml | 2 ++ .github/workflows/macos.yml | 2 ++ .github/workflows/nix.yml | 3 +++ .github/workflows/windows.yml | 2 ++ 6 files changed, 19 insertions(+), 2 deletions(-) diff --git a/.github/workflows/clang-format.yml b/.github/workflows/clang-format.yml index 4b71cbf617..ac6154ab9f 100644 --- a/.github/workflows/clang-format.yml +++ b/.github/workflows/clang-format.yml @@ -1,9 +1,13 @@ name: clang-format -on: [push, pull_request] +on: + push: + pull_request: + types: [opened, reopened, synchronize, ready_for_review] jobs: check: + if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }} runs-on: ubuntu-24.04 env: CLANG_VERSION: 18 diff --git a/.github/workflows/levelization.yml b/.github/workflows/levelization.yml index 3722a78e5e..979049d630 100644 --- a/.github/workflows/levelization.yml +++ b/.github/workflows/levelization.yml @@ -1,9 +1,13 @@ name: levelization -on: [push, pull_request] +on: + push: + pull_request: + types: [opened, reopened, synchronize, ready_for_review] jobs: check: + if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }} runs-on: ubuntu-latest env: CLANG_VERSION: 10 diff --git a/.github/workflows/libxrpl.yml b/.github/workflows/libxrpl.yml index 92deff7810..36ccad5c96 100644 --- a/.github/workflows/libxrpl.yml +++ b/.github/workflows/libxrpl.yml @@ -8,12 +8,14 @@ on: paths: - 'src/libxrpl/protocol/BuildInfo.cpp' - '.github/workflows/libxrpl.yml' + types: [opened, reopened, synchronize, ready_for_review] concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true jobs: publish: + if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }} name: Publish libXRPL outputs: outcome: ${{ steps.upload.outputs.outcome }} diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 955441da0a..905df7e83d 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -1,6 +1,7 @@ name: macos on: pull_request: + types: [opened, reopened, synchronize, ready_for_review] push: # If the branches list is ever changed, be sure to change it on all # build/test jobs (nix, macos, windows, instrumentation) @@ -18,6 +19,7 @@ concurrency: jobs: test: + if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }} strategy: matrix: platform: diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml index abae2ee84a..8a8ba94e2d 100644 --- a/.github/workflows/nix.yml +++ 
b/.github/workflows/nix.yml @@ -1,6 +1,7 @@ name: nix on: pull_request: + types: [opened, reopened, synchronize, ready_for_review] push: # If the branches list is ever changed, be sure to change it on all # build/test jobs (nix, macos, windows) @@ -39,6 +40,7 @@ concurrency: jobs: dependencies: + if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }} strategy: fail-fast: false matrix: @@ -358,6 +360,7 @@ jobs: # later instrumentation-build: + if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }} env: CLANG_RELEASE: 16 strategy: diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 7641db0d10..1d90c2ef58 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -2,6 +2,7 @@ name: windows on: pull_request: + types: [opened, reopened, synchronize, ready_for_review] push: # If the branches list is ever changed, be sure to change it on all # build/test jobs (nix, macos, windows, instrumentation) @@ -21,6 +22,7 @@ concurrency: jobs: test: + if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }} strategy: fail-fast: false matrix: From 217ba8dd4dce8740999eed63e687debd0737d69a Mon Sep 17 00:00:00 2001 From: Denis Angell Date: Thu, 24 Apr 2025 16:24:10 +0200 Subject: [PATCH 014/244] fix: CTID to use correct ledger_index (#5408) --- src/xrpld/app/misc/detail/AccountTxPaging.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/xrpld/app/misc/detail/AccountTxPaging.cpp b/src/xrpld/app/misc/detail/AccountTxPaging.cpp index 243d2d4d53..ff59e4022f 100644 --- a/src/xrpld/app/misc/detail/AccountTxPaging.cpp +++ b/src/xrpld/app/misc/detail/AccountTxPaging.cpp @@ -41,8 +41,7 @@ convertBlobsToTxResult( auto tr = std::make_shared(txn, reason, app); - auto metaset = - std::make_shared(tr->getID(), tr->getLedger(), rawMeta); + auto metaset = std::make_shared(tr->getID(), ledger_index, rawMeta); // if properly formed meta is available we can use it to generate ctid if (metaset->getAsObject().isFieldPresent(sfTransactionIndex)) From fa1e25abef7bf12bde87ba23528346853cef88b0 Mon Sep 17 00:00:00 2001 From: Vlad <129996061+vvysokikh1@users.noreply.github.com> Date: Fri, 25 Apr 2025 16:21:27 +0100 Subject: [PATCH 015/244] chore: Small clarification to lsfDefaultRipple comment (#5410) --- include/xrpl/protocol/LedgerFormats.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/xrpl/protocol/LedgerFormats.h b/include/xrpl/protocol/LedgerFormats.h index 5f3cca53ac..e2ac5bd071 100644 --- a/include/xrpl/protocol/LedgerFormats.h +++ b/include/xrpl/protocol/LedgerFormats.h @@ -132,7 +132,7 @@ enum LedgerSpecificFlags { lsfNoFreeze = 0x00200000, // True, cannot freeze ripple states lsfGlobalFreeze = 0x00400000, // True, all assets frozen lsfDefaultRipple = - 0x00800000, // True, trust lines allow rippling by default + 0x00800000, // True, incoming trust lines allow rippling by default lsfDepositAuth = 0x01000000, // True, all deposits require authorization /* // reserved for Hooks amendment lsfTshCollect = 0x02000000, // True, allow TSH collect-calls to acc hooks From 3502df217484fb1dbd3689a6b1102016d1abda0d Mon Sep 17 00:00:00 2001 From: Vito Tumas <5780819+Tapanito@users.noreply.github.com> Date: Mon, 28 Apr 2025 21:38:55 +0200 Subject: [PATCH 016/244] 
fix: Replaces random endpoint resolution with sequential (#5365) This change addresses an issue where `rippled` attempts to connect to an IPv6 address, even when the local network lacks IPv6 support, resulting in a "Network is unreachable" error. The fix replaces the custom endpoint selection logic with `boost::async_connect`, which sequentially attempts to connect to available endpoints until one succeeds or all fail. --- src/xrpld/app/misc/detail/ValidatorSite.cpp | 4 +- src/xrpld/app/misc/detail/WorkBase.h | 59 ++++++++------------- 2 files changed, 23 insertions(+), 40 deletions(-) diff --git a/src/xrpld/app/misc/detail/ValidatorSite.cpp b/src/xrpld/app/misc/detail/ValidatorSite.cpp index 799689ef25..42d4e9e271 100644 --- a/src/xrpld/app/misc/detail/ValidatorSite.cpp +++ b/src/xrpld/app/misc/detail/ValidatorSite.cpp @@ -102,7 +102,7 @@ ValidatorSite::ValidatorSite( ValidatorSite::~ValidatorSite() { std::unique_lock lock{state_mutex_}; - if (timer_.expires_at() > clock_type::time_point{}) + if (timer_.expiry() > clock_type::time_point{}) { if (!stopping_) { @@ -168,7 +168,7 @@ ValidatorSite::start() { std::lock_guard l0{sites_mutex_}; std::lock_guard l1{state_mutex_}; - if (timer_.expires_at() == clock_type::time_point{}) + if (timer_.expiry() == clock_type::time_point{}) setTimer(l0, l1); } diff --git a/src/xrpld/app/misc/detail/WorkBase.h b/src/xrpld/app/misc/detail/WorkBase.h index a2b852edfd..17f935126b 100644 --- a/src/xrpld/app/misc/detail/WorkBase.h +++ b/src/xrpld/app/misc/detail/WorkBase.h @@ -96,6 +96,9 @@ public: void onResolve(error_code const& ec, results_type results); + void + onConnect(error_code const& ec, endpoint_type const& endpoint); + void onStart(); @@ -195,46 +198,26 @@ WorkBase::onResolve(error_code const& ec, results_type results) if (ec) return fail(ec); - // Use last endpoint if it is successfully connected - // and is in the list, otherwise pick a random endpoint - // from the list (excluding last endpoint). If there is - // only one endpoint and it is the last endpoint then - // use the last endpoint. - lastEndpoint_ = [&]() -> endpoint_type { - int foundIndex = 0; - auto const foundIt = std::find_if( - results.begin(), results.end(), [&](endpoint_type const& e) { - if (e == lastEndpoint_) - return true; - foundIndex++; - return false; - }); - if (foundIt != results.end() && lastStatus_) - return lastEndpoint_; - else if (results.size() == 1) - return *results.begin(); - else if (foundIt == results.end()) - return *std::next(results.begin(), rand_int(results.size() - 1)); - - // lastEndpoint_ is part of the collection - // Pick a random number from the n-1 valid choices, if we use - // this as an index, note the last element will never be chosen - // and the `lastEndpoint_` index may be chosen. So when the - // `lastEndpoint_` index is chosen, that is treated as if the - // last element was chosen. - auto randIndex = - (results.size() > 2) ? 
rand_int(results.size() - 2) : 0; - if (randIndex == foundIndex) - randIndex = results.size() - 1; - return *std::next(results.begin(), randIndex); - }(); - - socket_.async_connect( - lastEndpoint_, + boost::asio::async_connect( + socket_, + results, strand_.wrap(std::bind( - &Impl::onConnect, + &WorkBase::onConnect, impl().shared_from_this(), - std::placeholders::_1))); + std::placeholders::_1, + std::placeholders::_2))); +} + +template +void +WorkBase::onConnect(error_code const& ec, endpoint_type const& endpoint) +{ + lastEndpoint_ = endpoint; + + if (ec) + return fail(ec); + + impl().onConnect(ec); } template From 4a084ce34c6e595ca4e00a2ade1119c5b75e4f32 Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Thu, 1 May 2025 13:58:18 -0400 Subject: [PATCH 017/244] Improve transaction relay logic (#4985) Combines four related changes: 1. "Decrease `shouldRelay` limit to 30s." Pretty self-explanatory. Currently, the limit is 5 minutes, by which point the `HashRouter` entry could have expired, making this transaction look brand new (and thus causing it to be relayed back to peers which have sent it to us recently). 2. "Give a transaction more chances to be retried." Will put a transaction into `LedgerMaster`'s held transactions if the transaction gets a `ter`, `tel`, or `tef` result. Old behavior was just `ter`. * Additionally, to prevent a transaction from being repeatedly held indefinitely, it must meet some extra conditions. (Documented in a comment in the code.) 3. "Pop all transactions with sequential sequences, or tickets." When a transaction is processed successfully, currently, one held transaction for the same account (if any) will be popped out of the held transactions list, and queued up for the next transaction batch. This change pops all transactions for the account, but only if they have sequential sequences (for non-ticket transactions) or use a ticket. This issue was identified from interactions with @mtrippled's #4504, which was merged, but unfortunately reverted later by #4852. When the batches were spaced out, it could potentially take a very long time for a large number of held transactions for an account to get processed through. However, whether batched or not, this change will help get held transactions cleared out, particularly if a missing earlier transaction is what held them up. 4. "Process held transactions through existing NetworkOPs batching." In the current processing, at the end of each consensus round, all held transactions are directly applied to the open ledger, then the held list is reset. This bypasses all of the logic in `NetworkOPs::apply` which, among other things, broadcasts successful transactions to peers. This means that the transaction may not get broadcast to peers for a really long time (5 minutes in the current implementation, or 30 seconds with this first commit). If the node is a bottleneck (either due to network configuration, or because the transaction was submitted locally), the transaction may not be seen by any other nodes or validators before it expires or causes other problems. 
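For reference, the hold and relay windows are now read from an optional `[hashrouter]` config section. Below is a sketch of what such a stanza could look like, using the defaults and the bounds enforced by `setup_HashRouter` in this change (the section is intentionally undocumented; do not change these values without a good reason and network-wide coordination):

```
[hashrouter]
# Seconds a suppression entry is kept. Must be at least 12 and at least
# as large as relay_time. Default: 300.
hold_time = 300
# Seconds before an already-relayed item may be relayed again. Must be at
# least 8. Default: 30.
relay_time = 30
```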
--- src/test/app/HashRouter_test.cpp | 134 ++++++++++++- src/xrpld/app/ledger/LocalTxs.h | 5 + src/xrpld/app/ledger/detail/LedgerMaster.cpp | 28 +-- src/xrpld/app/ledger/detail/LocalTxs.cpp | 7 +- src/xrpld/app/main/Application.cpp | 4 +- src/xrpld/app/main/Tuning.h | 4 + src/xrpld/app/misc/CanonicalTXSet.cpp | 12 +- src/xrpld/app/misc/HashRouter.cpp | 40 +++- src/xrpld/app/misc/HashRouter.h | 55 ++++-- src/xrpld/app/misc/NetworkOPs.cpp | 190 +++++++++++++++++-- src/xrpld/app/misc/NetworkOPs.h | 10 + src/xrpld/app/misc/Transaction.h | 24 +++ 12 files changed, 440 insertions(+), 73 deletions(-) diff --git a/src/test/app/HashRouter_test.cpp b/src/test/app/HashRouter_test.cpp index d8d7ca6851..0737116f13 100644 --- a/src/test/app/HashRouter_test.cpp +++ b/src/test/app/HashRouter_test.cpp @@ -18,6 +18,7 @@ //============================================================================== #include +#include #include #include @@ -27,12 +28,22 @@ namespace test { class HashRouter_test : public beast::unit_test::suite { + HashRouter::Setup + getSetup(std::chrono::seconds hold, std::chrono::seconds relay) + { + HashRouter::Setup setup; + setup.holdTime = hold; + setup.relayTime = relay; + return setup; + } + void testNonExpiration() { + testcase("Non-expiration"); using namespace std::chrono_literals; TestStopwatch stopwatch; - HashRouter router(stopwatch, 2s); + HashRouter router(getSetup(2s, 1s), stopwatch); uint256 const key1(1); uint256 const key2(2); @@ -67,9 +78,10 @@ class HashRouter_test : public beast::unit_test::suite void testExpiration() { + testcase("Expiration"); using namespace std::chrono_literals; TestStopwatch stopwatch; - HashRouter router(stopwatch, 2s); + HashRouter router(getSetup(2s, 1s), stopwatch); uint256 const key1(1); uint256 const key2(2); @@ -144,10 +156,11 @@ class HashRouter_test : public beast::unit_test::suite void testSuppression() { + testcase("Suppression"); // Normal HashRouter using namespace std::chrono_literals; TestStopwatch stopwatch; - HashRouter router(stopwatch, 2s); + HashRouter router(getSetup(2s, 1s), stopwatch); uint256 const key1(1); uint256 const key2(2); @@ -173,9 +186,10 @@ class HashRouter_test : public beast::unit_test::suite void testSetFlags() { + testcase("Set Flags"); using namespace std::chrono_literals; TestStopwatch stopwatch; - HashRouter router(stopwatch, 2s); + HashRouter router(getSetup(2s, 1s), stopwatch); uint256 const key1(1); BEAST_EXPECT(router.setFlags(key1, 10)); @@ -186,9 +200,10 @@ class HashRouter_test : public beast::unit_test::suite void testRelay() { + testcase("Relay"); using namespace std::chrono_literals; TestStopwatch stopwatch; - HashRouter router(stopwatch, 1s); + HashRouter router(getSetup(50s, 1s), stopwatch); uint256 const key1(1); @@ -229,9 +244,10 @@ class HashRouter_test : public beast::unit_test::suite void testProcess() { + testcase("Process"); using namespace std::chrono_literals; TestStopwatch stopwatch; - HashRouter router(stopwatch, 5s); + HashRouter router(getSetup(5s, 1s), stopwatch); uint256 const key(1); HashRouter::PeerShortID peer = 1; int flags; @@ -243,6 +259,111 @@ class HashRouter_test : public beast::unit_test::suite BEAST_EXPECT(router.shouldProcess(key, peer, flags, 1s)); } + void + testSetup() + { + testcase("setup_HashRouter"); + + using namespace std::chrono_literals; + { + Config cfg; + // default + auto const setup = setup_HashRouter(cfg); + BEAST_EXPECT(setup.holdTime == 300s); + BEAST_EXPECT(setup.relayTime == 30s); + } + { + Config cfg; + // non-default + auto& h = 
cfg.section("hashrouter"); + h.set("hold_time", "600"); + h.set("relay_time", "15"); + auto const setup = setup_HashRouter(cfg); + BEAST_EXPECT(setup.holdTime == 600s); + BEAST_EXPECT(setup.relayTime == 15s); + } + { + Config cfg; + // equal + auto& h = cfg.section("hashrouter"); + h.set("hold_time", "400"); + h.set("relay_time", "400"); + auto const setup = setup_HashRouter(cfg); + BEAST_EXPECT(setup.holdTime == 400s); + BEAST_EXPECT(setup.relayTime == 400s); + } + { + Config cfg; + // wrong order + auto& h = cfg.section("hashrouter"); + h.set("hold_time", "60"); + h.set("relay_time", "120"); + try + { + setup_HashRouter(cfg); + fail(); + } + catch (std::exception const& e) + { + std::string expected = + "HashRouter relay time must be less than or equal to hold " + "time"; + BEAST_EXPECT(e.what() == expected); + } + } + { + Config cfg; + // too small hold + auto& h = cfg.section("hashrouter"); + h.set("hold_time", "10"); + h.set("relay_time", "120"); + try + { + setup_HashRouter(cfg); + fail(); + } + catch (std::exception const& e) + { + std::string expected = + "HashRouter hold time must be at least 12 seconds (the " + "approximate validation time for three " + "ledgers)."; + BEAST_EXPECT(e.what() == expected); + } + } + { + Config cfg; + // too small relay + auto& h = cfg.section("hashrouter"); + h.set("hold_time", "500"); + h.set("relay_time", "6"); + try + { + setup_HashRouter(cfg); + fail(); + } + catch (std::exception const& e) + { + std::string expected = + "HashRouter relay time must be at least 8 seconds (the " + "approximate validation time for two ledgers)."; + BEAST_EXPECT(e.what() == expected); + } + } + { + Config cfg; + // garbage + auto& h = cfg.section("hashrouter"); + h.set("hold_time", "alice"); + h.set("relay_time", "bob"); + auto const setup = setup_HashRouter(cfg); + // The set function ignores values that don't covert, so the + // defaults are left unchanged + BEAST_EXPECT(setup.holdTime == 300s); + BEAST_EXPECT(setup.relayTime == 30s); + } + } + public: void run() override @@ -253,6 +374,7 @@ public: testSetFlags(); testRelay(); testProcess(); + testSetup(); } }; diff --git a/src/xrpld/app/ledger/LocalTxs.h b/src/xrpld/app/ledger/LocalTxs.h index bf86992a69..391bb4f7ef 100644 --- a/src/xrpld/app/ledger/LocalTxs.h +++ b/src/xrpld/app/ledger/LocalTxs.h @@ -34,6 +34,11 @@ namespace ripple { class LocalTxs { public: + // The number of ledgers to hold a transaction is essentially + // arbitrary. It should be sufficient to allow the transaction to + // get into a fully-validated ledger. + static constexpr int holdLedgers = 5; + virtual ~LocalTxs() = default; // Add a new local transaction diff --git a/src/xrpld/app/ledger/detail/LedgerMaster.cpp b/src/xrpld/app/ledger/detail/LedgerMaster.cpp index 65b3257208..88f3de5b12 100644 --- a/src/xrpld/app/ledger/detail/LedgerMaster.cpp +++ b/src/xrpld/app/ledger/detail/LedgerMaster.cpp @@ -454,25 +454,17 @@ LedgerMaster::storeLedger(std::shared_ptr ledger) void LedgerMaster::applyHeldTransactions() { - std::lock_guard sl(m_mutex); + CanonicalTXSet const set = [this]() { + std::lock_guard sl(m_mutex); + // VFALCO NOTE The hash for an open ledger is undefined so we use + // something that is a reasonable substitute. 
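+ // Swap the held transactions out while the lock is held; the swapped-out
+ // set is processed below, outside the lock, through the regular
+ // NetworkOPs batching (processTransactionSet).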
+ CanonicalTXSet set(app_.openLedger().current()->info().parentHash); + std::swap(mHeldTransactions, set); + return set; + }(); - app_.openLedger().modify([&](OpenView& view, beast::Journal j) { - bool any = false; - for (auto const& it : mHeldTransactions) - { - ApplyFlags flags = tapNONE; - auto const result = - app_.getTxQ().apply(app_, view, it.second, flags, j); - any |= result.applied; - } - return any; - }); - - // VFALCO TODO recreate the CanonicalTxSet object instead of resetting - // it. - // VFALCO NOTE The hash for an open ledger is undefined so we use - // something that is a reasonable substitute. - mHeldTransactions.reset(app_.openLedger().current()->info().parentHash); + if (!set.empty()) + app_.getOPs().processTransactionSet(set); } std::shared_ptr diff --git a/src/xrpld/app/ledger/detail/LocalTxs.cpp b/src/xrpld/app/ledger/detail/LocalTxs.cpp index b6bfc8f850..140bd32961 100644 --- a/src/xrpld/app/ledger/detail/LocalTxs.cpp +++ b/src/xrpld/app/ledger/detail/LocalTxs.cpp @@ -53,14 +53,9 @@ namespace ripple { class LocalTx { public: - // The number of ledgers to hold a transaction is essentially - // arbitrary. It should be sufficient to allow the transaction to - // get into a fully-validated ledger. - static int const holdLedgers = 5; - LocalTx(LedgerIndex index, std::shared_ptr const& txn) : m_txn(txn) - , m_expire(index + holdLedgers) + , m_expire(index + LocalTxs::holdLedgers) , m_id(txn->getTransactionID()) , m_account(txn->getAccountID(sfAccount)) , m_seqProxy(txn->getSeqProxy()) diff --git a/src/xrpld/app/main/Application.cpp b/src/xrpld/app/main/Application.cpp index aa502b4143..6e222858d8 100644 --- a/src/xrpld/app/main/Application.cpp +++ b/src/xrpld/app/main/Application.cpp @@ -442,8 +442,8 @@ public: std::make_unique(logs_->journal("LoadManager"))) , hashRouter_(std::make_unique( - stopwatch(), - HashRouter::getDefaultHoldTime())) + setup_HashRouter(*config_), + stopwatch())) , mValidations( ValidationParms(), diff --git a/src/xrpld/app/main/Tuning.h b/src/xrpld/app/main/Tuning.h index bc8ef357aa..c6ca81643e 100644 --- a/src/xrpld/app/main/Tuning.h +++ b/src/xrpld/app/main/Tuning.h @@ -20,11 +20,15 @@ #ifndef RIPPLE_APP_MAIN_TUNING_H_INCLUDED #define RIPPLE_APP_MAIN_TUNING_H_INCLUDED +#include + namespace ripple { constexpr std::size_t fullBelowTargetSize = 524288; constexpr std::chrono::seconds fullBelowExpiration = std::chrono::minutes{10}; +constexpr std::size_t maxPoppedTransactions = 10; + } // namespace ripple #endif diff --git a/src/xrpld/app/misc/CanonicalTXSet.cpp b/src/xrpld/app/misc/CanonicalTXSet.cpp index bb89b59896..41ee1988e1 100644 --- a/src/xrpld/app/misc/CanonicalTXSet.cpp +++ b/src/xrpld/app/misc/CanonicalTXSet.cpp @@ -66,18 +66,22 @@ CanonicalTXSet::popAcctTransaction(std::shared_ptr const& tx) // 1. Prioritize transactions with Sequences over transactions with // Tickets. // - // 2. Don't worry about consecutive Sequence numbers. Creating Tickets - // can introduce a discontinuity in Sequence numbers. + // 2. For transactions not using Tickets, look for consecutive Sequence + // numbers. For transactions using Tickets, don't worry about + // consecutive Sequence numbers. Tickets can process out of order. // // 3. After handling all transactions with Sequences, return Tickets // with the lowest Ticket ID first. 
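+ // For example, if tx carries Sequence 5, only a held transaction with
+ // Sequence 6 for the same account can be returned here; one with
+ // Sequence 7 is left in place. Ticketed transactions are exempt from
+ // the consecutive-sequence check.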
std::shared_ptr result; uint256 const effectiveAccount{accountKey(tx->getAccountID(sfAccount))}; - Key const after(effectiveAccount, tx->getSeqProxy(), beast::zero); + auto const seqProxy = tx->getSeqProxy(); + Key const after(effectiveAccount, seqProxy, beast::zero); auto const itrNext{map_.lower_bound(after)}; if (itrNext != map_.end() && - itrNext->first.getAccount() == effectiveAccount) + itrNext->first.getAccount() == effectiveAccount && + (!itrNext->second->getSeqProxy().isSeq() || + itrNext->second->getSeqProxy().value() == seqProxy.value() + 1)) { result = std::move(itrNext->second); map_.erase(itrNext); diff --git a/src/xrpld/app/misc/HashRouter.cpp b/src/xrpld/app/misc/HashRouter.cpp index 58e811d4b8..ac522487f5 100644 --- a/src/xrpld/app/misc/HashRouter.cpp +++ b/src/xrpld/app/misc/HashRouter.cpp @@ -18,6 +18,7 @@ //============================================================================== #include +#include namespace ripple { @@ -33,7 +34,7 @@ HashRouter::emplace(uint256 const& key) -> std::pair } // See if any supressions need to be expired - expire(suppressionMap_, holdTime_); + expire(suppressionMap_, setup_.holdTime); return std::make_pair( std::ref(suppressionMap_.emplace(key, Entry()).first->second), true); @@ -122,10 +123,45 @@ HashRouter::shouldRelay(uint256 const& key) auto& s = emplace(key).first; - if (!s.shouldRelay(suppressionMap_.clock().now(), holdTime_)) + if (!s.shouldRelay(suppressionMap_.clock().now(), setup_.relayTime)) return {}; return s.releasePeerSet(); } +HashRouter::Setup +setup_HashRouter(Config const& config) +{ + using namespace std::chrono; + + HashRouter::Setup setup; + auto const& section = config.section("hashrouter"); + + std::int32_t tmp; + + if (set(tmp, "hold_time", section)) + { + if (tmp < 12) + Throw( + "HashRouter hold time must be at least 12 seconds (the " + "approximate validation time for three ledgers)."); + setup.holdTime = seconds(tmp); + } + if (set(tmp, "relay_time", section)) + { + if (tmp < 8) + Throw( + "HashRouter relay time must be at least 8 seconds (the " + "approximate validation time for two ledgers)."); + setup.relayTime = seconds(tmp); + } + if (setup.relayTime > setup.holdTime) + { + Throw( + "HashRouter relay time must be less than or equal to hold time"); + } + + return setup; +} + } // namespace ripple diff --git a/src/xrpld/app/misc/HashRouter.h b/src/xrpld/app/misc/HashRouter.h index e9d040fc8b..a13bcb9f8f 100644 --- a/src/xrpld/app/misc/HashRouter.h +++ b/src/xrpld/app/misc/HashRouter.h @@ -33,6 +33,7 @@ namespace ripple { // TODO convert these macros to int constants or an enum #define SF_BAD 0x02 // Temporarily bad #define SF_SAVED 0x04 +#define SF_HELD 0x08 // Held by LedgerMaster after potential processing failure #define SF_TRUSTED 0x10 // comes from trusted source // Private flags, used internally in apply.cpp. @@ -44,6 +45,8 @@ namespace ripple { #define SF_PRIVATE5 0x1000 #define SF_PRIVATE6 0x2000 +class Config; + /** Routing table for objects identified by hash. This table keeps track of which hashes have been received by which peers. @@ -56,6 +59,30 @@ public: // The type here *MUST* match the type of Peer::id_t using PeerShortID = std::uint32_t; + /** Structure used to customize @ref HashRouter behavior. + * + * Even though these items are configurable, they are undocumented. Don't + * change them unless there is a good reason, and network-wide coordination + * to do it. + * + * Configuration is processed in setup_HashRouter. 
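+ *
+ * Defaults are a 300 second hold time and a 30 second relay time.
+ * setup_HashRouter rejects hold times under 12 seconds, relay times
+ * under 8 seconds, and relay times greater than the hold time.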
+ */ + struct Setup + { + /// Default constructor + explicit Setup() = default; + + using seconds = std::chrono::seconds; + + /** Expiration time for a hash entry + */ + seconds holdTime{300}; + + /** Amount of time required before a relayed item will be relayed again. + */ + seconds relayTime{30}; + }; + private: /** An entry in the routing table. */ @@ -108,9 +135,9 @@ private: bool shouldRelay( Stopwatch::time_point const& now, - std::chrono::seconds holdTime) + std::chrono::seconds relayTime) { - if (relayed_ && *relayed_ + holdTime > now) + if (relayed_ && *relayed_ + relayTime > now) return false; relayed_.emplace(now); return true; @@ -135,16 +162,8 @@ private: }; public: - static inline std::chrono::seconds - getDefaultHoldTime() - { - using namespace std::chrono; - - return 300s; - } - - HashRouter(Stopwatch& clock, std::chrono::seconds entryHoldTimeInSeconds) - : suppressionMap_(clock), holdTime_(entryHoldTimeInSeconds) + HashRouter(Setup const& setup, Stopwatch& clock) + : setup_(setup), suppressionMap_(clock) { } @@ -195,11 +214,11 @@ public: Effects: If the item should be relayed, this function will not - return `true` again until the hold time has expired. + return a seated optional again until the relay time has expired. The internal set of peers will also be reset. @return A `std::optional` set of peers which do not need to be - relayed to. If the result is uninitialized, the item should + relayed to. If the result is unseated, the item should _not_ be relayed. */ std::optional> @@ -212,6 +231,9 @@ private: std::mutex mutable mutex_; + // Configurable parameters + Setup const setup_; + // Stores all suppressed hashes and their expiration time beast::aged_unordered_map< uint256, @@ -219,10 +241,11 @@ private: Stopwatch::clock_type, hardened_hash> suppressionMap_; - - std::chrono::seconds const holdTime_; }; +HashRouter::Setup +setup_HashRouter(Config const&); + } // namespace ripple #endif diff --git a/src/xrpld/app/misc/NetworkOPs.cpp b/src/xrpld/app/misc/NetworkOPs.cpp index abd6ff7da7..b05f38f3ed 100644 --- a/src/xrpld/app/misc/NetworkOPs.cpp +++ b/src/xrpld/app/misc/NetworkOPs.cpp @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -298,6 +299,9 @@ public: bool bLocal, FailHard failType) override; + void + processTransactionSet(CanonicalTXSet const& set) override; + /** * For transactions submitted directly by a client, apply batch of * transactions and wait for this transaction to complete. @@ -327,6 +331,16 @@ public: bool bUnlimited, FailHard failtype); +private: + bool + preProcessTransaction(std::shared_ptr& transaction); + + void + doTransactionSyncBatch( + std::unique_lock& lock, + std::function const&)> retryCallback); + +public: /** * Apply transactions in batches. Continue until none are queued. 
*/ @@ -1221,14 +1235,9 @@ NetworkOPsImp::submitTransaction(std::shared_ptr const& iTrans) }); } -void -NetworkOPsImp::processTransaction( - std::shared_ptr& transaction, - bool bUnlimited, - bool bLocal, - FailHard failType) +bool +NetworkOPsImp::preProcessTransaction(std::shared_ptr& transaction) { - auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXN"); auto const newFlags = app_.getHashRouter().getFlags(transaction->getID()); if ((newFlags & SF_BAD) != 0) @@ -1237,7 +1246,7 @@ NetworkOPsImp::processTransaction( JLOG(m_journal.warn()) << transaction->getID() << ": cached bad!\n"; transaction->setStatus(INVALID); transaction->setResult(temBAD_SIGNATURE); - return; + return false; } // NOTE eahennis - I think this check is redundant, @@ -1260,12 +1269,28 @@ NetworkOPsImp::processTransaction( transaction->setStatus(INVALID); transaction->setResult(temBAD_SIGNATURE); app_.getHashRouter().setFlags(transaction->getID(), SF_BAD); - return; + return false; } // canonicalize can change our pointer app_.getMasterTransaction().canonicalize(&transaction); + return true; +} + +void +NetworkOPsImp::processTransaction( + std::shared_ptr& transaction, + bool bUnlimited, + bool bLocal, + FailHard failType) +{ + auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXN"); + + // preProcessTransaction can change our pointer + if (!preProcessTransaction(transaction)) + return; + if (bLocal) doTransactionSync(transaction, bUnlimited, failType); else @@ -1312,6 +1337,17 @@ NetworkOPsImp::doTransactionSync( transaction->setApplying(); } + doTransactionSyncBatch( + lock, [&transaction](std::unique_lock const&) { + return transaction->getApplying(); + }); +} + +void +NetworkOPsImp::doTransactionSyncBatch( + std::unique_lock& lock, + std::function const&)> retryCallback) +{ do { if (mDispatchState == DispatchState::running) @@ -1334,7 +1370,70 @@ NetworkOPsImp::doTransactionSync( } } } - } while (transaction->getApplying()); + } while (retryCallback(lock)); +} + +void +NetworkOPsImp::processTransactionSet(CanonicalTXSet const& set) +{ + auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXNSet"); + std::vector> candidates; + candidates.reserve(set.size()); + for (auto const& [_, tx] : set) + { + std::string reason; + auto transaction = std::make_shared(tx, reason, app_); + + if (transaction->getStatus() == INVALID) + { + if (!reason.empty()) + { + JLOG(m_journal.trace()) + << "Exception checking transaction: " << reason; + } + app_.getHashRouter().setFlags(tx->getTransactionID(), SF_BAD); + continue; + } + + // preProcessTransaction can change our pointer + if (!preProcessTransaction(transaction)) + continue; + + candidates.emplace_back(transaction); + } + + std::vector transactions; + transactions.reserve(candidates.size()); + + std::unique_lock lock(mMutex); + + for (auto& transaction : candidates) + { + if (!transaction->getApplying()) + { + transactions.emplace_back(transaction, false, false, FailHard::no); + transaction->setApplying(); + } + } + + if (mTransactions.empty()) + mTransactions.swap(transactions); + else + { + mTransactions.reserve(mTransactions.size() + transactions.size()); + for (auto& t : transactions) + mTransactions.push_back(std::move(t)); + } + + doTransactionSyncBatch(lock, [&](std::unique_lock const&) { + XRPL_ASSERT( + lock.owns_lock(), + "ripple::NetworkOPsImp::processTransactionSet has lock"); + return std::any_of( + mTransactions.begin(), mTransactions.end(), [](auto const& t) { + return t.transaction->getApplying(); + }); + }); } void @@ -1441,16 +1540,28 @@ 
NetworkOPsImp::apply(std::unique_lock& batchLock) << "Transaction is now included in open ledger"; e.transaction->setStatus(INCLUDED); + // Pop as many "reasonable" transactions for this account as + // possible. "Reasonable" means they have sequential sequence + // numbers, or use tickets. auto const& txCur = e.transaction->getSTransaction(); - auto const txNext = m_ledgerMaster.popAcctTransaction(txCur); - if (txNext) + + std::size_t count = 0; + for (auto txNext = m_ledgerMaster.popAcctTransaction(txCur); + txNext && count < maxPoppedTransactions; + txNext = m_ledgerMaster.popAcctTransaction(txCur), ++count) { + if (!batchLock.owns_lock()) + batchLock.lock(); std::string reason; auto const trans = sterilize(*txNext); auto t = std::make_shared(trans, reason, app_); + if (t->getApplying()) + break; submit_held.emplace_back(t, false, false, FailHard::no); t->setApplying(); } + if (batchLock.owns_lock()) + batchLock.unlock(); } else if (e.result == tefPAST_SEQ) { @@ -1472,16 +1583,54 @@ NetworkOPsImp::apply(std::unique_lock& batchLock) e.transaction->setQueued(); e.transaction->setKept(); } - else if (isTerRetry(e.result)) + else if ( + isTerRetry(e.result) || isTelLocal(e.result) || + isTefFailure(e.result)) { if (e.failType != FailHard::yes) { - // transaction should be held - JLOG(m_journal.debug()) - << "Transaction should be held: " << e.result; - e.transaction->setStatus(HELD); - m_ledgerMaster.addHeldTransaction(e.transaction); - e.transaction->setKept(); + auto const lastLedgerSeq = + e.transaction->getSTransaction()->at( + ~sfLastLedgerSequence); + auto const ledgersLeft = lastLedgerSeq + ? *lastLedgerSeq - + m_ledgerMaster.getCurrentLedgerIndex() + : std::optional{}; + // If any of these conditions are met, the transaction can + // be held: + // 1. It was submitted locally. (Note that this flag is only + // true on the initial submission.) + // 2. The transaction has a LastLedgerSequence, and the + // LastLedgerSequence is fewer than LocalTxs::holdLedgers + // (5) ledgers into the future. (Remember that an + // unseated optional compares as less than all seated + // values, so it has to be checked explicitly first.) + // 3. The SF_HELD flag is not set on the txID. (setFlags + // checks before setting. If the flag is set, it returns + // false, which means it's been held once without one of + // the other conditions, so don't hold it again. Time's + // up!) + // + if (e.local || + (ledgersLeft && ledgersLeft <= LocalTxs::holdLedgers) || + app_.getHashRouter().setFlags( + e.transaction->getID(), SF_HELD)) + { + // transaction should be held + JLOG(m_journal.debug()) + << "Transaction should be held: " << e.result; + e.transaction->setStatus(HELD); + m_ledgerMaster.addHeldTransaction(e.transaction); + e.transaction->setKept(); + } + else + JLOG(m_journal.debug()) + << "Not holding transaction " + << e.transaction->getID() << ": " + << (e.local ? "local" : "network") << ", " + << "result: " << e.result << " ledgers left: " + << (ledgersLeft ? 
to_string(*ledgersLeft) + : "unspecified"); } } else @@ -1549,8 +1698,11 @@ NetworkOPsImp::apply(std::unique_lock& batchLock) if (mTransactions.empty()) mTransactions.swap(submit_held); else + { + mTransactions.reserve(mTransactions.size() + submit_held.size()); for (auto& e : submit_held) mTransactions.push_back(std::move(e)); + } } mCond.notify_all(); diff --git a/src/xrpld/app/misc/NetworkOPs.h b/src/xrpld/app/misc/NetworkOPs.h index e353552213..b8da7d7dc7 100644 --- a/src/xrpld/app/misc/NetworkOPs.h +++ b/src/xrpld/app/misc/NetworkOPs.h @@ -42,6 +42,7 @@ class Peer; class LedgerMaster; class Transaction; class ValidatorKeys; +class CanonicalTXSet; // This is the primary interface into the "client" portion of the program. // Code that wants to do normal operations on the network such as @@ -140,6 +141,15 @@ public: bool bLocal, FailHard failType) = 0; + /** + * Process a set of transactions synchronously, and ensuring that they are + * processed in one batch. + * + * @param set Transaction object set + */ + virtual void + processTransactionSet(CanonicalTXSet const& set) = 0; + //-------------------------------------------------------------------------- // // Owner functions diff --git a/src/xrpld/app/misc/Transaction.h b/src/xrpld/app/misc/Transaction.h index 82e5b55bf6..817e68817c 100644 --- a/src/xrpld/app/misc/Transaction.h +++ b/src/xrpld/app/misc/Transaction.h @@ -152,6 +152,8 @@ public: void setApplying() { + // Note that all access to mApplying are made by NetworkOPsImp, and must + // be done under that class's lock. mApplying = true; } @@ -163,6 +165,8 @@ public: bool getApplying() { + // Note that all access to mApplying are made by NetworkOPsImp, and must + // be done under that class's lock. return mApplying; } @@ -172,6 +176,8 @@ public: void clearApplying() { + // Note that all access to mApplying are made by NetworkOPsImp, and must + // be done under that class's lock. mApplying = false; } @@ -396,6 +402,24 @@ private: std::optional mNetworkID; TransStatus mStatus = INVALID; TER mResult = temUNCERTAIN; + /* Note that all access to mApplying are made by NetworkOPsImp, + and must be done under that class's lock. This avoids the overhead of + taking a separate lock, and the consequences of a race condition are + nearly-zero. + + 1. If there is a race condition, and getApplying returns false when it + should be true, the transaction will be processed again. Not that + big a deal if it's a rare one-off. Most of the time, it'll get + tefALREADY or tefPAST_SEQ. + 2. On the flip side, if it returns true, when it should be false, then + the transaction must have been attempted recently, so no big deal if + it doesn't immediately get tried right away. + 3. If there's a race between setApplying and clearApplying, and the + flag ends up set, then a batch is about to try to process the + transaction and will call clearApplying later. If it ends up + cleared, then it might get attempted again later as is the case with + item 1. + */ bool mApplying = false; /** different ways for transaction to be accepted */ From 9ec2d7f8ffe0a50bdfa1d8d9b97688a9347eb83f Mon Sep 17 00:00:00 2001 From: Vito Tumas <5780819+Tapanito@users.noreply.github.com> Date: Fri, 2 May 2025 17:01:45 +0200 Subject: [PATCH 018/244] Enable passive squelching (#5358) This change updates the squelching logic to accept squelch messages for untrusted validators. As a result, servers will also squelch untrusted validator messages reducing duplicate traffic they generate. 
In particular: * Updates squelch message handling logic to squelch messages for all validators, not only trusted ones. * Updates the logic to send squelch messages to peers that don't squelch themselves * Increases the threshold for the number of messages that a peer has to deliver to consider it as a candidate for validator messages. --- src/xrpld/overlay/ReduceRelayCommon.h | 4 ++-- src/xrpld/overlay/detail/PeerImp.cpp | 15 +-------------- src/xrpld/overlay/detail/PeerImp.h | 5 +---- 3 files changed, 4 insertions(+), 20 deletions(-) diff --git a/src/xrpld/overlay/ReduceRelayCommon.h b/src/xrpld/overlay/ReduceRelayCommon.h index 01d7dc597f..473e5d1527 100644 --- a/src/xrpld/overlay/ReduceRelayCommon.h +++ b/src/xrpld/overlay/ReduceRelayCommon.h @@ -45,8 +45,8 @@ static constexpr auto IDLED = std::chrono::seconds{8}; // of messages from the validator. We add peers who reach // MIN_MESSAGE_THRESHOLD to considered pool once MAX_SELECTED_PEERS // reach MAX_MESSAGE_THRESHOLD. -static constexpr uint16_t MIN_MESSAGE_THRESHOLD = 9; -static constexpr uint16_t MAX_MESSAGE_THRESHOLD = 10; +static constexpr uint16_t MIN_MESSAGE_THRESHOLD = 19; +static constexpr uint16_t MAX_MESSAGE_THRESHOLD = 20; // Max selected peers to choose as the source of messages from validator static constexpr uint16_t MAX_SELECTED_PEERS = 5; // Wait before reduce-relay feature is enabled on boot up to let diff --git a/src/xrpld/overlay/detail/PeerImp.cpp b/src/xrpld/overlay/detail/PeerImp.cpp index 1ee72ea5eb..372ad9de53 100644 --- a/src/xrpld/overlay/detail/PeerImp.cpp +++ b/src/xrpld/overlay/detail/PeerImp.cpp @@ -112,10 +112,7 @@ PeerImp::PeerImp( headers_, FEATURE_TXRR, app_.config().TX_REDUCE_RELAY_ENABLE)) - , vpReduceRelayEnabled_(peerFeatureEnabled( - headers_, - FEATURE_VPRR, - app_.config().VP_REDUCE_RELAY_ENABLE)) + , vpReduceRelayEnabled_(app_.config().VP_REDUCE_RELAY_ENABLE) , ledgerReplayEnabled_(peerFeatureEnabled( headers_, FEATURE_LEDGER_REPLAY, @@ -2705,16 +2702,6 @@ PeerImp::onMessage(std::shared_ptr const& m) } PublicKey key(slice); - // Ignore non-validator squelch - if (!app_.validators().listed(key)) - { - fee_.update(Resource::feeInvalidData, "squelch non-validator"); - JLOG(p_journal_.debug()) - << "onMessage: TMSquelch discarding non-validator squelch " - << slice; - return; - } - // Ignore the squelch for validator's own messages. if (key == app_.getValidationPublicKey()) { diff --git a/src/xrpld/overlay/detail/PeerImp.h b/src/xrpld/overlay/detail/PeerImp.h index 459b359ffb..9835f4c6f4 100644 --- a/src/xrpld/overlay/detail/PeerImp.h +++ b/src/xrpld/overlay/detail/PeerImp.h @@ -705,10 +705,7 @@ PeerImp::PeerImp( headers_, FEATURE_TXRR, app_.config().TX_REDUCE_RELAY_ENABLE)) - , vpReduceRelayEnabled_(peerFeatureEnabled( - headers_, - FEATURE_VPRR, - app_.config().VP_REDUCE_RELAY_ENABLE)) + , vpReduceRelayEnabled_(app_.config().VP_REDUCE_RELAY_ENABLE) , ledgerReplayEnabled_(peerFeatureEnabled( headers_, FEATURE_LEDGER_REPLAY, From 2db279180571e4cb90ace09605df7ed2f19a1bb8 Mon Sep 17 00:00:00 2001 From: yinyiqian1 Date: Thu, 8 May 2025 06:14:02 -0400 Subject: [PATCH 019/244] Add PermissionDelegation feature (#5354) This change implements the account permission delegation described in XLS-75d, see https://github.com/XRPLF/XRPL-Standards/pull/257. * Introduces transaction-level and granular permissions that can be delegated to other accounts. * Adds `DelegateSet` transaction to grant specified permissions to another account. 
* Adds `ltDelegate` ledger object to maintain the permission list for delegating/delegated account pair. * Adds an optional `Delegate` field in common fields, allowing a delegated account to send transactions on behalf of the delegating account within the granted permission scope. The `Account` field remains the delegating account; the `Delegate` field specifies the delegated account. The transaction is signed by the delegated account. --- include/xrpl/protocol/ErrorCodes.h | 2 +- include/xrpl/protocol/Indexes.h | 4 + include/xrpl/protocol/Permissions.h | 97 ++ include/xrpl/protocol/Protocol.h | 4 + include/xrpl/protocol/TxFlags.h | 9 + include/xrpl/protocol/TxFormats.h | 2 +- include/xrpl/protocol/detail/features.macro | 1 + .../xrpl/protocol/detail/ledger_entries.macro | 12 + .../xrpl/protocol/detail/permissions.macro | 68 + include/xrpl/protocol/detail/sfields.macro | 4 + .../xrpl/protocol/detail/transactions.macro | 124 +- include/xrpl/protocol/jss.h | 3 +- src/libxrpl/protocol/ErrorCodes.cpp | 1 + src/libxrpl/protocol/Indexes.cpp | 9 + src/libxrpl/protocol/InnerObjectFormats.cpp | 4 + src/libxrpl/protocol/Permissions.cpp | 148 ++ src/libxrpl/protocol/STInteger.cpp | 22 + src/libxrpl/protocol/STParsedJSON.cpp | 34 +- src/libxrpl/protocol/TxFormats.cpp | 3 +- src/test/app/Delegate_test.cpp | 1437 +++++++++++++++++ src/test/jtx/delegate.h | 62 + src/test/jtx/flags.h | 12 + src/test/jtx/impl/Env.cpp | 4 +- src/test/jtx/impl/delegate.cpp | 67 + src/test/jtx/impl/mpt.cpp | 2 + src/test/jtx/mpt.h | 1 + src/test/rpc/JSONRPC_test.cpp | 72 + src/test/rpc/LedgerEntry_test.cpp | 112 ++ src/test/rpc/LedgerRPC_test.cpp | 1 + src/xrpld/app/misc/AMMUtils.h | 6 +- src/xrpld/app/misc/DelegateUtils.h | 56 + src/xrpld/app/misc/detail/DelegateUtils.cpp | 66 + src/xrpld/app/tx/detail/DelegateSet.cpp | 162 ++ src/xrpld/app/tx/detail/DelegateSet.h | 56 + src/xrpld/app/tx/detail/DeleteAccount.cpp | 15 + src/xrpld/app/tx/detail/InvariantCheck.cpp | 2 + .../app/tx/detail/MPTokenIssuanceSet.cpp | 38 + src/xrpld/app/tx/detail/MPTokenIssuanceSet.h | 3 + src/xrpld/app/tx/detail/Payment.cpp | 34 + src/xrpld/app/tx/detail/Payment.h | 3 + src/xrpld/app/tx/detail/SetAccount.cpp | 56 + src/xrpld/app/tx/detail/SetAccount.h | 3 + src/xrpld/app/tx/detail/SetTrust.cpp | 64 + src/xrpld/app/tx/detail/SetTrust.h | 3 + src/xrpld/app/tx/detail/Transactor.cpp | 88 +- src/xrpld/app/tx/detail/Transactor.h | 4 + src/xrpld/app/tx/detail/applySteps.cpp | 10 +- src/xrpld/rpc/detail/TransactionSign.cpp | 36 +- src/xrpld/rpc/handlers/LedgerEntry.cpp | 41 + 49 files changed, 2976 insertions(+), 91 deletions(-) create mode 100644 include/xrpl/protocol/Permissions.h create mode 100644 include/xrpl/protocol/detail/permissions.macro create mode 100644 src/libxrpl/protocol/Permissions.cpp create mode 100644 src/test/app/Delegate_test.cpp create mode 100644 src/test/jtx/delegate.h create mode 100644 src/test/jtx/impl/delegate.cpp create mode 100644 src/xrpld/app/misc/DelegateUtils.h create mode 100644 src/xrpld/app/misc/detail/DelegateUtils.cpp create mode 100644 src/xrpld/app/tx/detail/DelegateSet.cpp create mode 100644 src/xrpld/app/tx/detail/DelegateSet.h diff --git a/include/xrpl/protocol/ErrorCodes.h b/include/xrpl/protocol/ErrorCodes.h index 2e7cb3b410..66b4dd178c 100644 --- a/include/xrpl/protocol/ErrorCodes.h +++ b/include/xrpl/protocol/ErrorCodes.h @@ -120,7 +120,7 @@ enum error_code_i { rpcSRC_ACT_MALFORMED = 65, rpcSRC_ACT_MISSING = 66, rpcSRC_ACT_NOT_FOUND = 67, - // unused 68, + rpcDELEGATE_ACT_NOT_FOUND = 68, rpcSRC_CUR_MALFORMED 
= 69, rpcSRC_ISR_MALFORMED = 70, rpcSTREAM_MALFORMED = 71, diff --git a/include/xrpl/protocol/Indexes.h b/include/xrpl/protocol/Indexes.h index bbed539592..979a994c10 100644 --- a/include/xrpl/protocol/Indexes.h +++ b/include/xrpl/protocol/Indexes.h @@ -279,6 +279,10 @@ amm(Asset const& issue1, Asset const& issue2) noexcept; Keylet amm(uint256 const& amm) noexcept; +/** A keylet for Delegate object */ +Keylet +delegate(AccountID const& account, AccountID const& authorizedAccount) noexcept; + Keylet bridge(STXChainBridge const& bridge, STXChainBridge::ChainType chainType); diff --git a/include/xrpl/protocol/Permissions.h b/include/xrpl/protocol/Permissions.h new file mode 100644 index 0000000000..eb2c733313 --- /dev/null +++ b/include/xrpl/protocol/Permissions.h @@ -0,0 +1,97 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2025 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_PROTOCOL_PERMISSION_H_INCLUDED +#define RIPPLE_PROTOCOL_PERMISSION_H_INCLUDED + +#include + +#include +#include +#include +#include + +namespace ripple { +/** + * We have both transaction type permissions and granular type permissions. + * Since we will reuse the TransactionFormats to parse the Transaction + * Permissions, only the GranularPermissionType is defined here. To prevent + * conflicts with TxType, the GranularPermissionType is always set to a value + * greater than the maximum value of uint16. 
+ */ +enum GranularPermissionType : std::uint32_t { +#pragma push_macro("PERMISSION") +#undef PERMISSION + +#define PERMISSION(type, txType, value) type = value, + +#include + +#undef PERMISSION +#pragma pop_macro("PERMISSION") +}; + +enum Delegation { delegatable, notDelegatable }; + +class Permission +{ +private: + Permission(); + + std::unordered_map delegatableTx_; + + std::unordered_map + granularPermissionMap_; + + std::unordered_map granularNameMap_; + + std::unordered_map granularTxTypeMap_; + +public: + static Permission const& + getInstance(); + + Permission(const Permission&) = delete; + Permission& + operator=(const Permission&) = delete; + + std::optional + getGranularValue(std::string const& name) const; + + std::optional + getGranularName(GranularPermissionType const& value) const; + + std::optional + getGranularTxType(GranularPermissionType const& gpType) const; + + bool + isDelegatable(std::uint32_t const& permissionValue) const; + + // for tx level permission, permission value is equal to tx type plus one + uint32_t + txToPermissionType(const TxType& type) const; + + // tx type value is permission value minus one + TxType + permissionToTxType(uint32_t const& value) const; +}; + +} // namespace ripple + +#endif diff --git a/include/xrpl/protocol/Protocol.h b/include/xrpl/protocol/Protocol.h index 1e8c76dbd8..041b53d6cb 100644 --- a/include/xrpl/protocol/Protocol.h +++ b/include/xrpl/protocol/Protocol.h @@ -155,6 +155,10 @@ std::size_t constexpr maxPriceScale = 20; */ std::size_t constexpr maxTrim = 25; +/** The maximum number of delegate permissions an account can grant + */ +std::size_t constexpr permissionMaxSize = 10; + } // namespace ripple #endif diff --git a/include/xrpl/protocol/TxFlags.h b/include/xrpl/protocol/TxFlags.h index e94e79aee5..7a600676f8 100644 --- a/include/xrpl/protocol/TxFlags.h +++ b/include/xrpl/protocol/TxFlags.h @@ -120,6 +120,13 @@ constexpr std::uint32_t tfTrustSetMask = ~(tfUniversal | tfSetfAuth | tfSetNoRipple | tfClearNoRipple | tfSetFreeze | tfClearFreeze | tfSetDeepFreeze | tfClearDeepFreeze); +// valid flags for granular permission +constexpr std::uint32_t tfTrustSetGranularMask = tfSetfAuth | tfSetFreeze | tfClearFreeze; + +// bits representing supportedGranularMask are set to 0 and the bits +// representing other flags are set to 1 in tfPermissionMask. 
+constexpr std::uint32_t tfTrustSetPermissionMask = (~tfTrustSetMask) & (~tfTrustSetGranularMask); + // EnableAmendment flags: constexpr std::uint32_t tfGotMajority = 0x00010000; constexpr std::uint32_t tfLostMajority = 0x00020000; @@ -155,6 +162,8 @@ constexpr std::uint32_t const tfMPTokenAuthorizeMask = ~(tfUniversal | tfMPTUna constexpr std::uint32_t const tfMPTLock = 0x00000001; constexpr std::uint32_t const tfMPTUnlock = 0x00000002; constexpr std::uint32_t const tfMPTokenIssuanceSetMask = ~(tfUniversal | tfMPTLock | tfMPTUnlock); +constexpr std::uint32_t const tfMPTokenIssuanceSetGranularMask = tfMPTLock | tfMPTUnlock; +constexpr std::uint32_t const tfMPTokenIssuanceSetPermissionMask = (~tfMPTokenIssuanceSetMask) & (~tfMPTokenIssuanceSetGranularMask); // MPTokenIssuanceDestroy flags: constexpr std::uint32_t const tfMPTokenIssuanceDestroyMask = ~tfUniversal; diff --git a/include/xrpl/protocol/TxFormats.h b/include/xrpl/protocol/TxFormats.h index 7eb6fb72f7..70b721a3d7 100644 --- a/include/xrpl/protocol/TxFormats.h +++ b/include/xrpl/protocol/TxFormats.h @@ -59,7 +59,7 @@ enum TxType : std::uint16_t #pragma push_macro("TRANSACTION") #undef TRANSACTION -#define TRANSACTION(tag, value, name, fields) tag = value, +#define TRANSACTION(tag, value, name, delegatable, fields) tag = value, #include diff --git a/include/xrpl/protocol/detail/features.macro b/include/xrpl/protocol/detail/features.macro index ac393eae98..31b5c25d91 100644 --- a/include/xrpl/protocol/detail/features.macro +++ b/include/xrpl/protocol/detail/features.macro @@ -32,6 +32,7 @@ // If you add an amendment here, then do not forget to increment `numFeatures` // in include/xrpl/protocol/Feature.h. +XRPL_FEATURE(PermissionDelegation, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (PayChanCancelAfter, Supported::yes, VoteBehavior::DefaultNo) // Check flags in Credential transactions XRPL_FIX (InvalidTxFlags, Supported::yes, VoteBehavior::DefaultNo) diff --git a/include/xrpl/protocol/detail/ledger_entries.macro b/include/xrpl/protocol/detail/ledger_entries.macro index 5a652baf4f..66573eaf4a 100644 --- a/include/xrpl/protocol/detail/ledger_entries.macro +++ b/include/xrpl/protocol/detail/ledger_entries.macro @@ -460,6 +460,18 @@ LEDGER_ENTRY(ltPERMISSIONED_DOMAIN, 0x0082, PermissionedDomain, permissioned_dom {sfPreviousTxnLgrSeq, soeREQUIRED}, })) +/** A ledger object representing permissions an account has delegated to another account. + \sa keylet::delegate + */ +LEDGER_ENTRY(ltDELEGATE, 0x0083, Delegate, delegate, ({ + {sfAccount, soeREQUIRED}, + {sfAuthorize, soeREQUIRED}, + {sfPermissions, soeREQUIRED}, + {sfOwnerNode, soeREQUIRED}, + {sfPreviousTxnID, soeREQUIRED}, + {sfPreviousTxnLgrSeq, soeREQUIRED}, +})) + #undef EXPAND #undef LEDGER_ENTRY_DUPLICATE diff --git a/include/xrpl/protocol/detail/permissions.macro b/include/xrpl/protocol/detail/permissions.macro new file mode 100644 index 0000000000..ec19c5767f --- /dev/null +++ b/include/xrpl/protocol/detail/permissions.macro @@ -0,0 +1,68 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2025 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. 
+ + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#if !defined(PERMISSION) +#error "undefined macro: PERMISSION" +#endif + +/** + * PERMISSION(name, type, txType, value) + * + * This macro defines a permission: + * name: the name of the permission. + * type: the GranularPermissionType enum. + * txType: the corresponding TxType for this permission. + * value: the uint32 numeric value for the enum type. + */ + +/** This permission grants the delegated account the ability to authorize a trustline. */ +PERMISSION(TrustlineAuthorize, ttTRUST_SET, 65537) + +/** This permission grants the delegated account the ability to freeze a trustline. */ +PERMISSION(TrustlineFreeze, ttTRUST_SET, 65538) + +/** This permission grants the delegated account the ability to unfreeze a trustline. */ +PERMISSION(TrustlineUnfreeze, ttTRUST_SET, 65539) + +/** This permission grants the delegated account the ability to set Domain. */ +PERMISSION(AccountDomainSet, ttACCOUNT_SET, 65540) + +/** This permission grants the delegated account the ability to set EmailHashSet. */ +PERMISSION(AccountEmailHashSet, ttACCOUNT_SET, 65541) + +/** This permission grants the delegated account the ability to set MessageKey. */ +PERMISSION(AccountMessageKeySet, ttACCOUNT_SET, 65542) + +/** This permission grants the delegated account the ability to set TransferRate. */ +PERMISSION(AccountTransferRateSet, ttACCOUNT_SET, 65543) + +/** This permission grants the delegated account the ability to set TickSize. */ +PERMISSION(AccountTickSizeSet, ttACCOUNT_SET, 65544) + +/** This permission grants the delegated account the ability to mint payment, which means sending a payment for a currency where the sending account is the issuer. */ +PERMISSION(PaymentMint, ttPAYMENT, 65545) + +/** This permission grants the delegated account the ability to burn payment, which means sending a payment for a currency where the destination account is the issuer */ +PERMISSION(PaymentBurn, ttPAYMENT, 65546) + +/** This permission grants the delegated account the ability to lock MPToken. */ +PERMISSION(MPTokenIssuanceLock, ttMPTOKEN_ISSUANCE_SET, 65547) + +/** This permission grants the delegated account the ability to unlock MPToken. 
*/ +PERMISSION(MPTokenIssuanceUnlock, ttMPTOKEN_ISSUANCE_SET, 65548) diff --git a/include/xrpl/protocol/detail/sfields.macro b/include/xrpl/protocol/detail/sfields.macro index 3217bab913..e98709c8c3 100644 --- a/include/xrpl/protocol/detail/sfields.macro +++ b/include/xrpl/protocol/detail/sfields.macro @@ -112,6 +112,7 @@ TYPED_SFIELD(sfEmitGeneration, UINT32, 46) TYPED_SFIELD(sfVoteWeight, UINT32, 48) TYPED_SFIELD(sfFirstNFTokenSequence, UINT32, 50) TYPED_SFIELD(sfOracleDocumentID, UINT32, 51) +TYPED_SFIELD(sfPermissionValue, UINT32, 52) // 64-bit integers (common) TYPED_SFIELD(sfIndexNext, UINT64, 1) @@ -278,6 +279,7 @@ TYPED_SFIELD(sfRegularKey, ACCOUNT, 8) TYPED_SFIELD(sfNFTokenMinter, ACCOUNT, 9) TYPED_SFIELD(sfEmitCallback, ACCOUNT, 10) TYPED_SFIELD(sfHolder, ACCOUNT, 11) +TYPED_SFIELD(sfDelegate, ACCOUNT, 12) // account (uncommon) TYPED_SFIELD(sfHookAccount, ACCOUNT, 16) @@ -327,6 +329,7 @@ UNTYPED_SFIELD(sfSignerEntry, OBJECT, 11) UNTYPED_SFIELD(sfNFToken, OBJECT, 12) UNTYPED_SFIELD(sfEmitDetails, OBJECT, 13) UNTYPED_SFIELD(sfHook, OBJECT, 14) +UNTYPED_SFIELD(sfPermission, OBJECT, 15) // inner object (uncommon) UNTYPED_SFIELD(sfSigner, OBJECT, 16) @@ -377,3 +380,4 @@ UNTYPED_SFIELD(sfAuthAccounts, ARRAY, 25) UNTYPED_SFIELD(sfAuthorizeCredentials, ARRAY, 26) UNTYPED_SFIELD(sfUnauthorizeCredentials, ARRAY, 27) UNTYPED_SFIELD(sfAcceptedCredentials, ARRAY, 28) +UNTYPED_SFIELD(sfPermissions, ARRAY, 29) diff --git a/include/xrpl/protocol/detail/transactions.macro b/include/xrpl/protocol/detail/transactions.macro index dd3ac42325..61479611aa 100644 --- a/include/xrpl/protocol/detail/transactions.macro +++ b/include/xrpl/protocol/detail/transactions.macro @@ -22,14 +22,14 @@ #endif /** - * TRANSACTION(tag, value, name, fields) + * TRANSACTION(tag, value, name, delegatable, fields) * * You must define a transactor class in the `ripple` namespace named `name`, * and include its header in `src/xrpld/app/tx/detail/applySteps.cpp`. */ /** This transaction type executes a payment. */ -TRANSACTION(ttPAYMENT, 0, Payment, ({ +TRANSACTION(ttPAYMENT, 0, Payment, Delegation::delegatable, ({ {sfDestination, soeREQUIRED}, {sfAmount, soeREQUIRED, soeMPTSupported}, {sfSendMax, soeOPTIONAL, soeMPTSupported}, @@ -41,7 +41,7 @@ TRANSACTION(ttPAYMENT, 0, Payment, ({ })) /** This transaction type creates an escrow object. */ -TRANSACTION(ttESCROW_CREATE, 1, EscrowCreate, ({ +TRANSACTION(ttESCROW_CREATE, 1, EscrowCreate, Delegation::delegatable, ({ {sfDestination, soeREQUIRED}, {sfAmount, soeREQUIRED}, {sfCondition, soeOPTIONAL}, @@ -51,7 +51,7 @@ TRANSACTION(ttESCROW_CREATE, 1, EscrowCreate, ({ })) /** This transaction type completes an existing escrow. */ -TRANSACTION(ttESCROW_FINISH, 2, EscrowFinish, ({ +TRANSACTION(ttESCROW_FINISH, 2, EscrowFinish, Delegation::delegatable, ({ {sfOwner, soeREQUIRED}, {sfOfferSequence, soeREQUIRED}, {sfFulfillment, soeOPTIONAL}, @@ -61,7 +61,7 @@ TRANSACTION(ttESCROW_FINISH, 2, EscrowFinish, ({ /** This transaction type adjusts various account settings. */ -TRANSACTION(ttACCOUNT_SET, 3, AccountSet, ({ +TRANSACTION(ttACCOUNT_SET, 3, AccountSet, Delegation::notDelegatable, ({ {sfEmailHash, soeOPTIONAL}, {sfWalletLocator, soeOPTIONAL}, {sfWalletSize, soeOPTIONAL}, @@ -75,20 +75,20 @@ TRANSACTION(ttACCOUNT_SET, 3, AccountSet, ({ })) /** This transaction type cancels an existing escrow. 
*/ -TRANSACTION(ttESCROW_CANCEL, 4, EscrowCancel, ({ +TRANSACTION(ttESCROW_CANCEL, 4, EscrowCancel, Delegation::delegatable, ({ {sfOwner, soeREQUIRED}, {sfOfferSequence, soeREQUIRED}, })) /** This transaction type sets or clears an account's "regular key". */ -TRANSACTION(ttREGULAR_KEY_SET, 5, SetRegularKey, ({ +TRANSACTION(ttREGULAR_KEY_SET, 5, SetRegularKey, Delegation::notDelegatable, ({ {sfRegularKey, soeOPTIONAL}, })) // 6 deprecated /** This transaction type creates an offer to trade one asset for another. */ -TRANSACTION(ttOFFER_CREATE, 7, OfferCreate, ({ +TRANSACTION(ttOFFER_CREATE, 7, OfferCreate, Delegation::delegatable, ({ {sfTakerPays, soeREQUIRED}, {sfTakerGets, soeREQUIRED}, {sfExpiration, soeOPTIONAL}, @@ -96,14 +96,14 @@ TRANSACTION(ttOFFER_CREATE, 7, OfferCreate, ({ })) /** This transaction type cancels existing offers to trade one asset for another. */ -TRANSACTION(ttOFFER_CANCEL, 8, OfferCancel, ({ +TRANSACTION(ttOFFER_CANCEL, 8, OfferCancel, Delegation::delegatable, ({ {sfOfferSequence, soeREQUIRED}, })) // 9 deprecated /** This transaction type creates a new set of tickets. */ -TRANSACTION(ttTICKET_CREATE, 10, TicketCreate, ({ +TRANSACTION(ttTICKET_CREATE, 10, TicketCreate, Delegation::delegatable, ({ {sfTicketCount, soeREQUIRED}, })) @@ -112,13 +112,13 @@ TRANSACTION(ttTICKET_CREATE, 10, TicketCreate, ({ /** This transaction type modifies the signer list associated with an account. */ // The SignerEntries are optional because a SignerList is deleted by // setting the SignerQuorum to zero and omitting SignerEntries. -TRANSACTION(ttSIGNER_LIST_SET, 12, SignerListSet, ({ +TRANSACTION(ttSIGNER_LIST_SET, 12, SignerListSet, Delegation::notDelegatable, ({ {sfSignerQuorum, soeREQUIRED}, {sfSignerEntries, soeOPTIONAL}, })) /** This transaction type creates a new unidirectional XRP payment channel. */ -TRANSACTION(ttPAYCHAN_CREATE, 13, PaymentChannelCreate, ({ +TRANSACTION(ttPAYCHAN_CREATE, 13, PaymentChannelCreate, Delegation::delegatable, ({ {sfDestination, soeREQUIRED}, {sfAmount, soeREQUIRED}, {sfSettleDelay, soeREQUIRED}, @@ -128,14 +128,14 @@ TRANSACTION(ttPAYCHAN_CREATE, 13, PaymentChannelCreate, ({ })) /** This transaction type funds an existing unidirectional XRP payment channel. */ -TRANSACTION(ttPAYCHAN_FUND, 14, PaymentChannelFund, ({ +TRANSACTION(ttPAYCHAN_FUND, 14, PaymentChannelFund, Delegation::delegatable, ({ {sfChannel, soeREQUIRED}, {sfAmount, soeREQUIRED}, {sfExpiration, soeOPTIONAL}, })) /** This transaction type submits a claim against an existing unidirectional payment channel. */ -TRANSACTION(ttPAYCHAN_CLAIM, 15, PaymentChannelClaim, ({ +TRANSACTION(ttPAYCHAN_CLAIM, 15, PaymentChannelClaim, Delegation::delegatable, ({ {sfChannel, soeREQUIRED}, {sfAmount, soeOPTIONAL}, {sfBalance, soeOPTIONAL}, @@ -145,7 +145,7 @@ TRANSACTION(ttPAYCHAN_CLAIM, 15, PaymentChannelClaim, ({ })) /** This transaction type creates a new check. */ -TRANSACTION(ttCHECK_CREATE, 16, CheckCreate, ({ +TRANSACTION(ttCHECK_CREATE, 16, CheckCreate, Delegation::delegatable, ({ {sfDestination, soeREQUIRED}, {sfSendMax, soeREQUIRED}, {sfExpiration, soeOPTIONAL}, @@ -154,19 +154,19 @@ TRANSACTION(ttCHECK_CREATE, 16, CheckCreate, ({ })) /** This transaction type cashes an existing check. */ -TRANSACTION(ttCHECK_CASH, 17, CheckCash, ({ +TRANSACTION(ttCHECK_CASH, 17, CheckCash, Delegation::delegatable, ({ {sfCheckID, soeREQUIRED}, {sfAmount, soeOPTIONAL}, {sfDeliverMin, soeOPTIONAL}, })) /** This transaction type cancels an existing check. 
*/ -TRANSACTION(ttCHECK_CANCEL, 18, CheckCancel, ({ +TRANSACTION(ttCHECK_CANCEL, 18, CheckCancel, Delegation::delegatable, ({ {sfCheckID, soeREQUIRED}, })) /** This transaction type grants or revokes authorization to transfer funds. */ -TRANSACTION(ttDEPOSIT_PREAUTH, 19, DepositPreauth, ({ +TRANSACTION(ttDEPOSIT_PREAUTH, 19, DepositPreauth, Delegation::delegatable, ({ {sfAuthorize, soeOPTIONAL}, {sfUnauthorize, soeOPTIONAL}, {sfAuthorizeCredentials, soeOPTIONAL}, @@ -174,14 +174,14 @@ TRANSACTION(ttDEPOSIT_PREAUTH, 19, DepositPreauth, ({ })) /** This transaction type modifies a trustline between two accounts. */ -TRANSACTION(ttTRUST_SET, 20, TrustSet, ({ +TRANSACTION(ttTRUST_SET, 20, TrustSet, Delegation::delegatable, ({ {sfLimitAmount, soeOPTIONAL}, {sfQualityIn, soeOPTIONAL}, {sfQualityOut, soeOPTIONAL}, })) /** This transaction type deletes an existing account. */ -TRANSACTION(ttACCOUNT_DELETE, 21, AccountDelete, ({ +TRANSACTION(ttACCOUNT_DELETE, 21, AccountDelete, Delegation::notDelegatable, ({ {sfDestination, soeREQUIRED}, {sfDestinationTag, soeOPTIONAL}, {sfCredentialIDs, soeOPTIONAL}, @@ -190,7 +190,7 @@ TRANSACTION(ttACCOUNT_DELETE, 21, AccountDelete, ({ // 22 reserved /** This transaction mints a new NFT. */ -TRANSACTION(ttNFTOKEN_MINT, 25, NFTokenMint, ({ +TRANSACTION(ttNFTOKEN_MINT, 25, NFTokenMint, Delegation::delegatable, ({ {sfNFTokenTaxon, soeREQUIRED}, {sfTransferFee, soeOPTIONAL}, {sfIssuer, soeOPTIONAL}, @@ -201,13 +201,13 @@ TRANSACTION(ttNFTOKEN_MINT, 25, NFTokenMint, ({ })) /** This transaction burns (i.e. destroys) an existing NFT. */ -TRANSACTION(ttNFTOKEN_BURN, 26, NFTokenBurn, ({ +TRANSACTION(ttNFTOKEN_BURN, 26, NFTokenBurn, Delegation::delegatable, ({ {sfNFTokenID, soeREQUIRED}, {sfOwner, soeOPTIONAL}, })) /** This transaction creates a new offer to buy or sell an NFT. */ -TRANSACTION(ttNFTOKEN_CREATE_OFFER, 27, NFTokenCreateOffer, ({ +TRANSACTION(ttNFTOKEN_CREATE_OFFER, 27, NFTokenCreateOffer, Delegation::delegatable, ({ {sfNFTokenID, soeREQUIRED}, {sfAmount, soeREQUIRED}, {sfDestination, soeOPTIONAL}, @@ -216,25 +216,25 @@ TRANSACTION(ttNFTOKEN_CREATE_OFFER, 27, NFTokenCreateOffer, ({ })) /** This transaction cancels an existing offer to buy or sell an existing NFT. */ -TRANSACTION(ttNFTOKEN_CANCEL_OFFER, 28, NFTokenCancelOffer, ({ +TRANSACTION(ttNFTOKEN_CANCEL_OFFER, 28, NFTokenCancelOffer, Delegation::delegatable, ({ {sfNFTokenOffers, soeREQUIRED}, })) /** This transaction accepts an existing offer to buy or sell an existing NFT. */ -TRANSACTION(ttNFTOKEN_ACCEPT_OFFER, 29, NFTokenAcceptOffer, ({ +TRANSACTION(ttNFTOKEN_ACCEPT_OFFER, 29, NFTokenAcceptOffer, Delegation::delegatable, ({ {sfNFTokenBuyOffer, soeOPTIONAL}, {sfNFTokenSellOffer, soeOPTIONAL}, {sfNFTokenBrokerFee, soeOPTIONAL}, })) /** This transaction claws back issued tokens. */ -TRANSACTION(ttCLAWBACK, 30, Clawback, ({ +TRANSACTION(ttCLAWBACK, 30, Clawback, Delegation::delegatable, ({ {sfAmount, soeREQUIRED, soeMPTSupported}, {sfHolder, soeOPTIONAL}, })) /** This transaction claws back tokens from an AMM pool. 
*/ -TRANSACTION(ttAMM_CLAWBACK, 31, AMMClawback, ({ +TRANSACTION(ttAMM_CLAWBACK, 31, AMMClawback, Delegation::delegatable, ({ {sfHolder, soeREQUIRED}, {sfAsset, soeREQUIRED}, {sfAsset2, soeREQUIRED}, @@ -242,14 +242,14 @@ TRANSACTION(ttAMM_CLAWBACK, 31, AMMClawback, ({ })) /** This transaction type creates an AMM instance */ -TRANSACTION(ttAMM_CREATE, 35, AMMCreate, ({ +TRANSACTION(ttAMM_CREATE, 35, AMMCreate, Delegation::delegatable, ({ {sfAmount, soeREQUIRED}, {sfAmount2, soeREQUIRED}, {sfTradingFee, soeREQUIRED}, })) /** This transaction type deposits into an AMM instance */ -TRANSACTION(ttAMM_DEPOSIT, 36, AMMDeposit, ({ +TRANSACTION(ttAMM_DEPOSIT, 36, AMMDeposit, Delegation::delegatable, ({ {sfAsset, soeREQUIRED}, {sfAsset2, soeREQUIRED}, {sfAmount, soeOPTIONAL}, @@ -260,7 +260,7 @@ TRANSACTION(ttAMM_DEPOSIT, 36, AMMDeposit, ({ })) /** This transaction type withdraws from an AMM instance */ -TRANSACTION(ttAMM_WITHDRAW, 37, AMMWithdraw, ({ +TRANSACTION(ttAMM_WITHDRAW, 37, AMMWithdraw, Delegation::delegatable, ({ {sfAsset, soeREQUIRED}, {sfAsset2, soeREQUIRED}, {sfAmount, soeOPTIONAL}, @@ -270,14 +270,14 @@ TRANSACTION(ttAMM_WITHDRAW, 37, AMMWithdraw, ({ })) /** This transaction type votes for the trading fee */ -TRANSACTION(ttAMM_VOTE, 38, AMMVote, ({ +TRANSACTION(ttAMM_VOTE, 38, AMMVote, Delegation::delegatable, ({ {sfAsset, soeREQUIRED}, {sfAsset2, soeREQUIRED}, {sfTradingFee, soeREQUIRED}, })) /** This transaction type bids for the auction slot */ -TRANSACTION(ttAMM_BID, 39, AMMBid, ({ +TRANSACTION(ttAMM_BID, 39, AMMBid, Delegation::delegatable, ({ {sfAsset, soeREQUIRED}, {sfAsset2, soeREQUIRED}, {sfBidMin, soeOPTIONAL}, @@ -286,20 +286,20 @@ TRANSACTION(ttAMM_BID, 39, AMMBid, ({ })) /** This transaction type deletes AMM in the empty state */ -TRANSACTION(ttAMM_DELETE, 40, AMMDelete, ({ +TRANSACTION(ttAMM_DELETE, 40, AMMDelete, Delegation::delegatable, ({ {sfAsset, soeREQUIRED}, {sfAsset2, soeREQUIRED}, })) /** This transactions creates a crosschain sequence number */ -TRANSACTION(ttXCHAIN_CREATE_CLAIM_ID, 41, XChainCreateClaimID, ({ +TRANSACTION(ttXCHAIN_CREATE_CLAIM_ID, 41, XChainCreateClaimID, Delegation::delegatable, ({ {sfXChainBridge, soeREQUIRED}, {sfSignatureReward, soeREQUIRED}, {sfOtherChainSource, soeREQUIRED}, })) /** This transactions initiates a crosschain transaction */ -TRANSACTION(ttXCHAIN_COMMIT, 42, XChainCommit, ({ +TRANSACTION(ttXCHAIN_COMMIT, 42, XChainCommit, Delegation::delegatable, ({ {sfXChainBridge, soeREQUIRED}, {sfXChainClaimID, soeREQUIRED}, {sfAmount, soeREQUIRED}, @@ -307,7 +307,7 @@ TRANSACTION(ttXCHAIN_COMMIT, 42, XChainCommit, ({ })) /** This transaction completes a crosschain transaction */ -TRANSACTION(ttXCHAIN_CLAIM, 43, XChainClaim, ({ +TRANSACTION(ttXCHAIN_CLAIM, 43, XChainClaim, Delegation::delegatable, ({ {sfXChainBridge, soeREQUIRED}, {sfXChainClaimID, soeREQUIRED}, {sfDestination, soeREQUIRED}, @@ -316,7 +316,7 @@ TRANSACTION(ttXCHAIN_CLAIM, 43, XChainClaim, ({ })) /** This transaction initiates a crosschain account create transaction */ -TRANSACTION(ttXCHAIN_ACCOUNT_CREATE_COMMIT, 44, XChainAccountCreateCommit, ({ +TRANSACTION(ttXCHAIN_ACCOUNT_CREATE_COMMIT, 44, XChainAccountCreateCommit, Delegation::delegatable, ({ {sfXChainBridge, soeREQUIRED}, {sfDestination, soeREQUIRED}, {sfAmount, soeREQUIRED}, @@ -324,7 +324,7 @@ TRANSACTION(ttXCHAIN_ACCOUNT_CREATE_COMMIT, 44, XChainAccountCreateCommit, ({ })) /** This transaction adds an attestation to a claim */ -TRANSACTION(ttXCHAIN_ADD_CLAIM_ATTESTATION, 45, XChainAddClaimAttestation, ({ 
+TRANSACTION(ttXCHAIN_ADD_CLAIM_ATTESTATION, 45, XChainAddClaimAttestation, Delegation::delegatable, ({ {sfXChainBridge, soeREQUIRED}, {sfAttestationSignerAccount, soeREQUIRED}, @@ -340,7 +340,7 @@ TRANSACTION(ttXCHAIN_ADD_CLAIM_ATTESTATION, 45, XChainAddClaimAttestation, ({ })) /** This transaction adds an attestation to an account */ -TRANSACTION(ttXCHAIN_ADD_ACCOUNT_CREATE_ATTESTATION, 46, XChainAddAccountCreateAttestation, ({ +TRANSACTION(ttXCHAIN_ADD_ACCOUNT_CREATE_ATTESTATION, 46, XChainAddAccountCreateAttestation, Delegation::delegatable, ({ {sfXChainBridge, soeREQUIRED}, {sfAttestationSignerAccount, soeREQUIRED}, @@ -357,31 +357,31 @@ TRANSACTION(ttXCHAIN_ADD_ACCOUNT_CREATE_ATTESTATION, 46, XChainAddAccountCreateA })) /** This transaction modifies a sidechain */ -TRANSACTION(ttXCHAIN_MODIFY_BRIDGE, 47, XChainModifyBridge, ({ +TRANSACTION(ttXCHAIN_MODIFY_BRIDGE, 47, XChainModifyBridge, Delegation::delegatable, ({ {sfXChainBridge, soeREQUIRED}, {sfSignatureReward, soeOPTIONAL}, {sfMinAccountCreateAmount, soeOPTIONAL}, })) /** This transactions creates a sidechain */ -TRANSACTION(ttXCHAIN_CREATE_BRIDGE, 48, XChainCreateBridge, ({ +TRANSACTION(ttXCHAIN_CREATE_BRIDGE, 48, XChainCreateBridge, Delegation::delegatable, ({ {sfXChainBridge, soeREQUIRED}, {sfSignatureReward, soeREQUIRED}, {sfMinAccountCreateAmount, soeOPTIONAL}, })) /** This transaction type creates or updates a DID */ -TRANSACTION(ttDID_SET, 49, DIDSet, ({ +TRANSACTION(ttDID_SET, 49, DIDSet, Delegation::delegatable, ({ {sfDIDDocument, soeOPTIONAL}, {sfURI, soeOPTIONAL}, {sfData, soeOPTIONAL}, })) /** This transaction type deletes a DID */ -TRANSACTION(ttDID_DELETE, 50, DIDDelete, ({})) +TRANSACTION(ttDID_DELETE, 50, DIDDelete, Delegation::delegatable, ({})) /** This transaction type creates an Oracle instance */ -TRANSACTION(ttORACLE_SET, 51, OracleSet, ({ +TRANSACTION(ttORACLE_SET, 51, OracleSet, Delegation::delegatable, ({ {sfOracleDocumentID, soeREQUIRED}, {sfProvider, soeOPTIONAL}, {sfURI, soeOPTIONAL}, @@ -391,18 +391,18 @@ TRANSACTION(ttORACLE_SET, 51, OracleSet, ({ })) /** This transaction type deletes an Oracle instance */ -TRANSACTION(ttORACLE_DELETE, 52, OracleDelete, ({ +TRANSACTION(ttORACLE_DELETE, 52, OracleDelete, Delegation::delegatable, ({ {sfOracleDocumentID, soeREQUIRED}, })) /** This transaction type fixes a problem in the ledger state */ -TRANSACTION(ttLEDGER_STATE_FIX, 53, LedgerStateFix, ({ +TRANSACTION(ttLEDGER_STATE_FIX, 53, LedgerStateFix, Delegation::notDelegatable, ({ {sfLedgerFixType, soeREQUIRED}, {sfOwner, soeOPTIONAL}, })) /** This transaction type creates a MPTokensIssuance instance */ -TRANSACTION(ttMPTOKEN_ISSUANCE_CREATE, 54, MPTokenIssuanceCreate, ({ +TRANSACTION(ttMPTOKEN_ISSUANCE_CREATE, 54, MPTokenIssuanceCreate, Delegation::delegatable, ({ {sfAssetScale, soeOPTIONAL}, {sfTransferFee, soeOPTIONAL}, {sfMaximumAmount, soeOPTIONAL}, @@ -410,24 +410,24 @@ TRANSACTION(ttMPTOKEN_ISSUANCE_CREATE, 54, MPTokenIssuanceCreate, ({ })) /** This transaction type destroys a MPTokensIssuance instance */ -TRANSACTION(ttMPTOKEN_ISSUANCE_DESTROY, 55, MPTokenIssuanceDestroy, ({ +TRANSACTION(ttMPTOKEN_ISSUANCE_DESTROY, 55, MPTokenIssuanceDestroy, Delegation::delegatable, ({ {sfMPTokenIssuanceID, soeREQUIRED}, })) /** This transaction type sets flags on a MPTokensIssuance or MPToken instance */ -TRANSACTION(ttMPTOKEN_ISSUANCE_SET, 56, MPTokenIssuanceSet, ({ +TRANSACTION(ttMPTOKEN_ISSUANCE_SET, 56, MPTokenIssuanceSet, Delegation::delegatable, ({ {sfMPTokenIssuanceID, soeREQUIRED}, {sfHolder, soeOPTIONAL}, })) 
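The fourth macro argument (Delegation::delegatable or Delegation::notDelegatable) is recorded by the Permission class added later in this patch, so that non-delegatable transaction types can be rejected when permissions are granted. A rough sketch of the resulting value scheme, using illustrative assertions rather than code from the diff (include paths assumed from the files added in this patch):

    #include <xrpl/protocol/Permissions.h>  // added by this patch
    #include <xrpl/protocol/TxFormats.h>

    #include <cassert>

    void permissionValueSketch()
    {
        using namespace ripple;
        auto const& perm = Permission::getInstance();

        // Transaction-level permission values are the TxType value plus one,
        // so ttPAYMENT (0) is granted with PermissionValue 1.
        assert(perm.txToPermissionType(ttPAYMENT) == 1);

        // Account-security transactions are marked Delegation::notDelegatable
        // above, so they report as non-delegatable.
        assert(!perm.isDelegatable(perm.txToPermissionType(ttACCOUNT_SET)));

        // Granular permissions start above UINT16_MAX (for example
        // TrustlineAuthorize = 65537), so they never collide with a TxType,
        // and granular permissions are always delegatable.
        assert(perm.isDelegatable(65537));
    }

The same classification is exercised by the DelegateSet tests later in this patch, where delegating AccountSet, SetRegularKey, SignerListSet, AccountDelete, or DelegateSet fails with tecNO_PERMISSION.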
/** This transaction type authorizes a MPToken instance */ -TRANSACTION(ttMPTOKEN_AUTHORIZE, 57, MPTokenAuthorize, ({ +TRANSACTION(ttMPTOKEN_AUTHORIZE, 57, MPTokenAuthorize, Delegation::delegatable, ({ {sfMPTokenIssuanceID, soeREQUIRED}, {sfHolder, soeOPTIONAL}, })) /** This transaction type create an Credential instance */ -TRANSACTION(ttCREDENTIAL_CREATE, 58, CredentialCreate, ({ +TRANSACTION(ttCREDENTIAL_CREATE, 58, CredentialCreate, Delegation::delegatable, ({ {sfSubject, soeREQUIRED}, {sfCredentialType, soeREQUIRED}, {sfExpiration, soeOPTIONAL}, @@ -435,41 +435,47 @@ TRANSACTION(ttCREDENTIAL_CREATE, 58, CredentialCreate, ({ })) /** This transaction type accept an Credential object */ -TRANSACTION(ttCREDENTIAL_ACCEPT, 59, CredentialAccept, ({ +TRANSACTION(ttCREDENTIAL_ACCEPT, 59, CredentialAccept, Delegation::delegatable, ({ {sfIssuer, soeREQUIRED}, {sfCredentialType, soeREQUIRED}, })) /** This transaction type delete an Credential object */ -TRANSACTION(ttCREDENTIAL_DELETE, 60, CredentialDelete, ({ +TRANSACTION(ttCREDENTIAL_DELETE, 60, CredentialDelete, Delegation::delegatable, ({ {sfSubject, soeOPTIONAL}, {sfIssuer, soeOPTIONAL}, {sfCredentialType, soeREQUIRED}, })) /** This transaction type modify a NFToken */ -TRANSACTION(ttNFTOKEN_MODIFY, 61, NFTokenModify, ({ +TRANSACTION(ttNFTOKEN_MODIFY, 61, NFTokenModify, Delegation::delegatable, ({ {sfNFTokenID, soeREQUIRED}, {sfOwner, soeOPTIONAL}, {sfURI, soeOPTIONAL}, })) /** This transaction type creates or modifies a Permissioned Domain */ -TRANSACTION(ttPERMISSIONED_DOMAIN_SET, 62, PermissionedDomainSet, ({ +TRANSACTION(ttPERMISSIONED_DOMAIN_SET, 62, PermissionedDomainSet, Delegation::delegatable, ({ {sfDomainID, soeOPTIONAL}, {sfAcceptedCredentials, soeREQUIRED}, })) /** This transaction type deletes a Permissioned Domain */ -TRANSACTION(ttPERMISSIONED_DOMAIN_DELETE, 63, PermissionedDomainDelete, ({ +TRANSACTION(ttPERMISSIONED_DOMAIN_DELETE, 63, PermissionedDomainDelete, Delegation::delegatable, ({ {sfDomainID, soeREQUIRED}, })) +/** This transaction type delegates authorized account specified permissions */ +TRANSACTION(ttDELEGATE_SET, 64, DelegateSet, Delegation::notDelegatable, ({ + {sfAuthorize, soeREQUIRED}, + {sfPermissions, soeREQUIRED}, +})) + /** This system-generated transaction type is used to update the status of the various amendments. For details, see: https://xrpl.org/amendments.html */ -TRANSACTION(ttAMENDMENT, 100, EnableAmendment, ({ +TRANSACTION(ttAMENDMENT, 100, EnableAmendment, Delegation::notDelegatable, ({ {sfLedgerSequence, soeREQUIRED}, {sfAmendment, soeREQUIRED}, })) @@ -477,7 +483,7 @@ TRANSACTION(ttAMENDMENT, 100, EnableAmendment, ({ /** This system-generated transaction type is used to update the network's fee settings. 
For details, see: https://xrpl.org/fee-voting.html */ -TRANSACTION(ttFEE, 101, SetFee, ({ +TRANSACTION(ttFEE, 101, SetFee, Delegation::notDelegatable, ({ {sfLedgerSequence, soeOPTIONAL}, // Old version uses raw numbers {sfBaseFee, soeOPTIONAL}, @@ -494,7 +500,7 @@ TRANSACTION(ttFEE, 101, SetFee, ({ For details, see: https://xrpl.org/negative-unl.html */ -TRANSACTION(ttUNL_MODIFY, 102, UNLModify, ({ +TRANSACTION(ttUNL_MODIFY, 102, UNLModify, Delegation::notDelegatable, ({ {sfUNLModifyDisabling, soeREQUIRED}, {sfLedgerSequence, soeREQUIRED}, {sfUNLModifyValidator, soeREQUIRED}, diff --git a/include/xrpl/protocol/jss.h b/include/xrpl/protocol/jss.h index 483b69a962..bb2ffa7bb0 100644 --- a/include/xrpl/protocol/jss.h +++ b/include/xrpl/protocol/jss.h @@ -145,6 +145,7 @@ JSS(attestations); JSS(attestation_reward_account); JSS(auction_slot); // out: amm_info JSS(authorized); // out: AccountLines +JSS(authorize); // out: delegate JSS(authorized_credentials); // in: ledger_entry DepositPreauth JSS(auth_accounts); // out: amm_info JSS(auth_change); // out: AccountInfo @@ -699,7 +700,7 @@ JSS(write_load); // out: GetCounts #pragma push_macro("TRANSACTION") #undef TRANSACTION -#define TRANSACTION(tag, value, name, fields) JSS(name); +#define TRANSACTION(tag, value, name, delegatable, fields) JSS(name); #include diff --git a/src/libxrpl/protocol/ErrorCodes.cpp b/src/libxrpl/protocol/ErrorCodes.cpp index 93e30f24be..b3d1b812b5 100644 --- a/src/libxrpl/protocol/ErrorCodes.cpp +++ b/src/libxrpl/protocol/ErrorCodes.cpp @@ -107,6 +107,7 @@ constexpr static ErrorInfo unorderedErrorInfos[]{ {rpcSRC_ACT_MALFORMED, "srcActMalformed", "Source account is malformed.", 400}, {rpcSRC_ACT_MISSING, "srcActMissing", "Source account not provided.", 400}, {rpcSRC_ACT_NOT_FOUND, "srcActNotFound", "Source account not found.", 404}, + {rpcDELEGATE_ACT_NOT_FOUND, "delegateActNotFound", "Delegate account not found.", 404}, {rpcSRC_CUR_MALFORMED, "srcCurMalformed", "Source currency is malformed.", 400}, {rpcSRC_ISR_MALFORMED, "srcIsrMalformed", "Source issuer is malformed.", 400}, {rpcSTREAM_MALFORMED, "malformedStream", "Stream malformed.", 400}, diff --git a/src/libxrpl/protocol/Indexes.cpp b/src/libxrpl/protocol/Indexes.cpp index fe8f0c4778..8256c7a77c 100644 --- a/src/libxrpl/protocol/Indexes.cpp +++ b/src/libxrpl/protocol/Indexes.cpp @@ -94,6 +94,7 @@ enum class LedgerNameSpace : std::uint16_t { MPTOKEN = 't', CREDENTIAL = 'D', PERMISSIONED_DOMAIN = 'm', + DELEGATE = 'E', // No longer used or supported. Left here to reserve the space // to avoid accidental reuse. 
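The new DELEGATE namespace byte above backs the keylet::delegate helper declared in Indexes.h earlier in this patch and defined in the next hunk. A minimal usage sketch of how callers can locate the ltDELEGATE entry for a delegating/delegated account pair (the function name and include paths here are assumptions for illustration, not code from the diff):

    #include <xrpl/protocol/Indexes.h>
    #include <xrpld/ledger/ReadView.h>

    namespace ripple {

    // Sketch: read the Delegate ledger entry that `account` created for
    // `authorizedAccount`, or return nullptr if no permissions were delegated.
    std::shared_ptr<SLE const>
    findDelegate(
        ReadView const& view,
        AccountID const& account,
        AccountID const& authorizedAccount)
    {
        // keylet::delegate hashes the DELEGATE namespace with both account
        // IDs, so the same ordered pair always maps to the same ltDELEGATE
        // key, whose SLE carries sfAccount, sfAuthorize, and sfPermissions.
        return view.read(keylet::delegate(account, authorizedAccount));
    }

    }  // namespace ripple

The test file added below relies on the same key derivation, checking env.closed()->exists(keylet::delegate(alice.id(), bob.id())) to confirm that the entry exists after DelegateSet and is removed when the delegating account is deleted.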
@@ -452,6 +453,14 @@ amm(uint256 const& id) noexcept return {ltAMM, id}; } +Keylet +delegate(AccountID const& account, AccountID const& authorizedAccount) noexcept +{ + return { + ltDELEGATE, + indexHash(LedgerNameSpace::DELEGATE, account, authorizedAccount)}; +} + Keylet bridge(STXChainBridge const& bridge, STXChainBridge::ChainType chainType) { diff --git a/src/libxrpl/protocol/InnerObjectFormats.cpp b/src/libxrpl/protocol/InnerObjectFormats.cpp index 87abcc2351..ecfca9743d 100644 --- a/src/libxrpl/protocol/InnerObjectFormats.cpp +++ b/src/libxrpl/protocol/InnerObjectFormats.cpp @@ -154,6 +154,10 @@ InnerObjectFormats::InnerObjectFormats() {sfIssuer, soeREQUIRED}, {sfCredentialType, soeREQUIRED}, }); + + add(sfPermission.jsonName.c_str(), + sfPermission.getCode(), + {{sfPermissionValue, soeREQUIRED}}); } InnerObjectFormats const& diff --git a/src/libxrpl/protocol/Permissions.cpp b/src/libxrpl/protocol/Permissions.cpp new file mode 100644 index 0000000000..dbe5325a4e --- /dev/null +++ b/src/libxrpl/protocol/Permissions.cpp @@ -0,0 +1,148 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2025 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include + +namespace ripple { + +Permission::Permission() +{ + delegatableTx_ = { +#pragma push_macro("TRANSACTION") +#undef TRANSACTION + +#define TRANSACTION(tag, value, name, delegatable, fields) {value, delegatable}, + +#include + +#undef TRANSACTION +#pragma pop_macro("TRANSACTION") + }; + + granularPermissionMap_ = { +#pragma push_macro("PERMISSION") +#undef PERMISSION + +#define PERMISSION(type, txType, value) {#type, type}, + +#include + +#undef PERMISSION +#pragma pop_macro("PERMISSION") + }; + + granularNameMap_ = { +#pragma push_macro("PERMISSION") +#undef PERMISSION + +#define PERMISSION(type, txType, value) {type, #type}, + +#include + +#undef PERMISSION +#pragma pop_macro("PERMISSION") + }; + + granularTxTypeMap_ = { +#pragma push_macro("PERMISSION") +#undef PERMISSION + +#define PERMISSION(type, txType, value) {type, txType}, + +#include + +#undef PERMISSION +#pragma pop_macro("PERMISSION") + }; + + for ([[maybe_unused]] auto const& permission : granularPermissionMap_) + XRPL_ASSERT( + permission.second > UINT16_MAX, + "ripple::Permission::granularPermissionMap_ : granular permission " + "value must not exceed the maximum uint16_t value."); +} + +Permission const& +Permission::getInstance() +{ + static Permission const instance; + return instance; +} + +std::optional +Permission::getGranularValue(std::string const& name) const +{ + auto const it = granularPermissionMap_.find(name); + if (it != granularPermissionMap_.end()) + return static_cast(it->second); + + return std::nullopt; +} + +std::optional +Permission::getGranularName(GranularPermissionType const& value) const +{ + auto const it = granularNameMap_.find(value); + if (it != granularNameMap_.end()) + return it->second; + + return std::nullopt; +} + +std::optional +Permission::getGranularTxType(GranularPermissionType const& gpType) const +{ + auto const it = granularTxTypeMap_.find(gpType); + if (it != granularTxTypeMap_.end()) + return it->second; + + return std::nullopt; +} + +bool +Permission::isDelegatable(std::uint32_t const& permissionValue) const +{ + auto const granularPermission = + getGranularName(static_cast(permissionValue)); + if (granularPermission) + // granular permissions are always allowed to be delegated + return true; + + auto const it = delegatableTx_.find(permissionValue - 1); + if (it != delegatableTx_.end() && it->second == Delegation::notDelegatable) + return false; + + return true; +} + +uint32_t +Permission::txToPermissionType(TxType const& type) const +{ + return static_cast(type) + 1; +} + +TxType +Permission::permissionToTxType(uint32_t const& value) const +{ + return static_cast(value - 1); +} + +} // namespace ripple \ No newline at end of file diff --git a/src/libxrpl/protocol/STInteger.cpp b/src/libxrpl/protocol/STInteger.cpp index bc5b7e855e..a90e21491c 100644 --- a/src/libxrpl/protocol/STInteger.cpp +++ b/src/libxrpl/protocol/STInteger.cpp @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -177,6 +178,27 @@ template <> Json::Value STUInt32::getJson(JsonOptions) const { + if (getFName() == sfPermissionValue) + { + auto const permissionValue = + static_cast(value_); + auto const granular = + Permission::getInstance().getGranularName(permissionValue); + + if (granular) + { + return *granular; + } + else + { + auto const txType = + Permission::getInstance().permissionToTxType(value_); + auto item = TxFormats::getInstance().findByType(txType); + 
if (item != nullptr) + return item->getName(); + } + } + return value_; } diff --git a/src/libxrpl/protocol/STParsedJSON.cpp b/src/libxrpl/protocol/STParsedJSON.cpp index 0488189a66..e7568c6818 100644 --- a/src/libxrpl/protocol/STParsedJSON.cpp +++ b/src/libxrpl/protocol/STParsedJSON.cpp @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -373,10 +374,35 @@ parseLeaf( { if (value.isString()) { - ret = detail::make_stvar( - field, - beast::lexicalCastThrow( - value.asString())); + if (field == sfPermissionValue) + { + std::string const strValue = value.asString(); + auto const granularPermission = + Permission::getInstance().getGranularValue( + strValue); + if (granularPermission) + { + ret = detail::make_stvar( + field, *granularPermission); + } + else + { + auto const& txType = + TxFormats::getInstance().findTypeByName( + strValue); + ret = detail::make_stvar( + field, + Permission::getInstance().txToPermissionType( + txType)); + } + } + else + { + ret = detail::make_stvar( + field, + beast::lexicalCastThrow( + value.asString())); + } } else if (value.isInt()) { diff --git a/src/libxrpl/protocol/TxFormats.cpp b/src/libxrpl/protocol/TxFormats.cpp index b2dd3a656f..a23475553d 100644 --- a/src/libxrpl/protocol/TxFormats.cpp +++ b/src/libxrpl/protocol/TxFormats.cpp @@ -46,6 +46,7 @@ TxFormats::TxFormats() {sfTxnSignature, soeOPTIONAL}, {sfSigners, soeOPTIONAL}, // submit_multisigned {sfNetworkID, soeOPTIONAL}, + {sfDelegate, soeOPTIONAL}, }; #pragma push_macro("UNWRAP") @@ -54,7 +55,7 @@ TxFormats::TxFormats() #undef TRANSACTION #define UNWRAP(...) __VA_ARGS__ -#define TRANSACTION(tag, value, name, fields) \ +#define TRANSACTION(tag, value, name, delegatable, fields) \ add(jss::name, tag, UNWRAP fields, commonFields); #include diff --git a/src/test/app/Delegate_test.cpp b/src/test/app/Delegate_test.cpp new file mode 100644 index 0000000000..c8415a558a --- /dev/null +++ b/src/test/app/Delegate_test.cpp @@ -0,0 +1,1437 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2025 Ripple Labs Inc. + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include + +#include +#include + +namespace ripple { +namespace test { +class Delegate_test : public beast::unit_test::suite +{ + void + testFeatureDisabled() + { + testcase("test featurePermissionDelegation not enabled"); + using namespace jtx; + + Env env{*this, supported_amendments() - featurePermissionDelegation}; + Account gw{"gateway"}; + Account alice{"alice"}; + Account bob{"bob"}; + env.fund(XRP(1000000), gw, alice, bob); + env.close(); + + // can not set Delegate when feature disabled + env(delegate::set(gw, alice, {"Payment"}), ter(temDISABLED)); + + // can not send delegating transaction when feature disabled + env(pay(alice, bob, XRP(100)), delegate::as(bob), ter(temDISABLED)); + } + + void + testDelegateSet() + { + testcase("test valid request creating, updating, deleting permissions"); + using namespace jtx; + + Env env(*this); + Account gw{"gateway"}; + Account alice{"alice"}; + env.fund(XRP(100000), gw, alice); + env.close(); + + // delegating an empty permission list when the delegate ledger object + // does not exist will not create the ledger object + env(delegate::set(gw, alice, std::vector{})); + env.close(); + auto const entry = delegate::entry(env, gw, alice); + BEAST_EXPECT(entry[jss::result][jss::error] == "entryNotFound"); + + auto const permissions = std::vector{ + "Payment", + "EscrowCreate", + "EscrowFinish", + "TrustlineAuthorize", + "CheckCreate"}; + env(delegate::set(gw, alice, permissions)); + env.close(); + + // this lambda function is used to compare the json value of ledger + // entry response with the given vector of permissions. + auto comparePermissions = + [&](Json::Value const& jle, + std::vector const& permissions, + Account const& account, + Account const& authorize) { + BEAST_EXPECT( + !jle[jss::result].isMember(jss::error) && + jle[jss::result].isMember(jss::node)); + BEAST_EXPECT( + jle[jss::result][jss::node]["LedgerEntryType"] == + jss::Delegate); + BEAST_EXPECT( + jle[jss::result][jss::node][jss::Account] == + account.human()); + BEAST_EXPECT( + jle[jss::result][jss::node][sfAuthorize.jsonName] == + authorize.human()); + + auto const& jPermissions = + jle[jss::result][jss::node][sfPermissions.jsonName]; + unsigned i = 0; + for (auto const& permission : permissions) + { + BEAST_EXPECT( + jPermissions[i][sfPermission.jsonName] + [sfPermissionValue.jsonName] == permission); + i++; + } + }; + + // get ledger entry with valid parameter + comparePermissions( + delegate::entry(env, gw, alice), permissions, gw, alice); + + // gw updates permission + auto const newPermissions = std::vector{ + "Payment", "AMMCreate", "AMMDeposit", "AMMWithdraw"}; + env(delegate::set(gw, alice, newPermissions)); + env.close(); + + // get ledger entry again, permissions should be updated to + // newPermissions + comparePermissions( + delegate::entry(env, gw, alice), newPermissions, gw, alice); + + // gw deletes all permissions delegated to alice, this will delete + // the + // ledger entry + env(delegate::set(gw, alice, {})); + env.close(); + auto const jle = delegate::entry(env, gw, alice); + BEAST_EXPECT(jle[jss::result][jss::error] == "entryNotFound"); + + // alice can delegate permissions to gw as well + env(delegate::set(alice, gw, permissions)); + env.close(); + comparePermissions( + delegate::entry(env, alice, gw), permissions, alice, gw); + auto const response = delegate::entry(env, gw, alice); + // alice has not been granted any permissions by gw + 
BEAST_EXPECT(response[jss::result][jss::error] == "entryNotFound"); + } + + void + testInvalidRequest() + { + testcase("test invalid DelegateSet"); + using namespace jtx; + + Env env(*this); + Account gw{"gateway"}; + Account alice{"alice"}; + Account bob{"bob"}; + env.fund(XRP(100000), gw, alice, bob); + env.close(); + + // when permissions size exceeds the limit 10, should return + // temARRAY_TOO_LARGE + { + env(delegate::set( + gw, + alice, + {"Payment", + "EscrowCreate", + "EscrowFinish", + "EscrowCancel", + "CheckCreate", + "CheckCash", + "CheckCancel", + "DepositPreauth", + "TrustSet", + "NFTokenMint", + "NFTokenBurn"}), + ter(temARRAY_TOO_LARGE)); + } + + // alice can not authorize herself + { + env(delegate::set(alice, alice, {"Payment"}), ter(temMALFORMED)); + } + + // bad fee + { + Json::Value jv; + jv[jss::TransactionType] = jss::DelegateSet; + jv[jss::Account] = gw.human(); + jv[sfAuthorize.jsonName] = alice.human(); + Json::Value permissionsJson(Json::arrayValue); + Json::Value permissionValue; + permissionValue[sfPermissionValue.jsonName] = "Payment"; + Json::Value permissionObj; + permissionObj[sfPermission.jsonName] = permissionValue; + permissionsJson.append(permissionObj); + jv[sfPermissions.jsonName] = permissionsJson; + jv[sfFee.jsonName] = -1; + env(jv, ter(temBAD_FEE)); + } + + // when provided permissions contains duplicate values, should return + // temMALFORMED + { + env(delegate::set( + gw, + alice, + {"Payment", + "EscrowCreate", + "EscrowFinish", + "TrustlineAuthorize", + "CheckCreate", + "TrustlineAuthorize"}), + ter(temMALFORMED)); + } + + // when authorizing account which does not exist, should return + // terNO_ACCOUNT + { + env(delegate::set(gw, Account("unknown"), {"Payment"}), + ter(terNO_ACCOUNT)); + } + + // for security reasons, AccountSet, SetRegularKey, SignerListSet, + // AccountDelete, DelegateSet are prohibited to be delegated to + // other accounts. 
+ { + env(delegate::set(gw, alice, {"SetRegularKey"}), + ter(tecNO_PERMISSION)); + env(delegate::set(gw, alice, {"AccountSet"}), + ter(tecNO_PERMISSION)); + env(delegate::set(gw, alice, {"SignerListSet"}), + ter(tecNO_PERMISSION)); + env(delegate::set(gw, alice, {"DelegateSet"}), + ter(tecNO_PERMISSION)); + env(delegate::set(gw, alice, {"SetRegularKey"}), + ter(tecNO_PERMISSION)); + } + } + + void + testReserve() + { + testcase("test reserve"); + using namespace jtx; + + // test reserve for DelegateSet + { + Env env(*this); + Account alice{"alice"}; + Account bob{"bob"}; + Account carol{"carol"}; + + env.fund(drops(env.current()->fees().accountReserve(0)), alice); + env.fund( + drops(env.current()->fees().accountReserve(1)), bob, carol); + env.close(); + + // alice does not have enough reserve to create Delegate + env(delegate::set(alice, bob, {"Payment"}), + ter(tecINSUFFICIENT_RESERVE)); + + // bob has enough reserve + env(delegate::set(bob, alice, {"Payment"})); + env.close(); + + // now bob create another Delegate, he does not have + // enough reserve + env(delegate::set(bob, carol, {"Payment"}), + ter(tecINSUFFICIENT_RESERVE)); + } + + // test reserve when sending transaction on behalf of other account + { + Env env(*this); + Account alice{"alice"}; + Account bob{"bob"}; + + env.fund(drops(env.current()->fees().accountReserve(1)), alice); + env.fund(drops(env.current()->fees().accountReserve(2)), bob); + env.close(); + + // alice gives bob permission + env(delegate::set(alice, bob, {"DIDSet", "DIDDelete"})); + env.close(); + + // bob set DID on behalf of alice, but alice does not have enough + // reserve + env(did::set(alice), + did::uri("uri"), + delegate::as(bob), + ter(tecINSUFFICIENT_RESERVE)); + + // bob can set DID for himself because he has enough reserve + env(did::set(bob), did::uri("uri")); + env.close(); + } + } + + void + testFee() + { + testcase("test fee"); + using namespace jtx; + + Env env(*this); + Account alice{"alice"}; + Account bob{"bob"}; + Account carol{"carol"}; + env.fund(XRP(10000), alice, carol); + env.fund(XRP(1000), bob); + env.close(); + + { + // Fee should be checked before permission check, + // otherwise tecNO_PERMISSION returned when permission check fails + // could cause context reset to pay fee because it is tec error + auto aliceBalance = env.balance(alice); + auto bobBalance = env.balance(bob); + auto carolBalance = env.balance(carol); + + env(pay(alice, carol, XRP(100)), + fee(XRP(2000)), + delegate::as(bob), + ter(terINSUF_FEE_B)); + env.close(); + BEAST_EXPECT(env.balance(alice) == aliceBalance); + BEAST_EXPECT(env.balance(bob) == bobBalance); + BEAST_EXPECT(env.balance(carol) == carolBalance); + } + + env(delegate::set(alice, bob, {"Payment"})); + env.close(); + + { + // Delegate pays the fee + auto aliceBalance = env.balance(alice); + auto bobBalance = env.balance(bob); + auto carolBalance = env.balance(carol); + + auto const sendAmt = XRP(100); + auto const feeAmt = XRP(10); + env(pay(alice, carol, sendAmt), fee(feeAmt), delegate::as(bob)); + env.close(); + BEAST_EXPECT(env.balance(alice) == aliceBalance - sendAmt); + BEAST_EXPECT(env.balance(bob) == bobBalance - feeAmt); + BEAST_EXPECT(env.balance(carol) == carolBalance + sendAmt); + } + + { + // insufficient balance to pay fee + auto aliceBalance = env.balance(alice); + auto bobBalance = env.balance(bob); + auto carolBalance = env.balance(carol); + + env(pay(alice, carol, XRP(100)), + fee(XRP(2000)), + delegate::as(bob), + ter(terINSUF_FEE_B)); + env.close(); + 
BEAST_EXPECT(env.balance(alice) == aliceBalance); + BEAST_EXPECT(env.balance(bob) == bobBalance); + BEAST_EXPECT(env.balance(carol) == carolBalance); + } + + { + // fee is paid by Delegate + // on context reset (tec error) + auto aliceBalance = env.balance(alice); + auto bobBalance = env.balance(bob); + auto carolBalance = env.balance(carol); + auto const feeAmt = XRP(10); + + env(pay(alice, carol, XRP(20000)), + fee(feeAmt), + delegate::as(bob), + ter(tecUNFUNDED_PAYMENT)); + env.close(); + BEAST_EXPECT(env.balance(alice) == aliceBalance); + BEAST_EXPECT(env.balance(bob) == bobBalance - feeAmt); + BEAST_EXPECT(env.balance(carol) == carolBalance); + } + } + + void + testSequence() + { + testcase("test sequence"); + using namespace jtx; + + Env env(*this); + Account alice{"alice"}; + Account bob{"bob"}; + Account carol{"carol"}; + env.fund(XRP(10000), alice, bob, carol); + env.close(); + + auto aliceSeq = env.seq(alice); + auto bobSeq = env.seq(bob); + env(delegate::set(alice, bob, {"Payment"})); + env(delegate::set(bob, alice, {"Payment"})); + env.close(); + BEAST_EXPECT(env.seq(alice) == aliceSeq + 1); + BEAST_EXPECT(env.seq(bob) == bobSeq + 1); + aliceSeq = env.seq(alice); + bobSeq = env.seq(bob); + + for (auto i = 0; i < 20; ++i) + { + // bob is the delegated account, his sequence won't increment + env(pay(alice, carol, XRP(10)), fee(XRP(10)), delegate::as(bob)); + env.close(); + BEAST_EXPECT(env.seq(alice) == aliceSeq + 1); + BEAST_EXPECT(env.seq(bob) == bobSeq); + aliceSeq = env.seq(alice); + + // bob sends payment for himself, his sequence will increment + env(pay(bob, carol, XRP(10)), fee(XRP(10))); + BEAST_EXPECT(env.seq(alice) == aliceSeq); + BEAST_EXPECT(env.seq(bob) == bobSeq + 1); + bobSeq = env.seq(bob); + + // alice is the delegated account, her sequence won't increment + env(pay(bob, carol, XRP(10)), fee(XRP(10)), delegate::as(alice)); + env.close(); + BEAST_EXPECT(env.seq(alice) == aliceSeq); + BEAST_EXPECT(env.seq(bob) == bobSeq + 1); + bobSeq = env.seq(bob); + + // alice sends payment for herself, her sequence will increment + env(pay(alice, carol, XRP(10)), fee(XRP(10))); + BEAST_EXPECT(env.seq(alice) == aliceSeq + 1); + BEAST_EXPECT(env.seq(bob) == bobSeq); + aliceSeq = env.seq(alice); + } + } + + void + testAccountDelete() + { + testcase("test deleting account"); + using namespace jtx; + + Env env(*this); + Account alice{"alice"}; + Account bob{"bob"}; + env.fund(XRP(100000), alice, bob); + env.close(); + + env(delegate::set(alice, bob, {"Payment"})); + env.close(); + BEAST_EXPECT( + env.closed()->exists(keylet::delegate(alice.id(), bob.id()))); + + for (std::uint32_t i = 0; i < 256; ++i) + env.close(); + + auto const aliceBalance = env.balance(alice); + auto const bobBalance = env.balance(bob); + + // alice deletes account, this will remove the Delegate object + auto const deleteFee = drops(env.current()->fees().increment); + env(acctdelete(alice, bob), fee(deleteFee)); + env.close(); + + BEAST_EXPECT(!env.closed()->exists(keylet::account(alice.id()))); + BEAST_EXPECT(!env.closed()->exists(keylet::ownerDir(alice.id()))); + BEAST_EXPECT(env.balance(bob) == bobBalance + aliceBalance - deleteFee); + + BEAST_EXPECT( + !env.closed()->exists(keylet::delegate(alice.id(), bob.id()))); + } + + void + testDelegateTransaction() + { + testcase("test delegate transaction"); + using namespace jtx; + + Env env(*this); + Account alice{"alice"}; + Account bob{"bob"}; + Account carol{"carol"}; + + XRPAmount const baseFee{env.current()->fees().base}; + + // use different initial amount 
to distinguish the source balance + env.fund(XRP(10000), alice); + env.fund(XRP(20000), bob); + env.fund(XRP(30000), carol); + env.close(); + + auto aliceBalance = env.balance(alice, XRP); + auto bobBalance = env.balance(bob, XRP); + auto carolBalance = env.balance(carol, XRP); + + // can not send transaction on one's own behalf + env(pay(alice, bob, XRP(50)), delegate::as(alice), ter(temBAD_SIGNER)); + env.require(balance(alice, aliceBalance)); + + env(delegate::set(alice, bob, {"Payment"})); + env.close(); + env.require(balance(alice, aliceBalance - drops(baseFee))); + aliceBalance = env.balance(alice, XRP); + + // bob pays 50 XRP to carol on behalf of alice + env(pay(alice, carol, XRP(50)), delegate::as(bob)); + env.close(); + env.require(balance(alice, aliceBalance - XRP(50))); + env.require(balance(carol, carolBalance + XRP(50))); + // bob pays the fee + env.require(balance(bob, bobBalance - drops(baseFee))); + aliceBalance = env.balance(alice, XRP); + bobBalance = env.balance(bob, XRP); + carolBalance = env.balance(carol, XRP); + + // bob pays 50 XRP to bob self on behalf of alice + env(pay(alice, bob, XRP(50)), delegate::as(bob)); + env.close(); + env.require(balance(alice, aliceBalance - XRP(50))); + env.require(balance(bob, bobBalance + XRP(50) - drops(baseFee))); + aliceBalance = env.balance(alice, XRP); + bobBalance = env.balance(bob, XRP); + + // bob pay 50 XRP to alice herself on behalf of alice + env(pay(alice, alice, XRP(50)), delegate::as(bob), ter(temREDUNDANT)); + env.close(); + + // bob does not have permission to create check + env(check::create(alice, bob, XRP(10)), + delegate::as(bob), + ter(tecNO_PERMISSION)); + + // carol does not have permission to create check + env(check::create(alice, bob, XRP(10)), + delegate::as(carol), + ter(tecNO_PERMISSION)); + } + + void + testPaymentGranular() + { + testcase("test payment granular"); + using namespace jtx; + + // test PaymentMint and PaymentBurn + { + Env env(*this); + Account alice{"alice"}; + Account bob{"bob"}; + Account gw{"gateway"}; + Account gw2{"gateway2"}; + auto const USD = gw["USD"]; + auto const EUR = gw2["EUR"]; + + env.fund(XRP(10000), alice); + env.fund(XRP(20000), bob); + env.fund(XRP(40000), gw, gw2); + env.trust(USD(200), alice); + env.trust(EUR(400), gw); + env.close(); + + XRPAmount const baseFee{env.current()->fees().base}; + auto aliceBalance = env.balance(alice, XRP); + auto bobBalance = env.balance(bob, XRP); + auto gwBalance = env.balance(gw, XRP); + auto gw2Balance = env.balance(gw2, XRP); + + // delegate ledger object is not created yet + env(pay(gw, alice, USD(50)), + delegate::as(bob), + ter(tecNO_PERMISSION)); + env.require(balance(bob, bobBalance - drops(baseFee))); + bobBalance = env.balance(bob, XRP); + + // gw gives bob burn permission + env(delegate::set(gw, bob, {"PaymentBurn"})); + env.close(); + env.require(balance(gw, gwBalance - drops(baseFee))); + gwBalance = env.balance(gw, XRP); + + // bob sends a payment transaction on behalf of gw + env(pay(gw, alice, USD(50)), + delegate::as(bob), + ter(tecNO_PERMISSION)); + env.close(); + env.require(balance(bob, bobBalance - drops(baseFee))); + bobBalance = env.balance(bob, XRP); + + // gw gives bob mint permission, alice gives bob burn permission + env(delegate::set(gw, bob, {"PaymentMint"})); + env(delegate::set(alice, bob, {"PaymentBurn"})); + env.close(); + env.require(balance(alice, aliceBalance - drops(baseFee))); + env.require(balance(gw, gwBalance - drops(baseFee))); + aliceBalance = env.balance(alice, XRP); + gwBalance = 
env.balance(gw, XRP); + + // can not send XRP + env(pay(gw, alice, XRP(50)), + delegate::as(bob), + ter(tecNO_PERMISSION)); + env.close(); + env.require(balance(bob, bobBalance - drops(baseFee))); + bobBalance = env.balance(bob, XRP); + + // mint 50 USD + env(pay(gw, alice, USD(50)), delegate::as(bob)); + env.close(); + env.require(balance(bob, bobBalance - drops(baseFee))); + env.require(balance(gw, gwBalance)); + env.require(balance(gw, alice["USD"](-50))); + env.require(balance(alice, USD(50))); + BEAST_EXPECT(env.balance(bob, USD) == USD(0)); + bobBalance = env.balance(bob, XRP); + + // burn 30 USD + env(pay(alice, gw, USD(30)), delegate::as(bob)); + env.close(); + env.require(balance(bob, bobBalance - drops(baseFee))); + env.require(balance(gw, gwBalance)); + env.require(balance(gw, alice["USD"](-20))); + env.require(balance(alice, USD(20))); + BEAST_EXPECT(env.balance(bob, USD) == USD(0)); + bobBalance = env.balance(bob, XRP); + + // bob has both mint and burn permissions + env(delegate::set(gw, bob, {"PaymentMint", "PaymentBurn"})); + env.close(); + env.require(balance(gw, gwBalance - drops(baseFee))); + gwBalance = env.balance(gw, XRP); + + // mint 100 USD for gw + env(pay(gw, alice, USD(100)), delegate::as(bob)); + env.close(); + env.require(balance(gw, alice["USD"](-120))); + env.require(balance(alice, USD(120))); + env.require(balance(bob, bobBalance - drops(baseFee))); + bobBalance = env.balance(bob, XRP); + + // gw2 pays gw 200 EUR + env(pay(gw2, gw, EUR(200))); + env.close(); + env.require(balance(gw2, gw2Balance - drops(baseFee))); + gw2Balance = env.balance(gw2, XRP); + env.require(balance(gw2, gw["EUR"](-200))); + env.require(balance(gw, EUR(200))); + + // burn 100 EUR for gw + env(pay(gw, gw2, EUR(100)), delegate::as(bob)); + env.close(); + env.require(balance(gw2, gw["EUR"](-100))); + env.require(balance(gw, EUR(100))); + env.require(balance(bob, bobBalance - drops(baseFee))); + env.require(balance(gw, gwBalance)); + env.require(balance(gw2, gw2Balance)); + env.require(balance(alice, aliceBalance)); + } + + // test PaymentMint won't affect Payment transaction level delegation. 
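+        // With only the granular PaymentBurn permission bob cannot issue
+        // gw's USD; once the transaction-level Payment permission is added,
+        // Payment::checkPermission succeeds before any granular check, so
+        // the mint goes through.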
+ { + Env env(*this); + Account alice{"alice"}; + Account bob{"bob"}; + Account gw{"gateway"}; + auto const USD = gw["USD"]; + + env.fund(XRP(10000), alice); + env.fund(XRP(20000), bob); + env.fund(XRP(40000), gw); + env.trust(USD(200), alice); + env.close(); + + XRPAmount const baseFee{env.current()->fees().base}; + + auto aliceBalance = env.balance(alice, XRP); + auto bobBalance = env.balance(bob, XRP); + auto gwBalance = env.balance(gw, XRP); + + // gw gives bob PaymentBurn permission + env(delegate::set(gw, bob, {"PaymentBurn"})); + env.close(); + env.require(balance(gw, gwBalance - drops(baseFee))); + gwBalance = env.balance(gw, XRP); + + // bob can not mint on behalf of gw because he only has burn + // permission + env(pay(gw, alice, USD(50)), + delegate::as(bob), + ter(tecNO_PERMISSION)); + env.close(); + env.require(balance(bob, bobBalance - drops(baseFee))); + bobBalance = env.balance(bob, XRP); + + // gw gives bob Payment permission as well + env(delegate::set(gw, bob, {"PaymentBurn", "Payment"})); + env.close(); + env.require(balance(gw, gwBalance - drops(baseFee))); + gwBalance = env.balance(gw, XRP); + + // bob now can mint on behalf of gw + env(pay(gw, alice, USD(50)), delegate::as(bob)); + env.close(); + env.require(balance(bob, bobBalance - drops(baseFee))); + env.require(balance(gw, gwBalance)); + env.require(balance(alice, aliceBalance)); + env.require(balance(gw, alice["USD"](-50))); + env.require(balance(alice, USD(50))); + BEAST_EXPECT(env.balance(bob, USD) == USD(0)); + } + } + + void + testTrustSetGranular() + { + testcase("test TrustSet granular permissions"); + using namespace jtx; + + // test TrustlineUnfreeze, TrustlineFreeze and TrustlineAuthorize + { + Env env(*this); + Account gw{"gw"}; + Account alice{"alice"}; + Account bob{"bob"}; + env.fund(XRP(10000), gw, alice, bob); + env(fset(gw, asfRequireAuth)); + env.close(); + + env(delegate::set(alice, bob, {"TrustlineUnfreeze"})); + env.close(); + // bob can not create trustline on behalf of alice because he only + // has unfreeze permission + env(trust(alice, gw["USD"](50)), + delegate::as(bob), + ter(tecNO_PERMISSION)); + env.close(); + + // alice creates trustline by herself + env(trust(alice, gw["USD"](50))); + env.close(); + + // gw gives bob unfreeze permission + env(delegate::set(gw, bob, {"TrustlineUnfreeze"})); + env.close(); + + // unsupported flags + env(trust(alice, gw["USD"](50), tfSetNoRipple), + delegate::as(bob), + ter(tecNO_PERMISSION)); + env(trust(alice, gw["USD"](50), tfClearNoRipple), + delegate::as(bob), + ter(tecNO_PERMISSION)); + env(trust(gw, gw["USD"](0), alice, tfSetDeepFreeze), + delegate::as(bob), + ter(tecNO_PERMISSION)); + env(trust(gw, gw["USD"](0), alice, tfClearDeepFreeze), + delegate::as(bob), + ter(tecNO_PERMISSION)); + env.close(); + + // supported flags with wrong permission + env(trust(gw, gw["USD"](0), alice, tfSetfAuth), + delegate::as(bob), + ter(tecNO_PERMISSION)); + env(trust(gw, gw["USD"](0), alice, tfSetFreeze), + delegate::as(bob), + ter(tecNO_PERMISSION)); + env.close(); + + env(delegate::set(gw, bob, {"TrustlineAuthorize"})); + env.close(); + env(trust(gw, gw["USD"](0), alice, tfClearFreeze), + delegate::as(bob), + ter(tecNO_PERMISSION)); + env.close(); + // although trustline authorize is granted, bob can not change the + // limit number + env(trust(gw, gw["USD"](50), alice, tfSetfAuth), + delegate::as(bob), + ter(tecNO_PERMISSION)); + env.close(); + + // supported flags with correct permission + env(trust(gw, gw["USD"](0), alice, tfSetfAuth), delegate::as(bob)); 
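+            // tfSetfAuth is accepted here because gw granted bob the
+            // TrustlineAuthorize granular permission above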
+ env.close(); + env(delegate::set( + gw, bob, {"TrustlineAuthorize", "TrustlineFreeze"})); + env.close(); + env(trust(gw, gw["USD"](0), alice, tfSetFreeze), delegate::as(bob)); + env.close(); + env(delegate::set( + gw, bob, {"TrustlineAuthorize", "TrustlineUnfreeze"})); + env.close(); + env(trust(gw, gw["USD"](0), alice, tfClearFreeze), + delegate::as(bob)); + env.close(); + // but bob can not freeze trustline because he no longer has freeze + // permission + env(trust(gw, gw["USD"](0), alice, tfSetFreeze), + delegate::as(bob), + ter(tecNO_PERMISSION)); + + // cannot update LimitAmount with granular permission, both high and + // low account + env(trust(alice, gw["USD"](100)), + delegate::as(bob), + ter(tecNO_PERMISSION)); + env(trust(gw, alice["USD"](100)), + delegate::as(bob), + ter(tecNO_PERMISSION)); + + // can not set QualityIn or QualityOut + auto tx = trust(alice, gw["USD"](50)); + tx["QualityIn"] = "1000"; + env(tx, delegate::as(bob), ter(tecNO_PERMISSION)); + auto tx2 = trust(alice, gw["USD"](50)); + tx2["QualityOut"] = "1000"; + env(tx2, delegate::as(bob), ter(tecNO_PERMISSION)); + auto tx3 = trust(gw, alice["USD"](50)); + tx3["QualityIn"] = "1000"; + env(tx3, delegate::as(bob), ter(tecNO_PERMISSION)); + auto tx4 = trust(gw, alice["USD"](50)); + tx4["QualityOut"] = "1000"; + env(tx4, delegate::as(bob), ter(tecNO_PERMISSION)); + + // granting TrustSet can make it work + env(delegate::set(gw, bob, {"TrustSet"})); + env.close(); + auto tx5 = trust(gw, alice["USD"](50)); + tx5["QualityOut"] = "1000"; + env(tx5, delegate::as(bob)); + auto tx6 = trust(alice, gw["USD"](50)); + tx6["QualityOut"] = "1000"; + env(tx6, delegate::as(bob), ter(tecNO_PERMISSION)); + env(delegate::set(alice, bob, {"TrustSet"})); + env.close(); + env(tx6, delegate::as(bob)); + } + + // test mix of transaction level delegation and granular delegation + { + Env env(*this); + Account gw{"gw"}; + Account alice{"alice"}; + Account bob{"bob"}; + env.fund(XRP(10000), gw, alice, bob); + env(fset(gw, asfRequireAuth)); + env.close(); + + // bob does not have permission + env(trust(alice, gw["USD"](50)), + delegate::as(bob), + ter(tecNO_PERMISSION)); + env(delegate::set( + alice, bob, {"TrustlineUnfreeze", "NFTokenCreateOffer"})); + env.close(); + // bob still does not have permission + env(trust(alice, gw["USD"](50)), + delegate::as(bob), + ter(tecNO_PERMISSION)); + + // add TrustSet permission and some unrelated permission + env(delegate::set( + alice, + bob, + {"TrustlineUnfreeze", + "NFTokenCreateOffer", + "TrustSet", + "AccountTransferRateSet"})); + env.close(); + env(trust(alice, gw["USD"](50)), delegate::as(bob)); + env.close(); + + env(delegate::set( + gw, + bob, + {"TrustlineUnfreeze", + "NFTokenCreateOffer", + "TrustSet", + "AccountTransferRateSet"})); + env.close(); + + // since bob has TrustSet permission, he does not need + // TrustlineFreeze granular permission to freeze the trustline + env(trust(gw, gw["USD"](0), alice, tfSetFreeze), delegate::as(bob)); + env(trust(gw, gw["USD"](0), alice, tfClearFreeze), + delegate::as(bob)); + // bob can perform all the operations regarding TrustSet + env(trust(gw, gw["USD"](0), alice, tfSetFreeze), delegate::as(bob)); + env(trust(gw, gw["USD"](0), alice, tfSetDeepFreeze), + delegate::as(bob)); + env(trust(gw, gw["USD"](0), alice, tfClearDeepFreeze), + delegate::as(bob)); + env(trust(gw, gw["USD"](0), alice, tfSetfAuth), delegate::as(bob)); + env(trust(alice, gw["USD"](50), tfSetNoRipple), delegate::as(bob)); + env(trust(alice, gw["USD"](50), tfClearNoRipple), + 
delegate::as(bob)); + } + } + + void + testAccountSetGranular() + { + testcase("test AccountSet granular permissions"); + using namespace jtx; + + // test AccountDomainSet, AccountEmailHashSet, + // AccountMessageKeySet,AccountTransferRateSet, and AccountTickSizeSet + // granular permissions + { + Env env(*this); + auto const alice = Account{"alice"}; + auto const bob = Account{"bob"}; + env.fund(XRP(10000), alice, bob); + env.close(); + + // alice gives bob some random permission, which is not related to + // the AccountSet transaction + env(delegate::set(alice, bob, {"TrustlineUnfreeze"})); + env.close(); + + // bob does not have permission to set domain + // on behalf of alice + std::string const domain = "example.com"; + auto jt = noop(alice); + jt[sfDomain.fieldName] = strHex(domain); + jt[sfDelegate.fieldName] = bob.human(); + jt[sfFlags.fieldName] = tfFullyCanonicalSig; + + // add granular permission related to AccountSet but is not the + // correct permission for domain set + env(delegate::set( + alice, bob, {"TrustlineUnfreeze", "AccountEmailHashSet"})); + env.close(); + env(jt, ter(tecNO_PERMISSION)); + + // alice give granular permission of AccountDomainSet to bob + env(delegate::set(alice, bob, {"AccountDomainSet"})); + env.close(); + + // bob set account domain on behalf of alice + env(jt); + BEAST_EXPECT((*env.le(alice))[sfDomain] == makeSlice(domain)); + + // bob can reset domain + jt[sfDomain.fieldName] = ""; + env(jt); + BEAST_EXPECT(!env.le(alice)->isFieldPresent(sfDomain)); + + // if flag is not equal to tfFullyCanonicalSig, which means bob + // is trying to set the flag at the same time, it will fail + std::string const failDomain = "fail_domain_update"; + jt[sfFlags.fieldName] = tfRequireAuth; + jt[sfDomain.fieldName] = strHex(failDomain); + env(jt, ter(tecNO_PERMISSION)); + // reset flag number + jt[sfFlags.fieldName] = tfFullyCanonicalSig; + + // bob tries to update domain and set email hash, + // but he does not have permission to set email hash + jt[sfDomain.fieldName] = strHex(domain); + std::string const mh("5F31A79367DC3137FADA860C05742EE6"); + jt[sfEmailHash.fieldName] = mh; + env(jt, ter(tecNO_PERMISSION)); + + // alice give granular permission of AccountEmailHashSet to bob + env(delegate::set( + alice, bob, {"AccountDomainSet", "AccountEmailHashSet"})); + env.close(); + env(jt); + BEAST_EXPECT(to_string((*env.le(alice))[sfEmailHash]) == mh); + BEAST_EXPECT((*env.le(alice))[sfDomain] == makeSlice(domain)); + + // bob does not have permission to set message key for alice + auto const rkp = randomKeyPair(KeyType::ed25519); + jt[sfMessageKey.fieldName] = strHex(rkp.first.slice()); + env(jt, ter(tecNO_PERMISSION)); + + // alice give granular permission of AccountMessageKeySet to bob + env(delegate::set( + alice, + bob, + {"AccountDomainSet", + "AccountEmailHashSet", + "AccountMessageKeySet"})); + env.close(); + + // bob can set message key for alice + env(jt); + BEAST_EXPECT( + strHex((*env.le(alice))[sfMessageKey]) == + strHex(rkp.first.slice())); + jt[sfMessageKey.fieldName] = ""; + env(jt); + BEAST_EXPECT(!env.le(alice)->isFieldPresent(sfMessageKey)); + + // bob does not have permission to set transfer rate for alice + env(rate(alice, 2.0), delegate::as(bob), ter(tecNO_PERMISSION)); + + // alice give granular permission of AccountTransferRateSet to bob + env(delegate::set( + alice, + bob, + {"AccountDomainSet", + "AccountEmailHashSet", + "AccountMessageKeySet", + "AccountTransferRateSet"})); + env.close(); + auto jtRate = rate(alice, 2.0); + 
jtRate[sfDelegate.fieldName] = bob.human();
+            jtRate[sfFlags.fieldName] = tfFullyCanonicalSig;
+            env(jtRate, delegate::as(bob));
+            BEAST_EXPECT((*env.le(alice))[sfTransferRate] == 2000000000);
+
+            // bob does not have permission to set tick size for alice
+            jt[sfTickSize.fieldName] = 8;
+            env(jt, ter(tecNO_PERMISSION));
+
+            // alice gives granular permission of AccountTickSizeSet to bob
+            env(delegate::set(
+                alice,
+                bob,
+                {"AccountDomainSet",
+                 "AccountEmailHashSet",
+                 "AccountMessageKeySet",
+                 "AccountTransferRateSet",
+                 "AccountTickSizeSet"}));
+            env.close();
+            env(jt);
+            BEAST_EXPECT((*env.le(alice))[sfTickSize] == 8);
+
+            // can not set asfRequireAuth flag for alice
+            env(fset(alice, asfRequireAuth),
+                delegate::as(bob),
+                ter(tecNO_PERMISSION));
+
+            // resetting the Delegate with an empty permission list deletes
+            // the Delegate object
+            env(delegate::set(alice, bob, {}));
+            // bob still does not have permission to set asfRequireAuth for
+            // alice
+            env(fset(alice, asfRequireAuth),
+                delegate::as(bob),
+                ter(tecNO_PERMISSION));
+            // alice can set for herself
+            env(fset(alice, asfRequireAuth));
+            env.require(flags(alice, asfRequireAuth));
+            env.close();
+
+            // can not update tick size because bob no longer has permission
+            jt[sfTickSize.fieldName] = 7;
+            env(jt, ter(tecNO_PERMISSION));
+
+            env(delegate::set(
+                alice,
+                bob,
+                {"AccountDomainSet",
+                 "AccountEmailHashSet",
+                 "AccountMessageKeySet"}));
+            env.close();
+
+            // bob does not have permission to set wallet locator for alice
+            std::string const locator =
+                "9633EC8AF54F16B5286DB1D7B519EF49EEFC050C0C8AC4384F1D88ACD1BFDF"
+                "05";
+            auto jt2 = noop(alice);
+            jt2[sfDomain.fieldName] = strHex(domain);
+            jt2[sfDelegate.fieldName] = bob.human();
+            jt2[sfWalletLocator.fieldName] = locator;
+            jt2[sfFlags.fieldName] = tfFullyCanonicalSig;
+            env(jt2, ter(tecNO_PERMISSION));
+        }
+
+        // can not set AccountSet flags on behalf of another account
+        {
+            Env env(*this);
+            auto const alice = Account{"alice"};
+            auto const bob = Account{"bob"};
+            env.fund(XRP(10000), alice, bob);
+            env.close();
+
+            auto testSetClearFlag = [&](std::uint32_t flag) {
+                // bob can not set flag on behalf of alice
+                env(fset(alice, flag),
+                    delegate::as(bob),
+                    ter(tecNO_PERMISSION));
+                // alice sets the flag herself
+                env(fset(alice, flag));
+                env.close();
+                env.require(flags(alice, flag));
+                // bob can not clear on behalf of alice
+                env(fclear(alice, flag),
+                    delegate::as(bob),
+                    ter(tecNO_PERMISSION));
+            };
+
+            // testSetClearFlag(asfNoFreeze);
+            testSetClearFlag(asfRequireAuth);
+            testSetClearFlag(asfAllowTrustLineClawback);
+
+            // alice gives some granular permissions to bob
+            env(delegate::set(
+                alice,
+                bob,
+                {"AccountDomainSet",
+                 "AccountEmailHashSet",
+                 "AccountMessageKeySet"}));
+            env.close();
+
+            testSetClearFlag(asfDefaultRipple);
+            testSetClearFlag(asfDepositAuth);
+            testSetClearFlag(asfDisallowIncomingCheck);
+            testSetClearFlag(asfDisallowIncomingNFTokenOffer);
+            testSetClearFlag(asfDisallowIncomingPayChan);
+            testSetClearFlag(asfDisallowIncomingTrustline);
+            testSetClearFlag(asfDisallowXRP);
+            testSetClearFlag(asfRequireDest);
+            testSetClearFlag(asfGlobalFreeze);
+
+            // bob can not set asfAccountTxnID on behalf of alice
+            env(fset(alice, asfAccountTxnID),
+                delegate::as(bob),
+                ter(tecNO_PERMISSION));
+            env(fset(alice, asfAccountTxnID));
+            env.close();
+            BEAST_EXPECT(env.le(alice)->isFieldPresent(sfAccountTxnID));
+            env(fclear(alice, asfAccountTxnID),
+                delegate::as(bob),
+                ter(tecNO_PERMISSION));
+
+            // bob can not set asfAuthorizedNFTokenMinter on behalf of alice
+            Json::Value jt =
fset(alice, asfAuthorizedNFTokenMinter); + jt[sfDelegate.fieldName] = bob.human(); + jt[sfNFTokenMinter.fieldName] = bob.human(); + env(jt, ter(tecNO_PERMISSION)); + + // bob gives alice some permissions + env(delegate::set( + bob, + alice, + {"AccountDomainSet", + "AccountEmailHashSet", + "AccountMessageKeySet"})); + env.close(); + + // since we can not set asfNoFreeze if asfAllowTrustLineClawback is + // set, which can not be clear either. Test alice set asfNoFreeze on + // behalf of bob. + env(fset(alice, asfNoFreeze), + delegate::as(bob), + ter(tecNO_PERMISSION)); + env(fset(bob, asfNoFreeze)); + env.close(); + env.require(flags(bob, asfNoFreeze)); + // alice can not clear on behalf of bob + env(fclear(alice, asfNoFreeze), + delegate::as(bob), + ter(tecNO_PERMISSION)); + + // bob can not set asfDisableMaster on behalf of alice + Account const bobKey{"bobKey", KeyType::secp256k1}; + env(regkey(bob, bobKey)); + env.close(); + env(fset(alice, asfDisableMaster), + delegate::as(bob), + sig(bob), + ter(tecNO_PERMISSION)); + } + } + + void + testMPTokenIssuanceSetGranular() + { + testcase("test MPTokenIssuanceSet granular"); + using namespace jtx; + + // test MPTokenIssuanceUnlock and MPTokenIssuanceLock permissions + { + Env env(*this); + Account alice{"alice"}; + Account bob{"bob"}; + env.fund(XRP(100000), alice, bob); + env.close(); + + MPTTester mpt(env, alice, {.fund = false}); + env.close(); + mpt.create({.flags = tfMPTCanLock}); + env.close(); + + // delegate ledger object is not created yet + mpt.set( + {.account = alice, + .flags = tfMPTLock, + .delegate = bob, + .err = tecNO_PERMISSION}); + + // alice gives granular permission to bob of MPTokenIssuanceUnlock + env(delegate::set(alice, bob, {"MPTokenIssuanceUnlock"})); + env.close(); + // bob does not have lock permission + mpt.set( + {.account = alice, + .flags = tfMPTLock, + .delegate = bob, + .err = tecNO_PERMISSION}); + // bob now has lock permission, but does not have unlock permission + env(delegate::set(alice, bob, {"MPTokenIssuanceLock"})); + env.close(); + mpt.set({.account = alice, .flags = tfMPTLock, .delegate = bob}); + mpt.set( + {.account = alice, + .flags = tfMPTUnlock, + .delegate = bob, + .err = tecNO_PERMISSION}); + + // now bob can lock and unlock + env(delegate::set( + alice, bob, {"MPTokenIssuanceLock", "MPTokenIssuanceUnlock"})); + env.close(); + mpt.set({.account = alice, .flags = tfMPTUnlock, .delegate = bob}); + mpt.set({.account = alice, .flags = tfMPTLock, .delegate = bob}); + env.close(); + } + + // test mix of granular and transaction level permission + { + Env env(*this); + Account alice{"alice"}; + Account bob{"bob"}; + env.fund(XRP(100000), alice, bob); + env.close(); + + MPTTester mpt(env, alice, {.fund = false}); + env.close(); + mpt.create({.flags = tfMPTCanLock}); + env.close(); + + // alice gives granular permission to bob of MPTokenIssuanceLock + env(delegate::set(alice, bob, {"MPTokenIssuanceLock"})); + env.close(); + mpt.set({.account = alice, .flags = tfMPTLock, .delegate = bob}); + // bob does not have unlock permission + mpt.set( + {.account = alice, + .flags = tfMPTUnlock, + .delegate = bob, + .err = tecNO_PERMISSION}); + + // alice gives bob some unrelated permission with + // MPTokenIssuanceLock + env(delegate::set( + alice, + bob, + {"NFTokenMint", "MPTokenIssuanceLock", "NFTokenBurn"})); + env.close(); + // bob can not unlock + mpt.set( + {.account = alice, + .flags = tfMPTUnlock, + .delegate = bob, + .err = tecNO_PERMISSION}); + + // alice add MPTokenIssuanceSet to permissions + 
env(delegate::set( + alice, + bob, + {"NFTokenMint", + "MPTokenIssuanceLock", + "NFTokenBurn", + "MPTokenIssuanceSet"})); + mpt.set({.account = alice, .flags = tfMPTUnlock, .delegate = bob}); + // alice can lock by herself + mpt.set({.account = alice, .flags = tfMPTLock}); + mpt.set({.account = alice, .flags = tfMPTUnlock, .delegate = bob}); + mpt.set({.account = alice, .flags = tfMPTLock, .delegate = bob}); + } + } + + void + testSingleSign() + { + testcase("test single sign"); + using namespace jtx; + + Env env(*this); + Account alice{"alice"}; + Account bob{"bob"}; + Account carol{"carol"}; + env.fund(XRP(100000), alice, bob, carol); + env.close(); + + env(delegate::set(alice, bob, {"Payment"})); + env.close(); + + auto aliceBalance = env.balance(alice); + auto bobBalance = env.balance(bob); + auto carolBalance = env.balance(carol); + + env(pay(alice, carol, XRP(100)), + fee(XRP(10)), + delegate::as(bob), + sig(bob)); + env.close(); + BEAST_EXPECT(env.balance(alice) == aliceBalance - XRP(100)); + BEAST_EXPECT(env.balance(bob) == bobBalance - XRP(10)); + BEAST_EXPECT(env.balance(carol) == carolBalance + XRP(100)); + } + + void + testSingleSignBadSecret() + { + testcase("test single sign with bad secret"); + using namespace jtx; + + Env env(*this); + Account alice{"alice"}; + Account bob{"bob"}; + Account carol{"carol"}; + env.fund(XRP(100000), alice, bob, carol); + env.close(); + + env(delegate::set(alice, bob, {"Payment"})); + env.close(); + + auto aliceBalance = env.balance(alice); + auto bobBalance = env.balance(bob); + auto carolBalance = env.balance(carol); + + env(pay(alice, carol, XRP(100)), + fee(XRP(10)), + delegate::as(bob), + sig(alice), + ter(tefBAD_AUTH)); + env.close(); + BEAST_EXPECT(env.balance(alice) == aliceBalance); + BEAST_EXPECT(env.balance(bob) == bobBalance); + BEAST_EXPECT(env.balance(carol) == carolBalance); + } + + void + testMultiSign() + { + testcase("test multi sign"); + using namespace jtx; + + Env env(*this); + Account alice{"alice"}; + Account bob{"bob"}; + Account carol{"carol"}; + Account daria{"daria"}; + Account edward{"edward"}; + env.fund(XRP(100000), alice, bob, carol, daria, edward); + env.close(); + + env(signers(bob, 2, {{daria, 1}, {edward, 1}})); + env.close(); + + env(delegate::set(alice, bob, {"Payment"})); + env.close(); + + auto aliceBalance = env.balance(alice); + auto bobBalance = env.balance(bob); + auto carolBalance = env.balance(carol); + auto dariaBalance = env.balance(daria); + auto edwardBalance = env.balance(edward); + + env(pay(alice, carol, XRP(100)), + fee(XRP(10)), + delegate::as(bob), + msig(daria, edward)); + env.close(); + BEAST_EXPECT(env.balance(alice) == aliceBalance - XRP(100)); + BEAST_EXPECT(env.balance(bob) == bobBalance - XRP(10)); + BEAST_EXPECT(env.balance(carol) == carolBalance + XRP(100)); + BEAST_EXPECT(env.balance(daria) == dariaBalance); + BEAST_EXPECT(env.balance(edward) == edwardBalance); + } + + void + testMultiSignQuorumNotMet() + { + testcase("test multi sign which does not meet quorum"); + using namespace jtx; + + Env env(*this); + Account alice{"alice"}; + Account bob{"bob"}; + Account carol{"carol"}; + Account daria = Account{"daria"}; + Account edward = Account{"edward"}; + Account fred = Account{"fred"}; + env.fund(XRP(100000), alice, bob, carol, daria, edward, fred); + env.close(); + + env(signers(bob, 3, {{daria, 1}, {edward, 1}, {fred, 1}})); + env.close(); + + env(delegate::set(alice, bob, {"Payment"})); + env.close(); + + auto aliceBalance = env.balance(alice); + auto bobBalance = 
env.balance(bob); + auto carolBalance = env.balance(carol); + auto dariaBalance = env.balance(daria); + auto edwardBalance = env.balance(edward); + + env(pay(alice, carol, XRP(100)), + fee(XRP(10)), + delegate::as(bob), + msig(daria, edward), + ter(tefBAD_QUORUM)); + env.close(); + BEAST_EXPECT(env.balance(alice) == aliceBalance); + BEAST_EXPECT(env.balance(bob) == bobBalance); + BEAST_EXPECT(env.balance(carol) == carolBalance); + BEAST_EXPECT(env.balance(daria) == dariaBalance); + BEAST_EXPECT(env.balance(edward) == edwardBalance); + } + + void + run() override + { + testFeatureDisabled(); + testDelegateSet(); + testInvalidRequest(); + testReserve(); + testFee(); + testSequence(); + testAccountDelete(); + testDelegateTransaction(); + testPaymentGranular(); + testTrustSetGranular(); + testAccountSetGranular(); + testMPTokenIssuanceSetGranular(); + testSingleSign(); + testSingleSignBadSecret(); + testMultiSign(); + testMultiSignQuorumNotMet(); + } +}; +BEAST_DEFINE_TESTSUITE(Delegate, app, ripple); +} // namespace test +} // namespace ripple \ No newline at end of file diff --git a/src/test/jtx/delegate.h b/src/test/jtx/delegate.h new file mode 100644 index 0000000000..9e8850fbe2 --- /dev/null +++ b/src/test/jtx/delegate.h @@ -0,0 +1,62 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2025 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#pragma once + +#include +#include + +namespace ripple { +namespace test { +namespace jtx { + +namespace delegate { + +Json::Value +set(jtx::Account const& account, + jtx::Account const& authorize, + std::vector const& permissions); + +Json::Value +entry( + jtx::Env& env, + jtx::Account const& account, + jtx::Account const& authorize); + +struct as +{ +private: + jtx::Account delegate_; + +public: + explicit as(jtx::Account const& account) : delegate_(account) + { + } + + void + operator()(jtx::Env&, jtx::JTx& jtx) const + { + jtx.jv[sfDelegate.jsonName] = delegate_.human(); + } +}; + +} // namespace delegate +} // namespace jtx +} // namespace test +} // namespace ripple \ No newline at end of file diff --git a/src/test/jtx/flags.h b/src/test/jtx/flags.h index b5e8207803..09e5dac52f 100644 --- a/src/test/jtx/flags.h +++ b/src/test/jtx/flags.h @@ -84,6 +84,18 @@ private: case asfAllowTrustLineClawback: mask_ |= lsfAllowTrustLineClawback; break; + case asfDisallowIncomingCheck: + mask_ |= lsfDisallowIncomingCheck; + break; + case asfDisallowIncomingNFTokenOffer: + mask_ |= lsfDisallowIncomingNFTokenOffer; + break; + case asfDisallowIncomingPayChan: + mask_ |= lsfDisallowIncomingPayChan; + break; + case asfDisallowIncomingTrustline: + mask_ |= lsfDisallowIncomingTrustline; + break; default: Throw("unknown flag"); } diff --git a/src/test/jtx/impl/Env.cpp b/src/test/jtx/impl/Env.cpp index e006f268cf..ecb2d62f43 100644 --- a/src/test/jtx/impl/Env.cpp +++ b/src/test/jtx/impl/Env.cpp @@ -465,7 +465,9 @@ Env::autofill_sig(JTx& jt) return jt.signer(*this, jt); if (!jt.fill_sig) return; - auto const account = lookup(jv[jss::Account].asString()); + auto const account = jv.isMember(sfDelegate.jsonName) + ? lookup(jv[sfDelegate.jsonName].asString()) + : lookup(jv[jss::Account].asString()); if (!app().checkSigs()) { jv[jss::SigningPubKey] = strHex(account.pk().slice()); diff --git a/src/test/jtx/impl/delegate.cpp b/src/test/jtx/impl/delegate.cpp new file mode 100644 index 0000000000..3ceedff190 --- /dev/null +++ b/src/test/jtx/impl/delegate.cpp @@ -0,0 +1,67 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2025 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include + +#include + +namespace ripple { +namespace test { +namespace jtx { + +namespace delegate { + +Json::Value +set(jtx::Account const& account, + jtx::Account const& authorize, + std::vector const& permissions) +{ + Json::Value jv; + jv[jss::TransactionType] = jss::DelegateSet; + jv[jss::Account] = account.human(); + jv[sfAuthorize.jsonName] = authorize.human(); + Json::Value permissionsJson(Json::arrayValue); + for (auto const& permission : permissions) + { + Json::Value permissionValue; + permissionValue[sfPermissionValue.jsonName] = permission; + Json::Value permissionObj; + permissionObj[sfPermission.jsonName] = permissionValue; + permissionsJson.append(permissionObj); + } + + jv[sfPermissions.jsonName] = permissionsJson; + + return jv; +} + +Json::Value +entry(jtx::Env& env, jtx::Account const& account, jtx::Account const& authorize) +{ + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::delegate][jss::account] = account.human(); + jvParams[jss::delegate][jss::authorize] = authorize.human(); + return env.rpc("json", "ledger_entry", to_string(jvParams)); +} + +} // namespace delegate +} // namespace jtx +} // namespace test +} // namespace ripple \ No newline at end of file diff --git a/src/test/jtx/impl/mpt.cpp b/src/test/jtx/impl/mpt.cpp index ead6a47c25..51490ad21e 100644 --- a/src/test/jtx/impl/mpt.cpp +++ b/src/test/jtx/impl/mpt.cpp @@ -233,6 +233,8 @@ MPTTester::set(MPTSet const& arg) } if (arg.holder) jv[sfHolder] = arg.holder->human(); + if (arg.delegate) + jv[sfDelegate] = arg.delegate->human(); if (submit(arg, jv) == tesSUCCESS && arg.flags.value_or(0)) { auto require = [&](std::optional const& holder, diff --git a/src/test/jtx/mpt.h b/src/test/jtx/mpt.h index 12b9d74d27..950ab0d409 100644 --- a/src/test/jtx/mpt.h +++ b/src/test/jtx/mpt.h @@ -136,6 +136,7 @@ struct MPTSet std::optional ownerCount = std::nullopt; std::optional holderCount = std::nullopt; std::optional flags = std::nullopt; + std::optional delegate = std::nullopt; std::optional err = std::nullopt; }; diff --git a/src/test/rpc/JSONRPC_test.cpp b/src/test/rpc/JSONRPC_test.cpp index 8d4f763125..cd26758c1f 100644 --- a/src/test/rpc/JSONRPC_test.cpp +++ b/src/test/rpc/JSONRPC_test.cpp @@ -2042,6 +2042,78 @@ static constexpr TxnTestData txnTestArray[] = { "Cannot specify differing 'Amount' and 'DeliverMax'", "Cannot specify differing 'Amount' and 'DeliverMax'"}}}, + {"Minimal delegated transaction.", + __LINE__, + R"({ + "command": "doesnt_matter", + "secret": "a", + "tx_json": { + "Account": "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", + "Amount": "1000000000", + "Destination": "rnUy2SHTrB9DubsPmkJZUXTf5FcNDGrYEA", + "TransactionType": "Payment", + "Delegate": "rnUy2SHTrB9DubsPmkJZUXTf5FcNDGrYEA" + } +})", + {{"", + "", + "Missing field 'account'.", + "Missing field 'tx_json.Sequence'."}}}, + + {"Delegate not well formed.", + __LINE__, + R"({ + "command": "doesnt_matter", + "secret": "a", + "tx_json": { + "Account": "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", + "Amount": "1000000000", + "Destination": "rJrxi4Wxev4bnAGVNP9YCdKPdAoKfAmcsi", + "TransactionType": "Payment", + "Delegate": "NotAnAccount" + } +})", + {{"Invalid field 'tx_json.Delegate'.", + "Invalid field 'tx_json.Delegate'.", + "Missing field 'account'.", + "Missing field 'tx_json.Sequence'."}}}, + + {"Delegate not in ledger.", + __LINE__, + R"({ + "command": "doesnt_matter", + "secret": "a", + "tx_json": { + "Account": 
"rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", + "Amount": "1000000000", + "Destination": "rJrxi4Wxev4bnAGVNP9YCdKPdAoKfAmcsi", + "TransactionType": "Payment", + "Delegate": "rDg53Haik2475DJx8bjMDSDPj4VX7htaMd" + } +})", + {{"Delegate account not found.", + "Delegate account not found.", + "Missing field 'account'.", + "Missing field 'tx_json.Sequence'."}}}, + + {"Delegate and secret not match.", + __LINE__, + R"({ + "command": "doesnt_matter", + "secret": "aa", + "tx_json": { + "Account": "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", + "Amount": "1000000000", + "Destination": "rJrxi4Wxev4bnAGVNP9YCdKPdAoKfAmcsi", + "TransactionType": "Payment", + "Delegate": "rnUy2SHTrB9DubsPmkJZUXTf5FcNDGrYEA" + } +})", + {{"Secret does not match account.", + "Secret does not match account.", + "Missing field 'account'.", + "Missing field 'tx_json.Sequence'."}}}, + }; class JSONRPC_test : public beast::unit_test::suite diff --git a/src/test/rpc/LedgerEntry_test.cpp b/src/test/rpc/LedgerEntry_test.cpp index 32332adb20..465d6c6631 100644 --- a/src/test/rpc/LedgerEntry_test.cpp +++ b/src/test/rpc/LedgerEntry_test.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -439,6 +440,116 @@ class LedgerEntry_test : public beast::unit_test::suite } } + void + testLedgerEntryDelegate() + { + testcase("ledger_entry Delegate"); + + using namespace test::jtx; + + Env env{*this}; + Account const alice{"alice"}; + Account const bob{"bob"}; + env.fund(XRP(10000), alice, bob); + env.close(); + env(delegate::set(alice, bob, {"Payment", "CheckCreate"})); + env.close(); + std::string const ledgerHash{to_string(env.closed()->info().hash)}; + std::string delegateIndex; + { + // Request by account and authorize + Json::Value jvParams; + jvParams[jss::delegate][jss::account] = alice.human(); + jvParams[jss::delegate][jss::authorize] = bob.human(); + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + BEAST_EXPECT( + jrr[jss::node][sfLedgerEntryType.jsonName] == jss::Delegate); + BEAST_EXPECT(jrr[jss::node][sfAccount.jsonName] == alice.human()); + BEAST_EXPECT(jrr[jss::node][sfAuthorize.jsonName] == bob.human()); + delegateIndex = jrr[jss::node][jss::index].asString(); + } + { + // Request by index. + Json::Value jvParams; + jvParams[jss::delegate] = delegateIndex; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + BEAST_EXPECT( + jrr[jss::node][sfLedgerEntryType.jsonName] == jss::Delegate); + BEAST_EXPECT(jrr[jss::node][sfAccount.jsonName] == alice.human()); + BEAST_EXPECT(jrr[jss::node][sfAuthorize.jsonName] == bob.human()); + } + { + // Malformed request: delegate neither object nor string. + Json::Value jvParams; + jvParams[jss::delegate] = 5; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + { + // Malformed request: delegate not hex string. 
+ Json::Value jvParams; + jvParams[jss::delegate] = "0123456789ABCDEFG"; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedRequest", ""); + } + { + // Malformed request: account not a string + Json::Value jvParams; + jvParams[jss::delegate][jss::account] = 5; + jvParams[jss::delegate][jss::authorize] = bob.human(); + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedAddress", ""); + } + { + // Malformed request: authorize not a string + Json::Value jvParams; + jvParams[jss::delegate][jss::account] = alice.human(); + jvParams[jss::delegate][jss::authorize] = 5; + jvParams[jss::ledger_hash] = ledgerHash; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + checkErrorValue(jrr, "malformedAddress", ""); + } + { + // this lambda function is used test malformed account and authroize + auto testMalformedAccount = + [&](std::optional const& account, + std::optional const& authorize, + std::string const& error) { + Json::Value jvParams; + jvParams[jss::ledger_hash] = ledgerHash; + if (account) + jvParams[jss::delegate][jss::account] = *account; + if (authorize) + jvParams[jss::delegate][jss::authorize] = *authorize; + auto const jrr = env.rpc( + "json", + "ledger_entry", + to_string(jvParams))[jss::result]; + checkErrorValue(jrr, error, ""); + }; + // missing account + testMalformedAccount(std::nullopt, bob.human(), "malformedRequest"); + // missing authorize + testMalformedAccount( + alice.human(), std::nullopt, "malformedRequest"); + // malformed account + testMalformedAccount("-", bob.human(), "malformedAddress"); + // malformed authorize + testMalformedAccount(alice.human(), "-", "malformedAddress"); + } + } + void testLedgerEntryDepositPreauth() { @@ -2266,6 +2377,7 @@ public: testLedgerEntryAccountRoot(); testLedgerEntryCheck(); testLedgerEntryCredentials(); + testLedgerEntryDelegate(); testLedgerEntryDepositPreauth(); testLedgerEntryDepositPreauthCred(); testLedgerEntryDirectory(); diff --git a/src/test/rpc/LedgerRPC_test.cpp b/src/test/rpc/LedgerRPC_test.cpp index 78caecb945..4e8d2964ca 100644 --- a/src/test/rpc/LedgerRPC_test.cpp +++ b/src/test/rpc/LedgerRPC_test.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include #include diff --git a/src/xrpld/app/misc/AMMUtils.h b/src/xrpld/app/misc/AMMUtils.h index ebc2834109..b2c0007dc7 100644 --- a/src/xrpld/app/misc/AMMUtils.h +++ b/src/xrpld/app/misc/AMMUtils.h @@ -17,8 +17,8 @@ */ //============================================================================== -#ifndef RIPPLE_APP_MISC_AMMUTILS_H_INLCUDED -#define RIPPLE_APP_MISC_AMMUTILS_H_INLCUDED +#ifndef RIPPLE_APP_MISC_AMMUTILS_H_INCLUDED +#define RIPPLE_APP_MISC_AMMUTILS_H_INCLUDED #include @@ -127,4 +127,4 @@ isOnlyLiquidityProvider( } // namespace ripple -#endif // RIPPLE_APP_MISC_AMMUTILS_H_INLCUDED +#endif // RIPPLE_APP_MISC_AMMUTILS_H_INCLUDED diff --git a/src/xrpld/app/misc/DelegateUtils.h b/src/xrpld/app/misc/DelegateUtils.h new file mode 100644 index 0000000000..cad3bed376 --- /dev/null +++ b/src/xrpld/app/misc/DelegateUtils.h @@ -0,0 +1,56 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2025 Ripple Labs Inc. 
+ + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_APP_MISC_DELEGATEUTILS_H_INCLUDED +#define RIPPLE_APP_MISC_DELEGATEUTILS_H_INCLUDED + +#include +#include +#include +#include + +namespace ripple { + +/** + * Check if the delegate account has permission to execute the transaction. + * @param delegate The delegate account. + * @param tx The transaction that the delegate account intends to execute. + * @return tesSUCCESS if the transaction is allowed, tecNO_PERMISSION if not. + */ +TER +checkTxPermission(std::shared_ptr const& delegate, STTx const& tx); + +/** + * Load the granular permissions granted to the delegate account for the + * specified transaction type + * @param delegate The delegate account. + * @param type Used to determine which granted granular permissions to load, + * based on the transaction type. + * @param granularPermissions Granted granular permissions tied to the + * transaction type. + */ +void +loadGranularPermission( + std::shared_ptr const& delegate, + TxType const& type, + std::unordered_set& granularPermissions); + +} // namespace ripple + +#endif // RIPPLE_APP_MISC_DELEGATEUTILS_H_INCLUDED diff --git a/src/xrpld/app/misc/detail/DelegateUtils.cpp b/src/xrpld/app/misc/detail/DelegateUtils.cpp new file mode 100644 index 0000000000..7b7021fe9e --- /dev/null +++ b/src/xrpld/app/misc/detail/DelegateUtils.cpp @@ -0,0 +1,66 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2025 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include + +#include + +namespace ripple { +TER +checkTxPermission(std::shared_ptr const& delegate, STTx const& tx) +{ + if (!delegate) + return tecNO_PERMISSION; // LCOV_EXCL_LINE + + auto const permissionArray = delegate->getFieldArray(sfPermissions); + auto const txPermission = tx.getTxnType() + 1; + + for (auto const& permission : permissionArray) + { + auto const permissionValue = permission[sfPermissionValue]; + if (permissionValue == txPermission) + return tesSUCCESS; + } + + return tecNO_PERMISSION; +} + +void +loadGranularPermission( + std::shared_ptr const& delegate, + TxType const& txType, + std::unordered_set& granularPermissions) +{ + if (!delegate) + return; // LCOV_EXCL_LINE + + auto const permissionArray = delegate->getFieldArray(sfPermissions); + for (auto const& permission : permissionArray) + { + auto const permissionValue = permission[sfPermissionValue]; + auto const granularValue = + static_cast(permissionValue); + auto const& type = + Permission::getInstance().getGranularTxType(granularValue); + if (type && *type == txType) + granularPermissions.insert(granularValue); + } +} + +} // namespace ripple diff --git a/src/xrpld/app/tx/detail/DelegateSet.cpp b/src/xrpld/app/tx/detail/DelegateSet.cpp new file mode 100644 index 0000000000..d93ed6fa96 --- /dev/null +++ b/src/xrpld/app/tx/detail/DelegateSet.cpp @@ -0,0 +1,162 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2025 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include + +#include +#include +#include +#include +#include + +namespace ripple { + +NotTEC +DelegateSet::preflight(PreflightContext const& ctx) +{ + if (!ctx.rules.enabled(featurePermissionDelegation)) + return temDISABLED; + + if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) + return ret; + + auto const& permissions = ctx.tx.getFieldArray(sfPermissions); + if (permissions.size() > permissionMaxSize) + return temARRAY_TOO_LARGE; + + // can not authorize self + if (ctx.tx[sfAccount] == ctx.tx[sfAuthorize]) + return temMALFORMED; + + std::unordered_set permissionSet; + + for (auto const& permission : permissions) + { + if (!permissionSet.insert(permission[sfPermissionValue]).second) + return temMALFORMED; + } + + return preflight2(ctx); +} + +TER +DelegateSet::preclaim(PreclaimContext const& ctx) +{ + if (!ctx.view.exists(keylet::account(ctx.tx[sfAccount]))) + return terNO_ACCOUNT; // LCOV_EXCL_LINE + + if (!ctx.view.exists(keylet::account(ctx.tx[sfAuthorize]))) + return terNO_ACCOUNT; + + auto const& permissions = ctx.tx.getFieldArray(sfPermissions); + for (auto const& permission : permissions) + { + auto const permissionValue = permission[sfPermissionValue]; + if (!Permission::getInstance().isDelegatable(permissionValue)) + return tecNO_PERMISSION; + } + + return tesSUCCESS; +} + +TER +DelegateSet::doApply() +{ + auto const sleOwner = ctx_.view().peek(keylet::account(account_)); + if (!sleOwner) + return tefINTERNAL; // LCOV_EXCL_LINE + + auto const& authAccount = ctx_.tx[sfAuthorize]; + auto const delegateKey = keylet::delegate(account_, authAccount); + + auto sle = ctx_.view().peek(delegateKey); + if (sle) + { + auto const& permissions = ctx_.tx.getFieldArray(sfPermissions); + if (permissions.empty()) + // if permissions array is empty, delete the ledger object. 
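+            // deleteDelegate also removes the entry from the owner directory
+            // and releases the owner reserve via adjustOwnerCount(..., -1)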
+ return deleteDelegate(view(), sle, account_, j_); + + sle->setFieldArray(sfPermissions, permissions); + ctx_.view().update(sle); + return tesSUCCESS; + } + + STAmount const reserve{ctx_.view().fees().accountReserve( + sleOwner->getFieldU32(sfOwnerCount) + 1)}; + + if (mPriorBalance < reserve) + return tecINSUFFICIENT_RESERVE; + + auto const& permissions = ctx_.tx.getFieldArray(sfPermissions); + if (!permissions.empty()) + { + sle = std::make_shared(delegateKey); + sle->setAccountID(sfAccount, account_); + sle->setAccountID(sfAuthorize, authAccount); + + sle->setFieldArray(sfPermissions, permissions); + auto const page = ctx_.view().dirInsert( + keylet::ownerDir(account_), + delegateKey, + describeOwnerDir(account_)); + + if (!page) + return tecDIR_FULL; // LCOV_EXCL_LINE + + (*sle)[sfOwnerNode] = *page; + ctx_.view().insert(sle); + adjustOwnerCount(ctx_.view(), sleOwner, 1, ctx_.journal); + } + + return tesSUCCESS; +} + +TER +DelegateSet::deleteDelegate( + ApplyView& view, + std::shared_ptr const& sle, + AccountID const& account, + beast::Journal j) +{ + if (!sle) + return tecINTERNAL; // LCOV_EXCL_LINE + + if (!view.dirRemove( + keylet::ownerDir(account), (*sle)[sfOwnerNode], sle->key(), false)) + { + // LCOV_EXCL_START + JLOG(j.fatal()) << "Unable to delete Delegate from owner."; + return tefBAD_LEDGER; + // LCOV_EXCL_STOP + } + + auto const sleOwner = view.peek(keylet::account(account)); + if (!sleOwner) + return tecINTERNAL; // LCOV_EXCL_LINE + + adjustOwnerCount(view, sleOwner, -1, j); + + view.erase(sle); + + return tesSUCCESS; +} + +} // namespace ripple \ No newline at end of file diff --git a/src/xrpld/app/tx/detail/DelegateSet.h b/src/xrpld/app/tx/detail/DelegateSet.h new file mode 100644 index 0000000000..6b01d63281 --- /dev/null +++ b/src/xrpld/app/tx/detail/DelegateSet.h @@ -0,0 +1,56 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2025 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#ifndef RIPPLE_TX_DELEGATESET_H_INCLUDED +#define RIPPLE_TX_DELEGATESET_H_INCLUDED + +#include + +namespace ripple { + +class DelegateSet : public Transactor +{ +public: + static constexpr ConsequencesFactoryType ConsequencesFactory{Normal}; + + explicit DelegateSet(ApplyContext& ctx) : Transactor(ctx) + { + } + + static NotTEC + preflight(PreflightContext const& ctx); + + static TER + preclaim(PreclaimContext const& ctx); + + TER + doApply() override; + + // Interface used by DeleteAccount + static TER + deleteDelegate( + ApplyView& view, + std::shared_ptr const& sle, + AccountID const& account, + beast::Journal j); +}; + +} // namespace ripple + +#endif \ No newline at end of file diff --git a/src/xrpld/app/tx/detail/DeleteAccount.cpp b/src/xrpld/app/tx/detail/DeleteAccount.cpp index d562069460..7aa47e05f3 100644 --- a/src/xrpld/app/tx/detail/DeleteAccount.cpp +++ b/src/xrpld/app/tx/detail/DeleteAccount.cpp @@ -19,6 +19,7 @@ #include #include +#include #include #include #include @@ -180,6 +181,18 @@ removeCredentialFromLedger( return credentials::deleteSLE(view, sleDel, j); } +TER +removeDelegateFromLedger( + Application& app, + ApplyView& view, + AccountID const& account, + uint256 const& delIndex, + std::shared_ptr const& sleDel, + beast::Journal j) +{ + return DelegateSet::deleteDelegate(view, sleDel, account, j); +} + // Return nullptr if the LedgerEntryType represents an obligation that can't // be deleted. Otherwise return the pointer to the function that can delete // the non-obligation @@ -204,6 +217,8 @@ nonObligationDeleter(LedgerEntryType t) return removeOracleFromLedger; case ltCREDENTIAL: return removeCredentialFromLedger; + case ltDELEGATE: + return removeDelegateFromLedger; default: return nullptr; } diff --git a/src/xrpld/app/tx/detail/InvariantCheck.cpp b/src/xrpld/app/tx/detail/InvariantCheck.cpp index b97a0c02ee..2441cb040a 100644 --- a/src/xrpld/app/tx/detail/InvariantCheck.cpp +++ b/src/xrpld/app/tx/detail/InvariantCheck.cpp @@ -387,6 +387,7 @@ AccountRootsDeletedClean::finalize( view.rules().enabled(featureInvariantsV1_1); auto const objectExists = [&view, enforce, &j](auto const& keylet) { + (void)enforce; if (auto const sle = view.read(keylet)) { // Finding the object is bad @@ -463,6 +464,7 @@ LedgerEntryTypesMatch::visitEntry( switch (after->getType()) { case ltACCOUNT_ROOT: + case ltDELEGATE: case ltDIR_NODE: case ltRIPPLE_STATE: case ltTICKET: diff --git a/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp b/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp index 12208dba1b..85a1f6cf1a 100644 --- a/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp +++ b/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp @@ -17,6 +17,7 @@ */ //============================================================================== +#include #include #include @@ -50,6 +51,43 @@ MPTokenIssuanceSet::preflight(PreflightContext const& ctx) return preflight2(ctx); } +TER +MPTokenIssuanceSet::checkPermission(ReadView const& view, STTx const& tx) +{ + auto const delegate = tx[~sfDelegate]; + if (!delegate) + return tesSUCCESS; + + auto const delegateKey = keylet::delegate(tx[sfAccount], *delegate); + auto const sle = view.read(delegateKey); + + if (!sle) + return tecNO_PERMISSION; + + if (checkTxPermission(sle, tx) == tesSUCCESS) + return tesSUCCESS; + + auto const txFlags = tx.getFlags(); + + // this is added in case more flags will be added for MPTokenIssuanceSet + // in the future. Currently unreachable. 
+ if (txFlags & tfMPTokenIssuanceSetPermissionMask) + return tecNO_PERMISSION; // LCOV_EXCL_LINE + + std::unordered_set granularPermissions; + loadGranularPermission(sle, ttMPTOKEN_ISSUANCE_SET, granularPermissions); + + if (txFlags & tfMPTLock && + !granularPermissions.contains(MPTokenIssuanceLock)) + return tecNO_PERMISSION; + + if (txFlags & tfMPTUnlock && + !granularPermissions.contains(MPTokenIssuanceUnlock)) + return tecNO_PERMISSION; + + return tesSUCCESS; +} + TER MPTokenIssuanceSet::preclaim(PreclaimContext const& ctx) { diff --git a/src/xrpld/app/tx/detail/MPTokenIssuanceSet.h b/src/xrpld/app/tx/detail/MPTokenIssuanceSet.h index 895be97312..5b3db0e75b 100644 --- a/src/xrpld/app/tx/detail/MPTokenIssuanceSet.h +++ b/src/xrpld/app/tx/detail/MPTokenIssuanceSet.h @@ -36,6 +36,9 @@ public: static NotTEC preflight(PreflightContext const& ctx); + static TER + checkPermission(ReadView const& view, STTx const& tx); + static TER preclaim(PreclaimContext const& ctx); diff --git a/src/xrpld/app/tx/detail/Payment.cpp b/src/xrpld/app/tx/detail/Payment.cpp index c2b7b23a6a..f2f4ac4f7c 100644 --- a/src/xrpld/app/tx/detail/Payment.cpp +++ b/src/xrpld/app/tx/detail/Payment.cpp @@ -18,6 +18,7 @@ //============================================================================== #include +#include #include #include #include @@ -238,6 +239,39 @@ Payment::preflight(PreflightContext const& ctx) return preflight2(ctx); } +TER +Payment::checkPermission(ReadView const& view, STTx const& tx) +{ + auto const delegate = tx[~sfDelegate]; + if (!delegate) + return tesSUCCESS; + + auto const delegateKey = keylet::delegate(tx[sfAccount], *delegate); + auto const sle = view.read(delegateKey); + + if (!sle) + return tecNO_PERMISSION; + + if (checkTxPermission(sle, tx) == tesSUCCESS) + return tesSUCCESS; + + std::unordered_set granularPermissions; + loadGranularPermission(sle, ttPAYMENT, granularPermissions); + + auto const& dstAmount = tx.getFieldAmount(sfAmount); + auto const& amountIssue = dstAmount.issue(); + + if (granularPermissions.contains(PaymentMint) && !isXRP(amountIssue) && + amountIssue.account == tx[sfAccount]) + return tesSUCCESS; + + if (granularPermissions.contains(PaymentBurn) && !isXRP(amountIssue) && + amountIssue.account == tx[sfDestination]) + return tesSUCCESS; + + return tecNO_PERMISSION; +} + TER Payment::preclaim(PreclaimContext const& ctx) { diff --git a/src/xrpld/app/tx/detail/Payment.h b/src/xrpld/app/tx/detail/Payment.h index 775d4e8d46..010a2453cf 100644 --- a/src/xrpld/app/tx/detail/Payment.h +++ b/src/xrpld/app/tx/detail/Payment.h @@ -45,6 +45,9 @@ public: static NotTEC preflight(PreflightContext const& ctx); + static TER + checkPermission(ReadView const& view, STTx const& tx); + static TER preclaim(PreclaimContext const& ctx); diff --git a/src/xrpld/app/tx/detail/SetAccount.cpp b/src/xrpld/app/tx/detail/SetAccount.cpp index d871cc3280..599819151a 100644 --- a/src/xrpld/app/tx/detail/SetAccount.cpp +++ b/src/xrpld/app/tx/detail/SetAccount.cpp @@ -17,6 +17,7 @@ */ //============================================================================== +#include #include #include #include @@ -188,6 +189,61 @@ SetAccount::preflight(PreflightContext const& ctx) return preflight2(ctx); } +TER +SetAccount::checkPermission(ReadView const& view, STTx const& tx) +{ + // SetAccount is prohibited to be granted on a transaction level, + // but some granular permissions are allowed. 
+ auto const delegate = tx[~sfDelegate]; + if (!delegate) + return tesSUCCESS; + + auto const delegateKey = keylet::delegate(tx[sfAccount], *delegate); + auto const sle = view.read(delegateKey); + + if (!sle) + return tecNO_PERMISSION; + + std::unordered_set granularPermissions; + loadGranularPermission(sle, ttACCOUNT_SET, granularPermissions); + + auto const uSetFlag = tx.getFieldU32(sfSetFlag); + auto const uClearFlag = tx.getFieldU32(sfClearFlag); + auto const uTxFlags = tx.getFlags(); + // We don't support any flag based granular permission under + // AccountSet transaction. If any delegated account is trying to + // update the flag on behalf of another account, it is not + // authorized. + if (uSetFlag != 0 || uClearFlag != 0 || uTxFlags != tfFullyCanonicalSig) + return tecNO_PERMISSION; + + if (tx.isFieldPresent(sfEmailHash) && + !granularPermissions.contains(AccountEmailHashSet)) + return tecNO_PERMISSION; + + if (tx.isFieldPresent(sfWalletLocator) || + tx.isFieldPresent(sfNFTokenMinter)) + return tecNO_PERMISSION; + + if (tx.isFieldPresent(sfMessageKey) && + !granularPermissions.contains(AccountMessageKeySet)) + return tecNO_PERMISSION; + + if (tx.isFieldPresent(sfDomain) && + !granularPermissions.contains(AccountDomainSet)) + return tecNO_PERMISSION; + + if (tx.isFieldPresent(sfTransferRate) && + !granularPermissions.contains(AccountTransferRateSet)) + return tecNO_PERMISSION; + + if (tx.isFieldPresent(sfTickSize) && + !granularPermissions.contains(AccountTickSizeSet)) + return tecNO_PERMISSION; + + return tesSUCCESS; +} + TER SetAccount::preclaim(PreclaimContext const& ctx) { diff --git a/src/xrpld/app/tx/detail/SetAccount.h b/src/xrpld/app/tx/detail/SetAccount.h index 4604a11a6c..ed4242c250 100644 --- a/src/xrpld/app/tx/detail/SetAccount.h +++ b/src/xrpld/app/tx/detail/SetAccount.h @@ -41,6 +41,9 @@ public: static NotTEC preflight(PreflightContext const& ctx); + static TER + checkPermission(ReadView const& view, STTx const& tx); + static TER preclaim(PreclaimContext const& ctx); diff --git a/src/xrpld/app/tx/detail/SetTrust.cpp b/src/xrpld/app/tx/detail/SetTrust.cpp index 93abcdc4c4..9fe267b8e1 100644 --- a/src/xrpld/app/tx/detail/SetTrust.cpp +++ b/src/xrpld/app/tx/detail/SetTrust.cpp @@ -17,6 +17,7 @@ */ //============================================================================== +#include #include #include @@ -127,6 +128,69 @@ SetTrust::preflight(PreflightContext const& ctx) return preflight2(ctx); } +TER +SetTrust::checkPermission(ReadView const& view, STTx const& tx) +{ + auto const delegate = tx[~sfDelegate]; + if (!delegate) + return tesSUCCESS; + + auto const delegateKey = keylet::delegate(tx[sfAccount], *delegate); + auto const sle = view.read(delegateKey); + + if (!sle) + return tecNO_PERMISSION; + + if (checkTxPermission(sle, tx) == tesSUCCESS) + return tesSUCCESS; + + std::uint32_t const txFlags = tx.getFlags(); + + // Currently we only support TrustlineAuthorize, TrustlineFreeze and + // TrustlineUnfreeze granular permission. Setting other flags returns + // error. 
+ if (txFlags & tfTrustSetPermissionMask) + return tecNO_PERMISSION; + + if (tx.isFieldPresent(sfQualityIn) || tx.isFieldPresent(sfQualityOut)) + return tecNO_PERMISSION; + + auto const saLimitAmount = tx.getFieldAmount(sfLimitAmount); + auto const sleRippleState = view.read(keylet::line( + tx[sfAccount], saLimitAmount.getIssuer(), saLimitAmount.getCurrency())); + + // if the trustline does not exist, granular permissions are + // not allowed to create trustline + if (!sleRippleState) + return tecNO_PERMISSION; + + std::unordered_set granularPermissions; + loadGranularPermission(sle, ttTRUST_SET, granularPermissions); + + if (txFlags & tfSetfAuth && + !granularPermissions.contains(TrustlineAuthorize)) + return tecNO_PERMISSION; + if (txFlags & tfSetFreeze && !granularPermissions.contains(TrustlineFreeze)) + return tecNO_PERMISSION; + if (txFlags & tfClearFreeze && + !granularPermissions.contains(TrustlineUnfreeze)) + return tecNO_PERMISSION; + + // updating LimitAmount is not allowed only with granular permissions, + // unless there's a new granular permission for this in the future. + auto const curLimit = tx[sfAccount] > saLimitAmount.getIssuer() + ? sleRippleState->getFieldAmount(sfHighLimit) + : sleRippleState->getFieldAmount(sfLowLimit); + + STAmount saLimitAllow = saLimitAmount; + saLimitAllow.setIssuer(tx[sfAccount]); + + if (curLimit != saLimitAllow) + return tecNO_PERMISSION; + + return tesSUCCESS; +} + TER SetTrust::preclaim(PreclaimContext const& ctx) { diff --git a/src/xrpld/app/tx/detail/SetTrust.h b/src/xrpld/app/tx/detail/SetTrust.h index 7a5394c684..a0476918ac 100644 --- a/src/xrpld/app/tx/detail/SetTrust.h +++ b/src/xrpld/app/tx/detail/SetTrust.h @@ -38,6 +38,9 @@ public: static NotTEC preflight(PreflightContext const& ctx); + static TER + checkPermission(ReadView const& view, STTx const& tx); + static TER preclaim(PreclaimContext const& ctx); diff --git a/src/xrpld/app/tx/detail/Transactor.cpp b/src/xrpld/app/tx/detail/Transactor.cpp index 2e49794c2d..390f32e02b 100644 --- a/src/xrpld/app/tx/detail/Transactor.cpp +++ b/src/xrpld/app/tx/detail/Transactor.cpp @@ -19,6 +19,7 @@ #include #include +#include #include #include #include @@ -89,6 +90,15 @@ preflight1(PreflightContext const& ctx) return temMALFORMED; } + if (ctx.tx.isFieldPresent(sfDelegate)) + { + if (!ctx.rules.enabled(featurePermissionDelegation)) + return temDISABLED; + + if (ctx.tx[sfDelegate] == ctx.tx[sfAccount]) + return temBAD_SIGNER; + } + auto const ret = preflight0(ctx); if (!isTesSuccess(ret)) return ret; @@ -190,6 +200,22 @@ Transactor::Transactor(ApplyContext& ctx) { } +TER +Transactor::checkPermission(ReadView const& view, STTx const& tx) +{ + auto const delegate = tx[~sfDelegate]; + if (!delegate) + return tesSUCCESS; + + auto const delegateKey = keylet::delegate(tx[sfAccount], *delegate); + auto const sle = view.read(delegateKey); + + if (!sle) + return tecNO_PERMISSION; + + return checkTxPermission(sle, tx); +} + XRPAmount Transactor::calculateBaseFee(ReadView const& view, STTx const& tx) { @@ -246,7 +272,9 @@ Transactor::checkFee(PreclaimContext const& ctx, XRPAmount baseFee) if (feePaid == beast::zero) return tesSUCCESS; - auto const id = ctx.tx.getAccountID(sfAccount); + auto const id = ctx.tx.isFieldPresent(sfDelegate) + ? 
ctx.tx.getAccountID(sfDelegate) + : ctx.tx.getAccountID(sfAccount); auto const sle = ctx.view.read(keylet::account(id)); if (!sle) return terNO_ACCOUNT; @@ -276,17 +304,32 @@ Transactor::payFee() { auto const feePaid = ctx_.tx[sfFee].xrp(); - auto const sle = view().peek(keylet::account(account_)); - if (!sle) - return tefINTERNAL; + if (ctx_.tx.isFieldPresent(sfDelegate)) + { + // Delegated transactions are paid by the delegated account. + auto const delegate = ctx_.tx.getAccountID(sfDelegate); + auto const delegatedSle = view().peek(keylet::account(delegate)); + if (!delegatedSle) + return tefINTERNAL; // LCOV_EXCL_LINE - // Deduct the fee, so it's not available during the transaction. - // Will only write the account back if the transaction succeeds. + delegatedSle->setFieldAmount( + sfBalance, delegatedSle->getFieldAmount(sfBalance) - feePaid); + view().update(delegatedSle); + } + else + { + auto const sle = view().peek(keylet::account(account_)); + if (!sle) + return tefINTERNAL; // LCOV_EXCL_LINE - mSourceBalance -= feePaid; - sle->setFieldAmount(sfBalance, mSourceBalance); + // Deduct the fee, so it's not available during the transaction. + // Will only write the account back if the transaction succeeds. - // VFALCO Should we call view().rawDestroyXRP() here as well? + mSourceBalance -= feePaid; + sle->setFieldAmount(sfBalance, mSourceBalance); + + // VFALCO Should we call view().rawDestroyXRP() here as well? + } return tesSUCCESS; } @@ -542,7 +585,9 @@ Transactor::checkSingleSign(PreclaimContext const& ctx) } // Look up the account. - auto const idAccount = ctx.tx.getAccountID(sfAccount); + auto const idAccount = ctx.tx.isFieldPresent(sfDelegate) + ? ctx.tx.getAccountID(sfDelegate) + : ctx.tx.getAccountID(sfAccount); auto const sleAccount = ctx.view.read(keylet::account(idAccount)); if (!sleAccount) return terNO_ACCOUNT; @@ -612,7 +657,9 @@ Transactor::checkSingleSign(PreclaimContext const& ctx) NotTEC Transactor::checkMultiSign(PreclaimContext const& ctx) { - auto const id = ctx.tx.getAccountID(sfAccount); + auto const id = ctx.tx.isFieldPresent(sfDelegate) + ? ctx.tx.getAccountID(sfDelegate) + : ctx.tx.getAccountID(sfAccount); // Get mTxnAccountID's SignerList and Quorum. std::shared_ptr sleAccountSigners = ctx.view.read(keylet::signers(id)); @@ -870,15 +917,22 @@ Transactor::reset(XRPAmount fee) // is missing then we can't very well charge it a fee, can we? return {tefINTERNAL, beast::zero}; - auto const balance = txnAcct->getFieldAmount(sfBalance).xrp(); + auto const payerSle = ctx_.tx.isFieldPresent(sfDelegate) + ? view().peek(keylet::account(ctx_.tx.getAccountID(sfDelegate))) + : txnAcct; + if (!payerSle) + return {tefINTERNAL, beast::zero}; // LCOV_EXCL_LINE + + auto const balance = payerSle->getFieldAmount(sfBalance).xrp(); // balance should have already been checked in checkFee / preFlight. XRPL_ASSERT( balance != beast::zero && (!view().open() || balance >= fee), "ripple::Transactor::reset : valid balance"); - // We retry/reject the transaction if the account balance is zero or we're - // applying against an open ledger and the balance is less than the fee + // We retry/reject the transaction if the account balance is zero or + // we're applying against an open ledger and the balance is less than + // the fee if (fee > balance) fee = balance; @@ -888,13 +942,17 @@ Transactor::reset(XRPAmount fee) // If for some reason we are unable to consume the ticket or sequence // then the ledger is corrupted. Rather than make things worse we // reject the transaction. 
- txnAcct->setFieldAmount(sfBalance, balance - fee); + payerSle->setFieldAmount(sfBalance, balance - fee); TER const ter{consumeSeqProxy(txnAcct)}; XRPL_ASSERT( isTesSuccess(ter), "ripple::Transactor::reset : result is tesSUCCESS"); if (isTesSuccess(ter)) + { view().update(txnAcct); + if (payerSle != txnAcct) + view().update(payerSle); + } return {ter, fee}; } diff --git a/src/xrpld/app/tx/detail/Transactor.h b/src/xrpld/app/tx/detail/Transactor.h index e98269c38a..4956f021df 100644 --- a/src/xrpld/app/tx/detail/Transactor.h +++ b/src/xrpld/app/tx/detail/Transactor.h @@ -24,6 +24,7 @@ #include #include +#include #include namespace ripple { @@ -149,6 +150,9 @@ public: // after checkSeq/Fee/Sign. return tesSUCCESS; } + + static TER + checkPermission(ReadView const& view, STTx const& tx); ///////////////////////////////////////////////////// // Interface used by DeleteAccount diff --git a/src/xrpld/app/tx/detail/applySteps.cpp b/src/xrpld/app/tx/detail/applySteps.cpp index 4cb505db50..b20b5a29f6 100644 --- a/src/xrpld/app/tx/detail/applySteps.cpp +++ b/src/xrpld/app/tx/detail/applySteps.cpp @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include @@ -89,8 +90,8 @@ with_txn_type(TxType txnType, F&& f) #pragma push_macro("TRANSACTION") #undef TRANSACTION -#define TRANSACTION(tag, value, name, fields) \ - case tag: \ +#define TRANSACTION(tag, value, name, delegatable, fields) \ + case tag: \ return f.template operator()(); #include @@ -193,6 +194,11 @@ invoke_preclaim(PreclaimContext const& ctx) result = T::checkFee(ctx, calculateBaseFee(ctx.view, ctx.tx)); + if (result != tesSUCCESS) + return result; + + result = T::checkPermission(ctx.view, ctx.tx); + if (result != tesSUCCESS) return result; diff --git a/src/xrpld/rpc/detail/TransactionSign.cpp b/src/xrpld/rpc/detail/TransactionSign.cpp index 2a7807f8ca..3f388e636f 100644 --- a/src/xrpld/rpc/detail/TransactionSign.cpp +++ b/src/xrpld/rpc/detail/TransactionSign.cpp @@ -531,10 +531,40 @@ transactionPreProcessImpl( if (!signingArgs.isMultiSigning()) { // Make sure the account and secret belong together. - auto const err = acctMatchesPubKey(sle, srcAddressID, pk); + if (tx_json.isMember(sfDelegate.jsonName)) + { + // Delegated transaction + auto const delegateJson = tx_json[sfDelegate.jsonName]; + auto const ptrDelegatedAddressID = delegateJson.isString() + ? 
parseBase58(delegateJson.asString()) + : std::nullopt; - if (err != rpcSUCCESS) - return rpcError(err); + if (!ptrDelegatedAddressID) + { + return RPC::make_error( + rpcSRC_ACT_MALFORMED, + RPC::invalid_field_message("tx_json.Delegate")); + } + + auto delegatedAddressID = *ptrDelegatedAddressID; + auto delegatedSle = app.openLedger().current()->read( + keylet::account(delegatedAddressID)); + if (!delegatedSle) + return rpcError(rpcDELEGATE_ACT_NOT_FOUND); + + auto const err = + acctMatchesPubKey(delegatedSle, delegatedAddressID, pk); + + if (err != rpcSUCCESS) + return rpcError(err); + } + else + { + auto const err = acctMatchesPubKey(sle, srcAddressID, pk); + + if (err != rpcSUCCESS) + return rpcError(err); + } } } diff --git a/src/xrpld/rpc/handlers/LedgerEntry.cpp b/src/xrpld/rpc/handlers/LedgerEntry.cpp index 1d15825786..ade9b9578b 100644 --- a/src/xrpld/rpc/handlers/LedgerEntry.cpp +++ b/src/xrpld/rpc/handlers/LedgerEntry.cpp @@ -230,6 +230,46 @@ parseAuthorizeCredentials(Json::Value const& jv) return arr; } +static std::optional +parseDelegate(Json::Value const& params, Json::Value& jvResult) +{ + if (!params.isObject()) + { + uint256 uNodeIndex; + if (!params.isString() || !uNodeIndex.parseHex(params.asString())) + { + jvResult[jss::error] = "malformedRequest"; + return std::nullopt; + } + return uNodeIndex; + } + if (!params.isMember(jss::account) || !params.isMember(jss::authorize)) + { + jvResult[jss::error] = "malformedRequest"; + return std::nullopt; + } + if (!params[jss::account].isString() || !params[jss::authorize].isString()) + { + jvResult[jss::error] = "malformedAddress"; + return std::nullopt; + } + auto const account = + parseBase58(params[jss::account].asString()); + if (!account) + { + jvResult[jss::error] = "malformedAddress"; + return std::nullopt; + } + auto const authorize = + parseBase58(params[jss::authorize].asString()); + if (!authorize) + { + jvResult[jss::error] = "malformedAddress"; + return std::nullopt; + } + return keylet::delegate(*account, *authorize).key; +} + static std::optional parseDepositPreauth(Json::Value const& dp, Json::Value& jvResult) { @@ -884,6 +924,7 @@ doLedgerEntry(RPC::JsonContext& context) {jss::bridge, parseBridge, ltBRIDGE}, {jss::check, parseCheck, ltCHECK}, {jss::credential, parseCredential, ltCREDENTIAL}, + {jss::delegate, parseDelegate, ltDELEGATE}, {jss::deposit_preauth, parseDepositPreauth, ltDEPOSIT_PREAUTH}, {jss::did, parseDID, ltDID}, {jss::directory, parseDirectory, ltDIR_NODE}, From 3e152fec745a3d09565a5e366c1a7f6f2c163d18 Mon Sep 17 00:00:00 2001 From: Vito Tumas <5780819+Tapanito@users.noreply.github.com> Date: Thu, 8 May 2025 13:00:42 +0200 Subject: [PATCH 020/244] refactor: use east const convention (#5409) This change refactors the codebase to use the "east const convention", and adds a clang-format rule to follow this convention. 
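To make the convention concrete, the following is a minimal, hand-written C++ sketch (not part of the patch itself) showing the same declarations in the old "west const" and the new "east const" spelling. The two forms declare identical types; the patch, together with the new clang-format rule `QualifierAlignment: Right`, simply standardizes on the second form, where the qualifier is written to the right of the type it modifies.

    #include <string>

    // "West const" spelling (before the refactor): the qualifier sits to the
    // left of the type.
    const char* westName = "west";             // pointer to const char
    const std::string westGreeting = "hello";  // const std::string

    // "East const" spelling (after the refactor): the qualifier follows the
    // type it modifies, so compound declarations read consistently
    // right-to-left.
    char const* eastName = "east";             // same type as westName
    std::string const eastGreeting = "hello";  // same type as westGreeting

    int main()
    {
        return 0;
    }

This is only an illustration of the spelling change; the patch below applies the equivalent mechanical rewrite across the existing headers and sources.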
--- .clang-format | 1 + include/xrpl/basics/BasicConfig.h | 2 +- include/xrpl/basics/CompressionAlgorithms.h | 4 +- include/xrpl/basics/Expected.h | 4 +- include/xrpl/basics/TaggedCache.h | 16 +-- include/xrpl/basics/TaggedCache.ipp | 12 +- include/xrpl/basics/base_uint.h | 18 +-- include/xrpl/basics/comparators.h | 4 +- .../xrpl/basics/partitioned_unordered_map.h | 2 +- include/xrpl/basics/tagged_integer.h | 8 +- include/xrpl/beast/utility/temp_dir.h | 4 +- include/xrpl/json/json_reader.h | 6 +- include/xrpl/json/json_value.h | 114 +++++++++--------- include/xrpl/json/json_writer.h | 26 ++-- include/xrpl/json/to_string.h | 2 +- include/xrpl/protocol/FeeUnits.h | 2 +- include/xrpl/protocol/MultiApiJson.h | 4 +- include/xrpl/protocol/Permissions.h | 6 +- include/xrpl/protocol/Quality.h | 4 +- include/xrpl/protocol/SField.h | 16 +-- include/xrpl/protocol/STAccount.h | 2 +- include/xrpl/protocol/STAmount.h | 22 ++-- include/xrpl/protocol/STArray.h | 12 +- include/xrpl/protocol/STBase.h | 10 +- include/xrpl/protocol/STBitString.h | 14 +-- include/xrpl/protocol/STBlob.h | 2 +- include/xrpl/protocol/STCurrency.h | 2 +- include/xrpl/protocol/STInteger.h | 6 +- include/xrpl/protocol/STIssue.h | 2 +- include/xrpl/protocol/STLedgerEntry.h | 2 +- include/xrpl/protocol/STObject.h | 44 +++---- include/xrpl/protocol/STPathSet.h | 14 +-- include/xrpl/protocol/STVector256.h | 6 +- include/xrpl/protocol/STXChainBridge.h | 2 +- include/xrpl/protocol/Serializer.h | 16 +-- include/xrpl/protocol/XRPAmount.h | 2 +- include/xrpl/protocol/detail/token_errors.h | 4 +- include/xrpl/protocol/json_get_or_throw.h | 4 +- src/libxrpl/basics/Archive.cpp | 2 +- src/libxrpl/basics/FileUtilities.cpp | 2 +- src/libxrpl/basics/Number.cpp | 2 +- src/libxrpl/basics/StringUtilities.cpp | 6 +- src/libxrpl/basics/UptimeClock.cpp | 2 +- src/libxrpl/json/Writer.cpp | 16 +-- src/libxrpl/json/json_reader.cpp | 12 +- src/libxrpl/json/json_value.cpp | 66 +++++----- src/libxrpl/json/json_valueiterator.cpp | 28 ++--- src/libxrpl/json/json_writer.cpp | 40 +++--- src/libxrpl/protocol/Feature.cpp | 20 +-- src/libxrpl/protocol/LedgerFormats.cpp | 2 +- src/libxrpl/protocol/Quality.cpp | 2 +- src/libxrpl/protocol/SField.cpp | 2 +- src/libxrpl/protocol/STAccount.cpp | 2 +- src/libxrpl/protocol/STAmount.cpp | 14 +-- src/libxrpl/protocol/STArray.cpp | 6 +- src/libxrpl/protocol/STBase.cpp | 10 +- src/libxrpl/protocol/STBlob.cpp | 4 +- src/libxrpl/protocol/STCurrency.cpp | 4 +- src/libxrpl/protocol/STIssue.cpp | 4 +- src/libxrpl/protocol/STObject.cpp | 26 ++-- src/libxrpl/protocol/STParsedJSON.cpp | 5 +- src/libxrpl/protocol/STPathSet.cpp | 4 +- src/libxrpl/protocol/STVector256.cpp | 4 +- src/libxrpl/protocol/STXChainBridge.cpp | 4 +- src/libxrpl/protocol/Serializer.cpp | 8 +- src/libxrpl/protocol/TxFormats.cpp | 2 +- src/test/app/AMMExtended_test.cpp | 2 +- src/test/app/AMM_test.cpp | 22 ++-- src/test/app/AccountDelete_test.cpp | 6 +- src/test/app/Credentials_test.cpp | 22 ++-- src/test/app/DID_test.cpp | 2 +- src/test/app/DepositAuth_test.cpp | 12 +- src/test/app/Escrow_test.cpp | 4 +- src/test/app/FeeVote_test.cpp | 2 +- src/test/app/LedgerReplay_test.cpp | 6 +- src/test/app/MPToken_test.cpp | 2 +- src/test/app/PayChan_test.cpp | 2 +- src/test/app/PermissionedDomains_test.cpp | 2 +- src/test/app/SHAMapStore_test.cpp | 24 ++-- src/test/app/SetTrust_test.cpp | 2 +- src/test/app/TxQ_test.cpp | 8 +- src/test/app/ValidatorKeys_test.cpp | 10 +- src/test/app/ValidatorList_test.cpp | 8 +- src/test/app/ValidatorSite_test.cpp | 2 +- 
src/test/app/XChain_test.cpp | 20 +-- src/test/app/tx/apply_test.cpp | 2 +- src/test/basics/FileUtilities_test.cpp | 2 +- src/test/basics/PerfLog_test.cpp | 2 +- src/test/basics/mulDiv_test.cpp | 4 +- .../consensus/ByzantineFailureSim_test.cpp | 4 +- src/test/core/SociDB_test.cpp | 2 +- src/test/csf/Peer.h | 6 +- src/test/csf/Tx.h | 2 +- src/test/csf/collectors.h | 2 +- src/test/csf/ledgers.h | 2 +- src/test/jtx/Env.h | 2 +- src/test/jtx/TrustedPublisherServer.h | 12 +- src/test/jtx/deposit.h | 2 +- src/test/jtx/envconfig.h | 2 +- src/test/jtx/impl/amount.cpp | 2 +- src/test/jtx/impl/mpt.cpp | 4 +- src/test/jtx/xchain_bridge.h | 10 +- src/test/overlay/reduce_relay_test.cpp | 15 +-- src/test/overlay/tx_reduce_relay_test.cpp | 4 +- src/test/protocol/MultiApiJson_test.cpp | 8 +- src/test/protocol/STAccount_test.cpp | 2 +- src/test/protocol/STAmount_test.cpp | 2 +- src/test/protocol/TER_test.cpp | 4 +- src/test/rpc/DepositAuthorized_test.cpp | 6 +- src/test/rpc/Feature_test.cpp | 4 +- src/test/rpc/Handler_test.cpp | 4 +- src/test/rpc/LedgerEntry_test.cpp | 6 +- src/test/rpc/LedgerRPC_test.cpp | 8 +- src/test/rpc/RPCCall_test.cpp | 4 +- src/test/rpc/Transaction_test.cpp | 24 ++-- src/test/rpc/ValidatorInfo_test.cpp | 2 +- src/test/unit_test/multi_runner.h | 4 +- src/xrpld/app/consensus/RCLConsensus.cpp | 18 +-- src/xrpld/app/consensus/RCLConsensus.h | 4 +- src/xrpld/app/ledger/BuildLedger.h | 2 +- src/xrpld/app/ledger/Ledger.cpp | 2 +- src/xrpld/app/ledger/LedgerHistory.cpp | 60 ++++----- src/xrpld/app/ledger/LedgerMaster.h | 2 +- src/xrpld/app/ledger/OrderBookDB.cpp | 2 +- src/xrpld/app/ledger/OrderBookDB.h | 2 +- src/xrpld/app/ledger/detail/BuildLedger.cpp | 4 +- src/xrpld/app/ledger/detail/LedgerMaster.cpp | 2 +- src/xrpld/app/ledger/detail/LedgerToJson.cpp | 2 +- .../app/ledger/detail/TimeoutCounter.cpp | 4 +- src/xrpld/app/main/Application.cpp | 2 +- src/xrpld/app/main/Application.h | 2 +- src/xrpld/app/main/GRPCServer.cpp | 4 +- src/xrpld/app/main/GRPCServer.h | 16 +-- src/xrpld/app/main/Main.cpp | 2 +- src/xrpld/app/misc/AMMHelpers.h | 6 +- src/xrpld/app/misc/FeeVoteImpl.cpp | 4 +- src/xrpld/app/misc/HashRouter.cpp | 2 +- src/xrpld/app/misc/NetworkOPs.cpp | 30 ++--- src/xrpld/app/misc/Transaction.h | 2 +- src/xrpld/app/misc/TxQ.h | 4 +- src/xrpld/app/misc/ValidatorList.h | 2 +- src/xrpld/app/misc/ValidatorSite.h | 4 +- src/xrpld/app/misc/detail/AMMUtils.cpp | 6 +- src/xrpld/app/misc/detail/AmendmentTable.cpp | 4 +- src/xrpld/app/misc/detail/TxQ.cpp | 12 +- src/xrpld/app/misc/detail/ValidatorList.cpp | 2 +- src/xrpld/app/paths/AMMLiquidity.h | 2 +- src/xrpld/app/paths/Flow.cpp | 4 +- src/xrpld/app/paths/PathRequest.cpp | 2 +- src/xrpld/app/paths/PathRequest.h | 4 +- src/xrpld/app/paths/Pathfinder.cpp | 9 +- src/xrpld/app/paths/detail/BookStep.cpp | 3 +- src/xrpld/app/paths/detail/DirectStep.cpp | 3 +- .../app/paths/detail/XRPEndpointStep.cpp | 3 +- src/xrpld/app/rdb/RelationalDatabase.h | 4 +- src/xrpld/app/rdb/backend/detail/Node.cpp | 8 +- .../app/rdb/detail/RelationalDatabase.cpp | 2 +- src/xrpld/app/tx/detail/CancelOffer.cpp | 8 +- src/xrpld/app/tx/detail/Change.cpp | 6 +- src/xrpld/app/tx/detail/Transactor.cpp | 6 +- src/xrpld/conditions/detail/error.cpp | 2 +- src/xrpld/conditions/detail/utils.h | 4 +- src/xrpld/consensus/Consensus.h | 20 +-- src/xrpld/consensus/DisputedTx.h | 3 +- src/xrpld/core/Job.h | 8 +- src/xrpld/core/detail/Config.cpp | 4 +- src/xrpld/core/detail/Job.cpp | 8 +- src/xrpld/core/detail/SociDB.cpp | 2 +- src/xrpld/ledger/ReadView.h | 2 +- src/xrpld/ledger/View.h 
| 12 +- src/xrpld/ledger/detail/RawStateTable.h | 2 +- src/xrpld/ledger/detail/View.cpp | 22 ++-- src/xrpld/net/AutoSocket.h | 14 +-- src/xrpld/net/HTTPClient.h | 12 +- src/xrpld/net/HTTPClientSSLContext.h | 2 +- src/xrpld/net/InfoSub.h | 4 +- src/xrpld/net/RPCCall.h | 6 +- src/xrpld/net/detail/HTTPClient.cpp | 40 +++--- src/xrpld/net/detail/InfoSub.cpp | 4 +- src/xrpld/net/detail/RPCCall.cpp | 16 +-- src/xrpld/net/detail/RPCSub.cpp | 2 +- src/xrpld/net/detail/RegisterSSLCerts.cpp | 2 +- src/xrpld/nodestore/detail/DummyScheduler.cpp | 4 +- src/xrpld/overlay/PeerSet.h | 2 +- src/xrpld/overlay/Slot.h | 2 +- src/xrpld/overlay/detail/PeerImp.cpp | 18 +-- src/xrpld/overlay/detail/PeerImp.h | 2 +- src/xrpld/overlay/detail/PeerSet.cpp | 6 +- src/xrpld/overlay/detail/TrafficCount.h | 2 +- src/xrpld/overlay/detail/ZeroCopyStream.h | 4 +- src/xrpld/peerfinder/detail/Logic.h | 6 +- src/xrpld/perflog/PerfLog.h | 4 +- src/xrpld/rpc/CTID.h | 4 +- src/xrpld/rpc/detail/Handler.cpp | 2 +- src/xrpld/rpc/detail/Handler.h | 2 +- src/xrpld/rpc/detail/RPCHelpers.cpp | 2 +- src/xrpld/rpc/detail/RPCHelpers.h | 2 +- src/xrpld/rpc/detail/ServerHandler.cpp | 2 +- src/xrpld/rpc/detail/TransactionSign.cpp | 7 +- src/xrpld/rpc/handlers/GetAggregatePrice.cpp | 4 +- src/xrpld/rpc/handlers/GetCounts.cpp | 2 +- src/xrpld/rpc/handlers/LedgerEntry.cpp | 2 +- src/xrpld/rpc/handlers/ServerInfo.cpp | 4 +- src/xrpld/rpc/handlers/Simulate.cpp | 2 +- src/xrpld/rpc/handlers/Subscribe.cpp | 2 +- src/xrpld/shamap/SHAMap.h | 4 +- src/xrpld/shamap/SHAMapLeafNode.h | 4 +- src/xrpld/shamap/detail/SHAMap.cpp | 4 +- src/xrpld/shamap/detail/SHAMapInnerNode.cpp | 2 +- src/xrpld/shamap/detail/SHAMapLeafNode.cpp | 2 +- src/xrpld/shamap/detail/SHAMapSync.cpp | 2 +- src/xrpld/shamap/detail/SHAMapTreeNode.cpp | 2 +- 212 files changed, 812 insertions(+), 795 deletions(-) diff --git a/.clang-format b/.clang-format index 9100396885..7b0fda27c9 100644 --- a/.clang-format +++ b/.clang-format @@ -94,3 +94,4 @@ SpacesInSquareBrackets: false Standard: Cpp11 TabWidth: 8 UseTab: Never +QualifierAlignment: Right \ No newline at end of file diff --git a/include/xrpl/basics/BasicConfig.h b/include/xrpl/basics/BasicConfig.h index 2e3478644e..dcd96e7738 100644 --- a/include/xrpl/basics/BasicConfig.h +++ b/include/xrpl/basics/BasicConfig.h @@ -367,7 +367,7 @@ get(Section const& section, } inline std::string -get(Section const& section, std::string const& name, const char* defaultValue) +get(Section const& section, std::string const& name, char const* defaultValue) { try { diff --git a/include/xrpl/basics/CompressionAlgorithms.h b/include/xrpl/basics/CompressionAlgorithms.h index 19db3568d6..4d6cf21cd8 100644 --- a/include/xrpl/basics/CompressionAlgorithms.h +++ b/include/xrpl/basics/CompressionAlgorithms.h @@ -55,7 +55,7 @@ lz4Compress(void const* in, std::size_t inSize, BufferFactory&& bf) auto compressed = bf(outCapacity); auto compressedSize = LZ4_compress_default( - reinterpret_cast(in), + reinterpret_cast(in), reinterpret_cast(compressed), inSize, outCapacity); @@ -89,7 +89,7 @@ lz4Decompress( Throw("lz4Decompress: integer overflow (output)"); if (LZ4_decompress_safe( - reinterpret_cast(in), + reinterpret_cast(in), reinterpret_cast(decompressed), inSize, decompressedSize) != decompressedSize) diff --git a/include/xrpl/basics/Expected.h b/include/xrpl/basics/Expected.h index 9abc7c8432..9afb160d9d 100644 --- a/include/xrpl/basics/Expected.h +++ b/include/xrpl/basics/Expected.h @@ -93,7 +93,7 @@ public: { } - constexpr const E& + constexpr E const& value() 
const& { return val_; @@ -111,7 +111,7 @@ public: return std::move(val_); } - constexpr const E&& + constexpr E const&& value() const&& { return std::move(val_); diff --git a/include/xrpl/basics/TaggedCache.h b/include/xrpl/basics/TaggedCache.h index 64570ae061..99c91fe393 100644 --- a/include/xrpl/basics/TaggedCache.h +++ b/include/xrpl/basics/TaggedCache.h @@ -115,7 +115,7 @@ public: sweep(); bool - del(const key_type& key, bool valid); + del(key_type const& key, bool valid); public: /** Replace aliased objects with originals. @@ -134,20 +134,20 @@ public: template bool canonicalize( - const key_type& key, + key_type const& key, SharedPointerType& data, R&& replaceCallback); bool canonicalize_replace_cache( - const key_type& key, + key_type const& key, SharedPointerType const& data); bool - canonicalize_replace_client(const key_type& key, SharedPointerType& data); + canonicalize_replace_client(key_type const& key, SharedPointerType& data); SharedPointerType - fetch(const key_type& key); + fetch(key_type const& key); /** Insert the element into the container. If the key already exists, nothing happens. @@ -168,7 +168,7 @@ public: // simply return an iterator. // bool - retrieve(const key_type& key, T& data); + retrieve(key_type const& key, T& data); mutex_type& peekMutex(); @@ -322,10 +322,10 @@ private: std::string m_name; // Desired number of cache entries (0 = ignore) - const int m_target_size; + int const m_target_size; // Desired maximum cache age - const clock_type::duration m_target_age; + clock_type::duration const m_target_age; // Number of items cached int m_cache_count; diff --git a/include/xrpl/basics/TaggedCache.ipp b/include/xrpl/basics/TaggedCache.ipp index 0108061680..16a3f7587a 100644 --- a/include/xrpl/basics/TaggedCache.ipp +++ b/include/xrpl/basics/TaggedCache.ipp @@ -365,7 +365,7 @@ TaggedCache< SharedPointerType, Hash, KeyEqual, - Mutex>::del(const key_type& key, bool valid) + Mutex>::del(key_type const& key, bool valid) { // Remove from cache, if !valid, remove from map too. 
Returns true if // removed from cache @@ -414,7 +414,7 @@ TaggedCache< KeyEqual, Mutex>:: canonicalize( - const key_type& key, + key_type const& key, SharedPointerType& data, R&& replaceCallback) { @@ -509,7 +509,7 @@ TaggedCache< KeyEqual, Mutex>:: canonicalize_replace_cache( - const key_type& key, + key_type const& key, SharedPointerType const& data) { return canonicalize( @@ -535,7 +535,7 @@ TaggedCache< Hash, KeyEqual, Mutex>:: - canonicalize_replace_client(const key_type& key, SharedPointerType& data) + canonicalize_replace_client(key_type const& key, SharedPointerType& data) { return canonicalize(key, data, []() { return false; }); } @@ -558,7 +558,7 @@ TaggedCache< SharedPointerType, Hash, KeyEqual, - Mutex>::fetch(const key_type& key) + Mutex>::fetch(key_type const& key) { std::lock_guard l(m_mutex); auto ret = initialFetch(key, l); @@ -656,7 +656,7 @@ TaggedCache< SharedPointerType, Hash, KeyEqual, - Mutex>::retrieve(const key_type& key, T& data) + Mutex>::retrieve(key_type const& key, T& data) { // retrieve the value of the stored data auto entry = fetch(key); diff --git a/include/xrpl/basics/base_uint.h b/include/xrpl/basics/base_uint.h index 4b93330ef2..d36bf74c54 100644 --- a/include/xrpl/basics/base_uint.h +++ b/include/xrpl/basics/base_uint.h @@ -374,7 +374,7 @@ public: } base_uint& - operator^=(const base_uint& b) + operator^=(base_uint const& b) { for (int i = 0; i < WIDTH; i++) data_[i] ^= b.data_[i]; @@ -383,7 +383,7 @@ public: } base_uint& - operator&=(const base_uint& b) + operator&=(base_uint const& b) { for (int i = 0; i < WIDTH; i++) data_[i] &= b.data_[i]; @@ -392,7 +392,7 @@ public: } base_uint& - operator|=(const base_uint& b) + operator|=(base_uint const& b) { for (int i = 0; i < WIDTH; i++) data_[i] |= b.data_[i]; @@ -415,11 +415,11 @@ public: return *this; } - const base_uint + base_uint const operator++(int) { // postfix operator - const base_uint ret = *this; + base_uint const ret = *this; ++(*this); return ret; @@ -441,11 +441,11 @@ public: return *this; } - const base_uint + base_uint const operator--(int) { // postfix operator - const base_uint ret = *this; + base_uint const ret = *this; --(*this); return ret; @@ -466,7 +466,7 @@ public: } base_uint& - operator+=(const base_uint& b) + operator+=(base_uint const& b) { std::uint64_t carry = 0; @@ -511,7 +511,7 @@ public: } [[nodiscard]] constexpr bool - parseHex(const char* str) + parseHex(char const* str) { return parseHex(std::string_view{str}); } diff --git a/include/xrpl/basics/comparators.h b/include/xrpl/basics/comparators.h index ce782dcd39..0e5f11e9e5 100644 --- a/include/xrpl/basics/comparators.h +++ b/include/xrpl/basics/comparators.h @@ -43,7 +43,7 @@ struct less using result_type = bool; constexpr bool - operator()(const T& left, const T& right) const + operator()(T const& left, T const& right) const { return std::less()(left, right); } @@ -55,7 +55,7 @@ struct equal_to using result_type = bool; constexpr bool - operator()(const T& left, const T& right) const + operator()(T const& left, T const& right) const { return std::equal_to()(left, right); } diff --git a/include/xrpl/basics/partitioned_unordered_map.h b/include/xrpl/basics/partitioned_unordered_map.h index a378011520..4e503ad0fa 100644 --- a/include/xrpl/basics/partitioned_unordered_map.h +++ b/include/xrpl/basics/partitioned_unordered_map.h @@ -52,7 +52,7 @@ template < typename Value, typename Hash, typename Pred = std::equal_to, - typename Alloc = std::allocator>> + typename Alloc = std::allocator>> class partitioned_unordered_map { 
std::size_t partitions_; diff --git a/include/xrpl/basics/tagged_integer.h b/include/xrpl/basics/tagged_integer.h index b826b87db1..471fa8eb1e 100644 --- a/include/xrpl/basics/tagged_integer.h +++ b/include/xrpl/basics/tagged_integer.h @@ -76,13 +76,13 @@ public: } bool - operator<(const tagged_integer& rhs) const noexcept + operator<(tagged_integer const& rhs) const noexcept { return m_value < rhs.m_value; } bool - operator==(const tagged_integer& rhs) const noexcept + operator==(tagged_integer const& rhs) const noexcept { return m_value == rhs.m_value; } @@ -144,14 +144,14 @@ public: } tagged_integer& - operator<<=(const tagged_integer& rhs) noexcept + operator<<=(tagged_integer const& rhs) noexcept { m_value <<= rhs.m_value; return *this; } tagged_integer& - operator>>=(const tagged_integer& rhs) noexcept + operator>>=(tagged_integer const& rhs) noexcept { m_value >>= rhs.m_value; return *this; diff --git a/include/xrpl/beast/utility/temp_dir.h b/include/xrpl/beast/utility/temp_dir.h index bbb7afc7b4..074b7461a4 100644 --- a/include/xrpl/beast/utility/temp_dir.h +++ b/include/xrpl/beast/utility/temp_dir.h @@ -37,9 +37,9 @@ class temp_dir public: #if !GENERATING_DOCS - temp_dir(const temp_dir&) = delete; + temp_dir(temp_dir const&) = delete; temp_dir& - operator=(const temp_dir&) = delete; + operator=(temp_dir const&) = delete; #endif /// Construct a temporary directory. diff --git a/include/xrpl/json/json_reader.h b/include/xrpl/json/json_reader.h index 27d608f850..81866819a5 100644 --- a/include/xrpl/json/json_reader.h +++ b/include/xrpl/json/json_reader.h @@ -39,7 +39,7 @@ class Reader { public: using Char = char; - using Location = const Char*; + using Location = Char const*; /** \brief Constructs a Reader allowing all features * for parsing. @@ -64,7 +64,7 @@ public: * error occurred. */ bool - parse(const char* beginDoc, const char* endDoc, Value& root); + parse(char const* beginDoc, char const* endDoc, Value& root); /// \brief Parse from input stream. /// \see Json::operator>>(std::istream&, Json::Value&). 
@@ -133,7 +133,7 @@ private: using Errors = std::deque; bool - expectToken(TokenType type, Token& token, const char* message); + expectToken(TokenType type, Token& token, char const* message); bool readToken(Token& token); void diff --git a/include/xrpl/json/json_value.h b/include/xrpl/json/json_value.h index 91d29c28d6..3431ab7744 100644 --- a/include/xrpl/json/json_value.h +++ b/include/xrpl/json/json_value.h @@ -61,24 +61,24 @@ enum ValueType { class StaticString { public: - constexpr explicit StaticString(const char* czstring) : str_(czstring) + constexpr explicit StaticString(char const* czstring) : str_(czstring) { } constexpr - operator const char*() const + operator char const*() const { return str_; } - constexpr const char* + constexpr char const* c_str() const { return str_; } private: - const char* str_; + char const* str_; }; inline bool @@ -156,10 +156,10 @@ public: using Int = Json::Int; using ArrayIndex = UInt; - static const Value null; - static const Int minInt; - static const Int maxInt; - static const UInt maxUInt; + static Value const null; + static Int const minInt; + static Int const maxInt; + static UInt const maxUInt; private: class CZString @@ -171,24 +171,24 @@ private: duplicateOnCopy }; CZString(int index); - CZString(const char* cstr, DuplicationPolicy allocate); - CZString(const CZString& other); + CZString(char const* cstr, DuplicationPolicy allocate); + CZString(CZString const& other); ~CZString(); CZString& - operator=(const CZString& other) = delete; + operator=(CZString const& other) = delete; bool - operator<(const CZString& other) const; + operator<(CZString const& other) const; bool - operator==(const CZString& other) const; + operator==(CZString const& other) const; int index() const; - const char* + char const* c_str() const; bool isStaticString() const; private: - const char* cstr_; + char const* cstr_; int index_; }; @@ -215,7 +215,7 @@ public: Value(Int value); Value(UInt value); Value(double value); - Value(const char* value); + Value(char const* value); /** \brief Constructs a value from a static string. * Like other value string constructor but do not duplicate the string for @@ -227,10 +227,10 @@ public: * Json::Value aValue( StaticString("some text") ); * \endcode */ - Value(const StaticString& value); + Value(StaticString const& value); Value(std::string const& value); Value(bool value); - Value(const Value& other); + Value(Value const& other); ~Value(); Value& @@ -247,7 +247,7 @@ public: ValueType type() const; - const char* + char const* asCString() const; /** Returns the unquoted string value. */ std::string @@ -317,12 +317,12 @@ public: /// Access an array element (zero based index ) /// (You may need to say 'value[0u]' to get your compiler to distinguish /// this from the operator[] which takes a string.) - const Value& + Value const& operator[](UInt index) const; /// If the array contains at least index+1 elements, returns the element /// value, otherwise returns defaultValue. Value - get(UInt index, const Value& defaultValue) const; + get(UInt index, Value const& defaultValue) const; /// Return true if index < size(). bool isValidIndex(UInt index) const; @@ -330,25 +330,25 @@ public: /// /// Equivalent to jsonvalue[jsonvalue.size()] = value; Value& - append(const Value& value); + append(Value const& value); Value& append(Value&& value); /// Access an object value by name, create a null member if it does not /// exist. 
Value& - operator[](const char* key); + operator[](char const* key); /// Access an object value by name, returns null if there is no member with /// that name. - const Value& - operator[](const char* key) const; + Value const& + operator[](char const* key) const; /// Access an object value by name, create a null member if it does not /// exist. Value& operator[](std::string const& key); /// Access an object value by name, returns null if there is no member with /// that name. - const Value& + Value const& operator[](std::string const& key) const; /** \brief Access an object value by name, create a null member if it does not exist. @@ -364,14 +364,14 @@ public: * \endcode */ Value& - operator[](const StaticString& key); + operator[](StaticString const& key); /// Return the member named key if it exist, defaultValue otherwise. Value - get(const char* key, const Value& defaultValue) const; + get(char const* key, Value const& defaultValue) const; /// Return the member named key if it exist, defaultValue otherwise. Value - get(std::string const& key, const Value& defaultValue) const; + get(std::string const& key, Value const& defaultValue) const; /// \brief Remove and return the named member. /// @@ -380,14 +380,14 @@ public: /// \pre type() is objectValue or nullValue /// \post type() is unchanged Value - removeMember(const char* key); + removeMember(char const* key); /// Same as removeMember(const char*) Value removeMember(std::string const& key); /// Return true if the object has a member named key. bool - isMember(const char* key) const; + isMember(char const* key) const; /// Return true if the object has a member named key. bool isMember(std::string const& key) const; @@ -414,13 +414,13 @@ public: end(); friend bool - operator==(const Value&, const Value&); + operator==(Value const&, Value const&); friend bool - operator<(const Value&, const Value&); + operator<(Value const&, Value const&); private: Value& - resolveReference(const char* key, bool isStatic); + resolveReference(char const* key, bool isStatic); private: union ValueHolder @@ -437,31 +437,31 @@ private: }; bool -operator==(const Value&, const Value&); +operator==(Value const&, Value const&); inline bool -operator!=(const Value& x, const Value& y) +operator!=(Value const& x, Value const& y) { return !(x == y); } bool -operator<(const Value&, const Value&); +operator<(Value const&, Value const&); inline bool -operator<=(const Value& x, const Value& y) +operator<=(Value const& x, Value const& y) { return !(y < x); } inline bool -operator>(const Value& x, const Value& y) +operator>(Value const& x, Value const& y) { return y < x; } inline bool -operator>=(const Value& x, const Value& y) +operator>=(Value const& x, Value const& y) { return !(x < y); } @@ -482,11 +482,11 @@ public: virtual ~ValueAllocator() = default; virtual char* - makeMemberName(const char* memberName) = 0; + makeMemberName(char const* memberName) = 0; virtual void releaseMemberName(char* memberName) = 0; virtual char* - duplicateStringValue(const char* value, unsigned int length = unknown) = 0; + duplicateStringValue(char const* value, unsigned int length = unknown) = 0; virtual void releaseStringValue(char* value) = 0; }; @@ -503,16 +503,16 @@ public: ValueIteratorBase(); - explicit ValueIteratorBase(const Value::ObjectValues::iterator& current); + explicit ValueIteratorBase(Value::ObjectValues::iterator const& current); bool - operator==(const SelfType& other) const + operator==(SelfType const& other) const { return isEqual(other); } bool - operator!=(const 
SelfType& other) const + operator!=(SelfType const& other) const { return !isEqual(other); } @@ -528,7 +528,7 @@ public: /// Return the member name of the referenced Value. "" if it is not an /// objectValue. - const char* + char const* memberName() const; protected: @@ -542,13 +542,13 @@ protected: decrement(); difference_type - computeDistance(const SelfType& other) const; + computeDistance(SelfType const& other) const; bool - isEqual(const SelfType& other) const; + isEqual(SelfType const& other) const; void - copy(const SelfType& other); + copy(SelfType const& other); private: Value::ObjectValues::iterator current_; @@ -566,8 +566,8 @@ class ValueConstIterator : public ValueIteratorBase public: using size_t = unsigned int; using difference_type = int; - using reference = const Value&; - using pointer = const Value*; + using reference = Value const&; + using pointer = Value const*; using SelfType = ValueConstIterator; ValueConstIterator() = default; @@ -575,11 +575,11 @@ public: private: /*! \internal Use by Value to create an iterator. */ - explicit ValueConstIterator(const Value::ObjectValues::iterator& current); + explicit ValueConstIterator(Value::ObjectValues::iterator const& current); public: SelfType& - operator=(const ValueIteratorBase& other); + operator=(ValueIteratorBase const& other); SelfType operator++(int) @@ -632,17 +632,17 @@ public: using SelfType = ValueIterator; ValueIterator() = default; - ValueIterator(const ValueConstIterator& other); - ValueIterator(const ValueIterator& other); + ValueIterator(ValueConstIterator const& other); + ValueIterator(ValueIterator const& other); private: /*! \internal Use by Value to create an iterator. */ - explicit ValueIterator(const Value::ObjectValues::iterator& current); + explicit ValueIterator(Value::ObjectValues::iterator const& current); public: SelfType& - operator=(const SelfType& other); + operator=(SelfType const& other); SelfType operator++(int) diff --git a/include/xrpl/json/json_writer.h b/include/xrpl/json/json_writer.h index 1b4ff15508..7e21e766e3 100644 --- a/include/xrpl/json/json_writer.h +++ b/include/xrpl/json/json_writer.h @@ -39,7 +39,7 @@ public: { } virtual std::string - write(const Value& root) = 0; + write(Value const& root) = 0; }; /** \brief Outputs a Value in JSON format @@ -60,11 +60,11 @@ public: public: // overridden from Writer std::string - write(const Value& root) override; + write(Value const& root) override; private: void - writeValue(const Value& value); + writeValue(Value const& value); std::string document_; }; @@ -101,15 +101,15 @@ public: // overridden from Writer * JSON document that represents the root value. */ std::string - write(const Value& root) override; + write(Value const& root) override; private: void - writeValue(const Value& value); + writeValue(Value const& value); void - writeArrayValue(const Value& value); + writeArrayValue(Value const& value); bool - isMultineArray(const Value& value); + isMultineArray(Value const& value); void pushValue(std::string const& value); void @@ -168,15 +168,15 @@ public: * return a value. 
*/ void - write(std::ostream& out, const Value& root); + write(std::ostream& out, Value const& root); private: void - writeValue(const Value& value); + writeValue(Value const& value); void - writeArrayValue(const Value& value); + writeArrayValue(Value const& value); bool - isMultineArray(const Value& value); + isMultineArray(Value const& value); void pushValue(std::string const& value); void @@ -207,12 +207,12 @@ valueToString(double value); std::string valueToString(bool value); std::string -valueToQuotedString(const char* value); +valueToQuotedString(char const* value); /// \brief Output using the StyledStreamWriter. /// \see Json::operator>>() std::ostream& -operator<<(std::ostream&, const Value& root); +operator<<(std::ostream&, Value const& root); //------------------------------------------------------------------------------ diff --git a/include/xrpl/json/to_string.h b/include/xrpl/json/to_string.h index 5f692a415e..5b20293f06 100644 --- a/include/xrpl/json/to_string.h +++ b/include/xrpl/json/to_string.h @@ -37,7 +37,7 @@ pretty(Value const&); /** Output using the StyledStreamWriter. @see Json::operator>>(). */ std::ostream& -operator<<(std::ostream&, const Value& root); +operator<<(std::ostream&, Value const& root); } // namespace Json diff --git a/include/xrpl/protocol/FeeUnits.h b/include/xrpl/protocol/FeeUnits.h index 0cbf1b608a..c6949a434c 100644 --- a/include/xrpl/protocol/FeeUnits.h +++ b/include/xrpl/protocol/FeeUnits.h @@ -336,7 +336,7 @@ public: // Output Fees as just their numeric value. template std::basic_ostream& -operator<<(std::basic_ostream& os, const TaggedFee& q) +operator<<(std::basic_ostream& os, TaggedFee const& q) { return os << q.value(); } diff --git a/include/xrpl/protocol/MultiApiJson.h b/include/xrpl/protocol/MultiApiJson.h index 15743e856b..1e35bdbda2 100644 --- a/include/xrpl/protocol/MultiApiJson.h +++ b/include/xrpl/protocol/MultiApiJson.h @@ -80,7 +80,7 @@ struct MultiApiJson } void - set(const char* key, auto const& v) + set(char const* key, auto const& v) requires std::constructible_from { for (auto& a : this->val) @@ -91,7 +91,7 @@ struct MultiApiJson enum IsMemberResult : int { none = 0, some, all }; [[nodiscard]] IsMemberResult - isMember(const char* key) const + isMember(char const* key) const { int count = 0; for (auto& a : this->val) diff --git a/include/xrpl/protocol/Permissions.h b/include/xrpl/protocol/Permissions.h index eb2c733313..8ba53d94d7 100644 --- a/include/xrpl/protocol/Permissions.h +++ b/include/xrpl/protocol/Permissions.h @@ -67,9 +67,9 @@ public: static Permission const& getInstance(); - Permission(const Permission&) = delete; + Permission(Permission const&) = delete; Permission& - operator=(const Permission&) = delete; + operator=(Permission const&) = delete; std::optional getGranularValue(std::string const& name) const; @@ -85,7 +85,7 @@ public: // for tx level permission, permission value is equal to tx type plus one uint32_t - txToPermissionType(const TxType& type) const; + txToPermissionType(TxType const& type) const; // tx type value is permission value minus one TxType diff --git a/include/xrpl/protocol/Quality.h b/include/xrpl/protocol/Quality.h index 6783fbf6da..f1a3a58224 100644 --- a/include/xrpl/protocol/Quality.h +++ b/include/xrpl/protocol/Quality.h @@ -113,8 +113,8 @@ public: // have lower unsigned integer representations. 
using value_type = std::uint64_t; - static const int minTickSize = 3; - static const int maxTickSize = 16; + static int const minTickSize = 3; + static int const maxTickSize = 16; private: // This has the same representation as STAmount, see the comment on the diff --git a/include/xrpl/protocol/SField.h b/include/xrpl/protocol/SField.h index 01909b1986..04d4dc82fc 100644 --- a/include/xrpl/protocol/SField.h +++ b/include/xrpl/protocol/SField.h @@ -182,22 +182,22 @@ public: private_access_tag_t, SerializedTypeID tid, int fv, - const char* fn, + char const* fn, int meta = sMD_Default, IsSigning signing = IsSigning::yes); explicit SField(private_access_tag_t, int fc); - static const SField& + static SField const& getField(int fieldCode); - static const SField& + static SField const& getField(std::string const& fieldName); - static const SField& + static SField const& getField(int type, int value) { return getField(field_code(type, value)); } - static const SField& + static SField const& getField(SerializedTypeID type, int value) { return getField(field_code(type, value)); @@ -284,19 +284,19 @@ public: } bool - operator==(const SField& f) const + operator==(SField const& f) const { return fieldCode == f.fieldCode; } bool - operator!=(const SField& f) const + operator!=(SField const& f) const { return fieldCode != f.fieldCode; } static int - compare(const SField& f1, const SField& f2); + compare(SField const& f1, SField const& f2); static std::map const& getKnownCodeToField() diff --git a/include/xrpl/protocol/STAccount.h b/include/xrpl/protocol/STAccount.h index 537a336e5d..422a01defa 100644 --- a/include/xrpl/protocol/STAccount.h +++ b/include/xrpl/protocol/STAccount.h @@ -58,7 +58,7 @@ public: add(Serializer& s) const override; bool - isEquivalent(const STBase& t) const override; + isEquivalent(STBase const& t) const override; bool isDefault() const override; diff --git a/include/xrpl/protocol/STAmount.h b/include/xrpl/protocol/STAmount.h index 23e4c5e5b5..518edacf0b 100644 --- a/include/xrpl/protocol/STAmount.h +++ b/include/xrpl/protocol/STAmount.h @@ -62,20 +62,20 @@ private: public: using value_type = STAmount; - static const int cMinOffset = -96; - static const int cMaxOffset = 80; + static int const cMinOffset = -96; + static int const cMaxOffset = 80; // Maximum native value supported by the code - static const std::uint64_t cMinValue = 1000000000000000ull; - static const std::uint64_t cMaxValue = 9999999999999999ull; - static const std::uint64_t cMaxNative = 9000000000000000000ull; + static std::uint64_t const cMinValue = 1000000000000000ull; + static std::uint64_t const cMaxValue = 9999999999999999ull; + static std::uint64_t const cMaxNative = 9000000000000000000ull; // Max native value on network. 
- static const std::uint64_t cMaxNativeN = 100000000000000000ull; - static const std::uint64_t cIssuedCurrency = 0x8000000000000000ull; - static const std::uint64_t cPositive = 0x4000000000000000ull; - static const std::uint64_t cMPToken = 0x2000000000000000ull; - static const std::uint64_t cValueMask = ~(cPositive | cMPToken); + static std::uint64_t const cMaxNativeN = 100000000000000000ull; + static std::uint64_t const cIssuedCurrency = 0x8000000000000000ull; + static std::uint64_t const cPositive = 0x4000000000000000ull; + static std::uint64_t const cMPToken = 0x2000000000000000ull; + static std::uint64_t const cValueMask = ~(cPositive | cMPToken); static std::uint64_t const uRateOne; @@ -274,7 +274,7 @@ public: add(Serializer& s) const override; bool - isEquivalent(const STBase& t) const override; + isEquivalent(STBase const& t) const override; bool isDefault() const override; diff --git a/include/xrpl/protocol/STArray.h b/include/xrpl/protocol/STArray.h index 7fa2ecad83..8f1e2dd0ee 100644 --- a/include/xrpl/protocol/STArray.h +++ b/include/xrpl/protocol/STArray.h @@ -128,13 +128,13 @@ public: add(Serializer& s) const override; void - sort(bool (*compare)(const STObject& o1, const STObject& o2)); + sort(bool (*compare)(STObject const& o1, STObject const& o2)); bool - operator==(const STArray& s) const; + operator==(STArray const& s) const; bool - operator!=(const STArray& s) const; + operator!=(STArray const& s) const; iterator erase(iterator pos); @@ -152,7 +152,7 @@ public: getSType() const override; bool - isEquivalent(const STBase& t) const override; + isEquivalent(STBase const& t) const override; bool isDefault() const override; @@ -275,13 +275,13 @@ STArray::swap(STArray& a) noexcept } inline bool -STArray::operator==(const STArray& s) const +STArray::operator==(STArray const& s) const { return v_ == s.v_; } inline bool -STArray::operator!=(const STArray& s) const +STArray::operator!=(STArray const& s) const { return v_ != s.v_; } diff --git a/include/xrpl/protocol/STBase.h b/include/xrpl/protocol/STBase.h index e0f28e03de..8d0aaabe48 100644 --- a/include/xrpl/protocol/STBase.h +++ b/include/xrpl/protocol/STBase.h @@ -129,16 +129,16 @@ class STBase public: virtual ~STBase() = default; STBase(); - STBase(const STBase&) = default; + STBase(STBase const&) = default; STBase& - operator=(const STBase& t); + operator=(STBase const& t); explicit STBase(SField const& n); bool - operator==(const STBase& t) const; + operator==(STBase const& t) const; bool - operator!=(const STBase& t) const; + operator!=(STBase const& t) const; template D& @@ -197,7 +197,7 @@ private: //------------------------------------------------------------------------------ std::ostream& -operator<<(std::ostream& out, const STBase& t); +operator<<(std::ostream& out, STBase const& t); template D& diff --git a/include/xrpl/protocol/STBitString.h b/include/xrpl/protocol/STBitString.h index bf4ce84a3f..7d41637c89 100644 --- a/include/xrpl/protocol/STBitString.h +++ b/include/xrpl/protocol/STBitString.h @@ -45,8 +45,8 @@ public: STBitString() = default; STBitString(SField const& n); - STBitString(const value_type& v); - STBitString(SField const& n, const value_type& v); + STBitString(value_type const& v); + STBitString(SField const& n, value_type const& v); STBitString(SerialIter& sit, SField const& name); SerializedTypeID @@ -56,7 +56,7 @@ public: getText() const override; bool - isEquivalent(const STBase& t) const override; + isEquivalent(STBase const& t) const override; void add(Serializer& s) const override; @@ 
-93,12 +93,12 @@ inline STBitString::STBitString(SField const& n) : STBase(n) } template -inline STBitString::STBitString(const value_type& v) : value_(v) +inline STBitString::STBitString(value_type const& v) : value_(v) { } template -inline STBitString::STBitString(SField const& n, const value_type& v) +inline STBitString::STBitString(SField const& n, value_type const& v) : STBase(n), value_(v) { } @@ -160,9 +160,9 @@ STBitString::getText() const template bool -STBitString::isEquivalent(const STBase& t) const +STBitString::isEquivalent(STBase const& t) const { - const STBitString* v = dynamic_cast(&t); + STBitString const* v = dynamic_cast(&t); return v && (value_ == v->value_); } diff --git a/include/xrpl/protocol/STBlob.h b/include/xrpl/protocol/STBlob.h index cfe4ab5af5..80832b2688 100644 --- a/include/xrpl/protocol/STBlob.h +++ b/include/xrpl/protocol/STBlob.h @@ -63,7 +63,7 @@ public: add(Serializer& s) const override; bool - isEquivalent(const STBase& t) const override; + isEquivalent(STBase const& t) const override; bool isDefault() const override; diff --git a/include/xrpl/protocol/STCurrency.h b/include/xrpl/protocol/STCurrency.h index 3383137fb3..90a6589048 100644 --- a/include/xrpl/protocol/STCurrency.h +++ b/include/xrpl/protocol/STCurrency.h @@ -65,7 +65,7 @@ public: add(Serializer& s) const override; bool - isEquivalent(const STBase& t) const override; + isEquivalent(STBase const& t) const override; bool isDefault() const override; diff --git a/include/xrpl/protocol/STInteger.h b/include/xrpl/protocol/STInteger.h index 68e25be1c9..b259638774 100644 --- a/include/xrpl/protocol/STInteger.h +++ b/include/xrpl/protocol/STInteger.h @@ -54,7 +54,7 @@ public: isDefault() const override; bool - isEquivalent(const STBase& t) const override; + isEquivalent(STBase const& t) const override; STInteger& operator=(value_type const& v); @@ -127,9 +127,9 @@ STInteger::isDefault() const template inline bool -STInteger::isEquivalent(const STBase& t) const +STInteger::isEquivalent(STBase const& t) const { - const STInteger* v = dynamic_cast(&t); + STInteger const* v = dynamic_cast(&t); return v && (value_ == v->value_); } diff --git a/include/xrpl/protocol/STIssue.h b/include/xrpl/protocol/STIssue.h index 08812c15ae..c729854e1b 100644 --- a/include/xrpl/protocol/STIssue.h +++ b/include/xrpl/protocol/STIssue.h @@ -71,7 +71,7 @@ public: add(Serializer& s) const override; bool - isEquivalent(const STBase& t) const override; + isEquivalent(STBase const& t) const override; bool isDefault() const override; diff --git a/include/xrpl/protocol/STLedgerEntry.h b/include/xrpl/protocol/STLedgerEntry.h index 96b37af0b9..3609a04d4b 100644 --- a/include/xrpl/protocol/STLedgerEntry.h +++ b/include/xrpl/protocol/STLedgerEntry.h @@ -35,7 +35,7 @@ class STLedgerEntry final : public STObject, public CountedObject public: using pointer = std::shared_ptr; - using ref = const std::shared_ptr&; + using ref = std::shared_ptr const&; /** Create an empty object with the given key and type. 
*/ explicit STLedgerEntry(Keylet const& k); diff --git a/include/xrpl/protocol/STObject.h b/include/xrpl/protocol/STObject.h index b89a415ebe..2efa828267 100644 --- a/include/xrpl/protocol/STObject.h +++ b/include/xrpl/protocol/STObject.h @@ -99,8 +99,8 @@ public: STObject& operator=(STObject&& other); - STObject(const SOTemplate& type, SField const& name); - STObject(const SOTemplate& type, SerialIter& sit, SField const& name); + STObject(SOTemplate const& type, SField const& name); + STObject(SOTemplate const& type, SerialIter& sit, SField const& name); STObject(SerialIter& sit, SField const& name, int depth = 0); STObject(SerialIter&& sit, SField const& name); explicit STObject(SField const& name); @@ -121,7 +121,7 @@ public: reserve(std::size_t n); void - applyTemplate(const SOTemplate& type); + applyTemplate(SOTemplate const& type); void applyTemplateFromSField(SField const&); @@ -130,7 +130,7 @@ public: isFree() const; void - set(const SOTemplate&); + set(SOTemplate const&); bool set(SerialIter& u, int depth = 0); @@ -139,7 +139,7 @@ public: getSType() const override; bool - isEquivalent(const STBase& t) const override; + isEquivalent(STBase const& t) const override; bool isDefault() const override; @@ -183,13 +183,13 @@ public: uint256 getSigningHash(HashPrefix prefix) const; - const STBase& + STBase const& peekAtIndex(int offset) const; STBase& getIndex(int offset); - const STBase* + STBase const* peekAtPIndex(int offset) const; STBase* @@ -201,13 +201,13 @@ public: SField const& getFieldSType(int index) const; - const STBase& + STBase const& peekAtField(SField const& field) const; STBase& getField(SField const& field); - const STBase* + STBase const* peekAtPField(SField const& field) const; STBase* @@ -241,11 +241,11 @@ public: getFieldAmount(SField const& field) const; STPathSet const& getFieldPathSet(SField const& field) const; - const STVector256& + STVector256 const& getFieldV256(SField const& field) const; - const STArray& + STArray const& getFieldArray(SField const& field) const; - const STCurrency& + STCurrency const& getFieldCurrency(SField const& field) const; STNumber const& getFieldNumber(SField const& field) const; @@ -409,12 +409,12 @@ public: delField(int index); bool - hasMatchingEntry(const STBase&); + hasMatchingEntry(STBase const&); bool - operator==(const STObject& o) const; + operator==(STObject const& o) const; bool - operator!=(const STObject& o) const; + operator!=(STObject const& o) const; class FieldErr; @@ -970,7 +970,7 @@ STObject::getCount() const return v_.size(); } -inline const STBase& +inline STBase const& STObject::peekAtIndex(int offset) const { return v_[offset].get(); @@ -982,7 +982,7 @@ STObject::getIndex(int offset) return v_[offset].get(); } -inline const STBase* +inline STBase const* STObject::peekAtPIndex(int offset) const { return &v_[offset].get(); @@ -1117,7 +1117,7 @@ STObject::setFieldH160(SField const& field, base_uint<160, Tag> const& v) } inline bool -STObject::operator!=(const STObject& o) const +STObject::operator!=(STObject const& o) const { return !(*this == o); } @@ -1126,7 +1126,7 @@ template V STObject::getFieldByValue(SField const& field) const { - const STBase* rf = peekAtPField(field); + STBase const* rf = peekAtPField(field); if (!rf) throwFieldNotFound(field); @@ -1136,7 +1136,7 @@ STObject::getFieldByValue(SField const& field) const if (id == STI_NOTPRESENT) return V(); // optional field not present - const T* cf = dynamic_cast(rf); + T const* cf = dynamic_cast(rf); if (!cf) Throw("Wrong field type"); @@ -1153,7 +1153,7 
@@ template V const& STObject::getFieldByConstRef(SField const& field, V const& empty) const { - const STBase* rf = peekAtPField(field); + STBase const* rf = peekAtPField(field); if (!rf) throwFieldNotFound(field); @@ -1163,7 +1163,7 @@ STObject::getFieldByConstRef(SField const& field, V const& empty) const if (id == STI_NOTPRESENT) return empty; // optional field not present - const T* cf = dynamic_cast(rf); + T const* cf = dynamic_cast(rf); if (!cf) Throw("Wrong field type"); diff --git a/include/xrpl/protocol/STPathSet.h b/include/xrpl/protocol/STPathSet.h index 7605a2283c..c56dd43e7f 100644 --- a/include/xrpl/protocol/STPathSet.h +++ b/include/xrpl/protocol/STPathSet.h @@ -106,10 +106,10 @@ public: getIssuerID() const; bool - operator==(const STPathElement& t) const; + operator==(STPathElement const& t) const; bool - operator!=(const STPathElement& t) const; + operator!=(STPathElement const& t) const; private: static std::size_t @@ -164,7 +164,7 @@ public: STPathElement& operator[](int i); - const STPathElement& + STPathElement const& operator[](int i) const; void @@ -196,7 +196,7 @@ public: assembleAdd(STPath const& base, STPathElement const& tail); bool - isEquivalent(const STBase& t) const override; + isEquivalent(STBase const& t) const override; bool isDefault() const override; @@ -375,7 +375,7 @@ STPathElement::getIssuerID() const } inline bool -STPathElement::operator==(const STPathElement& t) const +STPathElement::operator==(STPathElement const& t) const { return (mType & typeAccount) == (t.mType & typeAccount) && hash_value_ == t.hash_value_ && mAccountID == t.mAccountID && @@ -383,7 +383,7 @@ STPathElement::operator==(const STPathElement& t) const } inline bool -STPathElement::operator!=(const STPathElement& t) const +STPathElement::operator!=(STPathElement const& t) const { return !operator==(t); } @@ -455,7 +455,7 @@ STPath::operator[](int i) return mPath[i]; } -inline const STPathElement& +inline STPathElement const& STPath::operator[](int i) const { return mPath[i]; diff --git a/include/xrpl/protocol/STVector256.h b/include/xrpl/protocol/STVector256.h index d81ddf977f..bc22ebdc7f 100644 --- a/include/xrpl/protocol/STVector256.h +++ b/include/xrpl/protocol/STVector256.h @@ -50,7 +50,7 @@ public: Json::Value getJson(JsonOptions) const override; bool - isEquivalent(const STBase& t) const override; + isEquivalent(STBase const& t) const override; bool isDefault() const override; @@ -62,7 +62,7 @@ public: operator=(std::vector&& v); void - setValue(const STVector256& v); + setValue(STVector256 const& v); /** Retrieve a copy of the vector we contain */ explicit @@ -153,7 +153,7 @@ STVector256::operator=(std::vector&& v) } inline void -STVector256::setValue(const STVector256& v) +STVector256::setValue(STVector256 const& v) { mValue = v.mValue; } diff --git a/include/xrpl/protocol/STXChainBridge.h b/include/xrpl/protocol/STXChainBridge.h index 813bcc4443..4435857a51 100644 --- a/include/xrpl/protocol/STXChainBridge.h +++ b/include/xrpl/protocol/STXChainBridge.h @@ -107,7 +107,7 @@ public: add(Serializer& s) const override; bool - isEquivalent(const STBase& t) const override; + isEquivalent(STBase const& t) const override; bool isDefault() const override; diff --git a/include/xrpl/protocol/Serializer.h b/include/xrpl/protocol/Serializer.h index 5724a19f57..9c77aa4111 100644 --- a/include/xrpl/protocol/Serializer.h +++ b/include/xrpl/protocol/Serializer.h @@ -139,9 +139,9 @@ public: int addRaw(Slice slice); int - addRaw(const void* ptr, int len); + addRaw(void const* ptr, int len); 
int - addRaw(const Serializer& s); + addRaw(Serializer const& s); int addVL(Blob const& vector); @@ -151,7 +151,7 @@ public: int addVL(Iter begin, Iter end, int len); int - addVL(const void* ptr, int len); + addVL(void const* ptr, int len); // disassemble functions bool @@ -161,7 +161,7 @@ public: bool getInteger(Integer& number, int offset) { - static const auto bytes = sizeof(Integer); + static auto const bytes = sizeof(Integer); if ((offset + bytes) > mData.size()) return false; number = 0; @@ -220,7 +220,7 @@ public: { return mData.size(); } - const void* + void const* getDataPtr() const { return mData.data(); @@ -238,7 +238,7 @@ public: std::string getString() const { - return std::string(static_cast(getDataPtr()), size()); + return std::string(static_cast(getDataPtr()), size()); } void erase() @@ -296,12 +296,12 @@ public: return v != mData; } bool - operator==(const Serializer& v) const + operator==(Serializer const& v) const { return v.mData == mData; } bool - operator!=(const Serializer& v) const + operator!=(Serializer const& v) const { return v.mData != mData; } diff --git a/include/xrpl/protocol/XRPAmount.h b/include/xrpl/protocol/XRPAmount.h index 1d6cae9ecf..332735dc6f 100644 --- a/include/xrpl/protocol/XRPAmount.h +++ b/include/xrpl/protocol/XRPAmount.h @@ -267,7 +267,7 @@ XRPAmount::decimalXRP() const // Output XRPAmount as just the drops value. template std::basic_ostream& -operator<<(std::basic_ostream& os, const XRPAmount& q) +operator<<(std::basic_ostream& os, XRPAmount const& q) { return os << q.drops(); } diff --git a/include/xrpl/protocol/detail/token_errors.h b/include/xrpl/protocol/detail/token_errors.h index 23a46bd1c5..0ae2728024 100644 --- a/include/xrpl/protocol/detail/token_errors.h +++ b/include/xrpl/protocol/detail/token_errors.h @@ -50,7 +50,7 @@ class TokenCodecErrcCategory : public std::error_category { public: // Return a short descriptive name for the category - virtual const char* + virtual char const* name() const noexcept override final { return "TokenCodecError"; @@ -86,7 +86,7 @@ public: }; } // namespace detail -inline const ripple::detail::TokenCodecErrcCategory& +inline ripple::detail::TokenCodecErrcCategory const& TokenCodecErrcCategory() { static ripple::detail::TokenCodecErrcCategory c; diff --git a/include/xrpl/protocol/json_get_or_throw.h b/include/xrpl/protocol/json_get_or_throw.h index 5277ee8648..c59b5a71a3 100644 --- a/include/xrpl/protocol/json_get_or_throw.h +++ b/include/xrpl/protocol/json_get_or_throw.h @@ -20,7 +20,7 @@ struct JsonMissingKeyError : std::exception JsonMissingKeyError(Json::StaticString const& k) : key{k.c_str()} { } - const char* + char const* what() const noexcept override { if (msg.empty()) @@ -40,7 +40,7 @@ struct JsonTypeMismatchError : std::exception : key{k.c_str()}, expectedType{std::move(et)} { } - const char* + char const* what() const noexcept override { if (msg.empty()) diff --git a/src/libxrpl/basics/Archive.cpp b/src/libxrpl/basics/Archive.cpp index 2ddbc8200c..e60b2f12cf 100644 --- a/src/libxrpl/basics/Archive.cpp +++ b/src/libxrpl/basics/Archive.cpp @@ -94,7 +94,7 @@ extractTarLz4( if (archive_entry_size(entry) > 0) { - const void* buf; + void const* buf; size_t sz; la_int64_t offset; while (true) diff --git a/src/libxrpl/basics/FileUtilities.cpp b/src/libxrpl/basics/FileUtilities.cpp index c0456b0556..291eb43c7b 100644 --- a/src/libxrpl/basics/FileUtilities.cpp +++ b/src/libxrpl/basics/FileUtilities.cpp @@ -63,7 +63,7 @@ getFileContents( return {}; } - const std::string result{ + std::string const 
result{ std::istreambuf_iterator{fileStream}, std::istreambuf_iterator{}}; diff --git a/src/libxrpl/basics/Number.cpp b/src/libxrpl/basics/Number.cpp index 186a363b41..f43288b57b 100644 --- a/src/libxrpl/basics/Number.cpp +++ b/src/libxrpl/basics/Number.cpp @@ -469,7 +469,7 @@ Number::operator/=(Number const& y) } // Shift by 10^17 gives greatest precision while not overflowing uint128_t // or the cast back to int64_t - const uint128_t f = 100'000'000'000'000'000; + uint128_t const f = 100'000'000'000'000'000; mantissa_ = static_cast(uint128_t(nm) * f / uint128_t(dm)); exponent_ = ne - de - 17; mantissa_ *= np * dp; diff --git a/src/libxrpl/basics/StringUtilities.cpp b/src/libxrpl/basics/StringUtilities.cpp index 5008730718..3cf3df209e 100644 --- a/src/libxrpl/basics/StringUtilities.cpp +++ b/src/libxrpl/basics/StringUtilities.cpp @@ -89,13 +89,13 @@ parseUrl(parsedURL& pUrl, std::string const& strUrl) boost::algorithm::to_lower(pUrl.scheme); pUrl.username = smMatch[2]; pUrl.password = smMatch[3]; - const std::string domain = smMatch[4]; + std::string const domain = smMatch[4]; // We need to use Endpoint to parse the domain to // strip surrounding brackets from IPv6 addresses, // e.g. [::1] => ::1. - const auto result = beast::IP::Endpoint::from_string_checked(domain); + auto const result = beast::IP::Endpoint::from_string_checked(domain); pUrl.domain = result ? result->address().to_string() : domain; - const std::string port = smMatch[5]; + std::string const port = smMatch[5]; if (!port.empty()) { pUrl.port = beast::lexicalCast(port); diff --git a/src/libxrpl/basics/UptimeClock.cpp b/src/libxrpl/basics/UptimeClock.cpp index 42ba479fae..7b61a5397e 100644 --- a/src/libxrpl/basics/UptimeClock.cpp +++ b/src/libxrpl/basics/UptimeClock.cpp @@ -67,7 +67,7 @@ UptimeClock::time_point UptimeClock::now() { // start the update thread on first use - static const auto init = start_clock(); + static auto const init = start_clock(); // Return the number of seconds since rippled start return time_point{duration{now_}}; diff --git a/src/libxrpl/json/Writer.cpp b/src/libxrpl/json/Writer.cpp index 94c7344788..369763da09 100644 --- a/src/libxrpl/json/Writer.cpp +++ b/src/libxrpl/json/Writer.cpp @@ -34,7 +34,7 @@ namespace Json { namespace { -std::map jsonSpecialCharacterEscape = { +std::map jsonSpecialCharacterEscape = { {'"', "\\\""}, {'\\', "\\\\"}, {'/', "\\/"}, @@ -47,13 +47,13 @@ std::map jsonSpecialCharacterEscape = { static size_t const jsonEscapeLength = 2; // All other JSON punctuation. 
-const char closeBrace = '}'; -const char closeBracket = ']'; -const char colon = ':'; -const char comma = ','; -const char openBrace = '{'; -const char openBracket = '['; -const char quote = '"'; +char const closeBrace = '}'; +char const closeBracket = ']'; +char const colon = ':'; +char const comma = ','; +char const openBrace = '{'; +char const openBracket = '['; +char const quote = '"'; static auto const integralFloatsBecomeInts = false; diff --git a/src/libxrpl/json/json_reader.cpp b/src/libxrpl/json/json_reader.cpp index 6818d73ded..9bad898bee 100644 --- a/src/libxrpl/json/json_reader.cpp +++ b/src/libxrpl/json/json_reader.cpp @@ -78,8 +78,8 @@ bool Reader::parse(std::string const& document, Value& root) { document_ = document; - const char* begin = document_.c_str(); - const char* end = begin + document_.length(); + char const* begin = document_.c_str(); + char const* end = begin + document_.length(); return parse(begin, end, root); } @@ -99,7 +99,7 @@ Reader::parse(std::istream& sin, Value& root) } bool -Reader::parse(const char* beginDoc, const char* endDoc, Value& root) +Reader::parse(char const* beginDoc, char const* endDoc, Value& root) { begin_ = beginDoc; end_ = endDoc; @@ -193,7 +193,7 @@ Reader::skipCommentTokens(Token& token) } bool -Reader::expectToken(TokenType type, Token& token, const char* message) +Reader::expectToken(TokenType type, Token& token, char const* message) { readToken(token); @@ -629,7 +629,7 @@ bool Reader::decodeDouble(Token& token) { double value = 0; - const int bufferSize = 32; + int const bufferSize = 32; int count; int length = int(token.end_ - token.start_); // Sanity check to avoid buffer overflow exploits. @@ -939,7 +939,7 @@ Reader::getFormatedErrorMessages() const itError != errors_.end(); ++itError) { - const ErrorInfo& error = *itError; + ErrorInfo const& error = *itError; formattedMessage += "* " + getLocationLineAndColumn(error.token_.start_) + "\n"; formattedMessage += " " + error.message_ + "\n"; diff --git a/src/libxrpl/json/json_value.cpp b/src/libxrpl/json/json_value.cpp index 709b425d63..86a8ed5aee 100644 --- a/src/libxrpl/json/json_value.cpp +++ b/src/libxrpl/json/json_value.cpp @@ -31,10 +31,10 @@ namespace Json { -const Value Value::null; -const Int Value::minInt = Int(~(UInt(-1) / 2)); -const Int Value::maxInt = Int(UInt(-1) / 2); -const UInt Value::maxUInt = UInt(-1); +Value const Value::null; +Int const Value::minInt = Int(~(UInt(-1) / 2)); +Int const Value::maxInt = Int(UInt(-1) / 2); +UInt const Value::maxUInt = UInt(-1); class DefaultValueAllocator : public ValueAllocator { @@ -42,7 +42,7 @@ public: virtual ~DefaultValueAllocator() = default; char* - makeMemberName(const char* memberName) override + makeMemberName(char const* memberName) override { return duplicateStringValue(memberName); } @@ -54,7 +54,7 @@ public: } char* - duplicateStringValue(const char* value, unsigned int length = unknown) + duplicateStringValue(char const* value, unsigned int length = unknown) override { //@todo investigate this old optimization @@ -110,14 +110,14 @@ Value::CZString::CZString(int index) : cstr_(0), index_(index) { } -Value::CZString::CZString(const char* cstr, DuplicationPolicy allocate) +Value::CZString::CZString(char const* cstr, DuplicationPolicy allocate) : cstr_( allocate == duplicate ? valueAllocator()->makeMemberName(cstr) : cstr) , index_(allocate) { } -Value::CZString::CZString(const CZString& other) +Value::CZString::CZString(CZString const& other) : cstr_( other.index_ != noDuplication && other.cstr_ != 0 ? 
valueAllocator()->makeMemberName(other.cstr_) @@ -136,7 +136,7 @@ Value::CZString::~CZString() } bool -Value::CZString::operator<(const CZString& other) const +Value::CZString::operator<(CZString const& other) const { if (cstr_ && other.cstr_) return strcmp(cstr_, other.cstr_) < 0; @@ -145,7 +145,7 @@ Value::CZString::operator<(const CZString& other) const } bool -Value::CZString::operator==(const CZString& other) const +Value::CZString::operator==(CZString const& other) const { if (cstr_ && other.cstr_) return strcmp(cstr_, other.cstr_) == 0; @@ -159,7 +159,7 @@ Value::CZString::index() const return index_; } -const char* +char const* Value::CZString::c_str() const { return cstr_; @@ -232,7 +232,7 @@ Value::Value(double value) : type_(realValue) value_.real_ = value; } -Value::Value(const char* value) : type_(stringValue), allocated_(true) +Value::Value(char const* value) : type_(stringValue), allocated_(true) { value_.string_ = valueAllocator()->duplicateStringValue(value); } @@ -243,7 +243,7 @@ Value::Value(std::string const& value) : type_(stringValue), allocated_(true) value.c_str(), (unsigned int)value.length()); } -Value::Value(const StaticString& value) : type_(stringValue), allocated_(false) +Value::Value(StaticString const& value) : type_(stringValue), allocated_(false) { value_.string_ = const_cast(value.c_str()); } @@ -253,7 +253,7 @@ Value::Value(bool value) : type_(booleanValue) value_.bool_ = value; } -Value::Value(const Value& other) : type_(other.type_) +Value::Value(Value const& other) : type_(other.type_) { switch (type_) { @@ -370,7 +370,7 @@ integerCmp(Int i, UInt ui) } bool -operator<(const Value& x, const Value& y) +operator<(Value const& x, Value const& y) { if (auto signum = x.type_ - y.type_) { @@ -419,7 +419,7 @@ operator<(const Value& x, const Value& y) } bool -operator==(const Value& x, const Value& y) +operator==(Value const& x, Value const& y) { if (x.type_ != y.type_) { @@ -464,7 +464,7 @@ operator==(const Value& x, const Value& y) return 0; // unreachable } -const char* +char const* Value::asCString() const { XRPL_ASSERT(type_ == stringValue, "Json::Value::asCString : valid type"); @@ -795,7 +795,7 @@ Value::operator[](UInt index) return (*it).second; } -const Value& +Value const& Value::operator[](UInt index) const { XRPL_ASSERT( @@ -815,13 +815,13 @@ Value::operator[](UInt index) const } Value& -Value::operator[](const char* key) +Value::operator[](char const* key) { return resolveReference(key, false); } Value& -Value::resolveReference(const char* key, bool isStatic) +Value::resolveReference(char const* key, bool isStatic) { XRPL_ASSERT( type_ == nullValue || type_ == objectValue, @@ -844,9 +844,9 @@ Value::resolveReference(const char* key, bool isStatic) } Value -Value::get(UInt index, const Value& defaultValue) const +Value::get(UInt index, Value const& defaultValue) const { - const Value* value = &((*this)[index]); + Value const* value = &((*this)[index]); return value == &null ? 
defaultValue : *value; } @@ -856,8 +856,8 @@ Value::isValidIndex(UInt index) const return index < size(); } -const Value& -Value::operator[](const char* key) const +Value const& +Value::operator[](char const* key) const { XRPL_ASSERT( type_ == nullValue || type_ == objectValue, @@ -881,20 +881,20 @@ Value::operator[](std::string const& key) return (*this)[key.c_str()]; } -const Value& +Value const& Value::operator[](std::string const& key) const { return (*this)[key.c_str()]; } Value& -Value::operator[](const StaticString& key) +Value::operator[](StaticString const& key) { return resolveReference(key, true); } Value& -Value::append(const Value& value) +Value::append(Value const& value) { return (*this)[size()] = value; } @@ -906,20 +906,20 @@ Value::append(Value&& value) } Value -Value::get(const char* key, const Value& defaultValue) const +Value::get(char const* key, Value const& defaultValue) const { - const Value* value = &((*this)[key]); + Value const* value = &((*this)[key]); return value == &null ? defaultValue : *value; } Value -Value::get(std::string const& key, const Value& defaultValue) const +Value::get(std::string const& key, Value const& defaultValue) const { return get(key.c_str(), defaultValue); } Value -Value::removeMember(const char* key) +Value::removeMember(char const* key) { XRPL_ASSERT( type_ == nullValue || type_ == objectValue, @@ -946,12 +946,12 @@ Value::removeMember(std::string const& key) } bool -Value::isMember(const char* key) const +Value::isMember(char const* key) const { if (type_ != objectValue) return false; - const Value* value = &((*this)[key]); + Value const* value = &((*this)[key]); return value != &null; } diff --git a/src/libxrpl/json/json_valueiterator.cpp b/src/libxrpl/json/json_valueiterator.cpp index 20dedf2bb2..9f65a48a0d 100644 --- a/src/libxrpl/json/json_valueiterator.cpp +++ b/src/libxrpl/json/json_valueiterator.cpp @@ -37,7 +37,7 @@ ValueIteratorBase::ValueIteratorBase() : current_(), isNull_(true) } ValueIteratorBase::ValueIteratorBase( - const Value::ObjectValues::iterator& current) + Value::ObjectValues::iterator const& current) : current_(current), isNull_(false) { } @@ -61,7 +61,7 @@ ValueIteratorBase::decrement() } ValueIteratorBase::difference_type -ValueIteratorBase::computeDistance(const SelfType& other) const +ValueIteratorBase::computeDistance(SelfType const& other) const { // Iterator for null value are initialized using the default // constructor, which initialize current_ to the default @@ -89,7 +89,7 @@ ValueIteratorBase::computeDistance(const SelfType& other) const } bool -ValueIteratorBase::isEqual(const SelfType& other) const +ValueIteratorBase::isEqual(SelfType const& other) const { if (isNull_) { @@ -100,7 +100,7 @@ ValueIteratorBase::isEqual(const SelfType& other) const } void -ValueIteratorBase::copy(const SelfType& other) +ValueIteratorBase::copy(SelfType const& other) { current_ = other.current_; } @@ -108,7 +108,7 @@ ValueIteratorBase::copy(const SelfType& other) Value ValueIteratorBase::key() const { - const Value::CZString czstring = (*current_).first; + Value::CZString const czstring = (*current_).first; if (czstring.c_str()) { @@ -124,7 +124,7 @@ ValueIteratorBase::key() const UInt ValueIteratorBase::index() const { - const Value::CZString czstring = (*current_).first; + Value::CZString const czstring = (*current_).first; if (!czstring.c_str()) return czstring.index(); @@ -132,10 +132,10 @@ ValueIteratorBase::index() const return Value::UInt(-1); } -const char* +char const* ValueIteratorBase::memberName() const { 
-    const char* name = (*current_).first.c_str();
+    char const* name = (*current_).first.c_str();
     return name ? name : "";
 }
 
@@ -148,13 +148,13 @@ ValueIteratorBase::memberName() const
 // //////////////////////////////////////////////////////////////////
 
 ValueConstIterator::ValueConstIterator(
-    const Value::ObjectValues::iterator& current)
+    Value::ObjectValues::iterator const& current)
     : ValueIteratorBase(current)
 {
 }
 
 ValueConstIterator&
-ValueConstIterator::operator=(const ValueIteratorBase& other)
+ValueConstIterator::operator=(ValueIteratorBase const& other)
 {
     copy(other);
     return *this;
@@ -168,23 +168,23 @@ ValueConstIterator::operator=(const ValueIteratorBase& other)
 // //////////////////////////////////////////////////////////////////
 // //////////////////////////////////////////////////////////////////
 
-ValueIterator::ValueIterator(const Value::ObjectValues::iterator& current)
+ValueIterator::ValueIterator(Value::ObjectValues::iterator const& current)
     : ValueIteratorBase(current)
 {
 }
 
-ValueIterator::ValueIterator(const ValueConstIterator& other)
+ValueIterator::ValueIterator(ValueConstIterator const& other)
     : ValueIteratorBase(other)
 {
 }
 
-ValueIterator::ValueIterator(const ValueIterator& other)
+ValueIterator::ValueIterator(ValueIterator const& other)
     : ValueIteratorBase(other)
 {
 }
 
 ValueIterator&
-ValueIterator::operator=(const SelfType& other)
+ValueIterator::operator=(SelfType const& other)
 {
     copy(other);
     return *this;
diff --git a/src/libxrpl/json/json_writer.cpp b/src/libxrpl/json/json_writer.cpp
index a830f2855f..fabc697358 100644
--- a/src/libxrpl/json/json_writer.cpp
+++ b/src/libxrpl/json/json_writer.cpp
@@ -40,7 +40,7 @@ isControlCharacter(char ch)
 }
 
 static bool
-containsControlCharacter(const char* str)
+containsControlCharacter(char const* str)
 {
     while (*str)
     {
@@ -117,7 +117,7 @@ valueToString(bool value)
 }
 
 std::string
-valueToQuotedString(const char* value)
+valueToQuotedString(char const* value)
 {
     // Not sure how to handle unicode...
if (strpbrk(value, "\"\\\b\f\n\r\t") == nullptr && @@ -132,7 +132,7 @@ valueToQuotedString(const char* value) result.reserve(maxsize); // to avoid lots of mallocs result += "\""; - for (const char* c = value; *c != 0; ++c) + for (char const* c = value; *c != 0; ++c) { switch (*c) { @@ -197,7 +197,7 @@ valueToQuotedString(const char* value) // ////////////////////////////////////////////////////////////////// std::string -FastWriter::write(const Value& root) +FastWriter::write(Value const& root) { document_ = ""; writeValue(root); @@ -205,7 +205,7 @@ FastWriter::write(const Value& root) } void -FastWriter::writeValue(const Value& value) +FastWriter::writeValue(Value const& value) { switch (value.type()) { @@ -281,7 +281,7 @@ StyledWriter::StyledWriter() : rightMargin_(74), indentSize_(3) } std::string -StyledWriter::write(const Value& root) +StyledWriter::write(Value const& root) { document_ = ""; addChildValues_ = false; @@ -292,7 +292,7 @@ StyledWriter::write(const Value& root) } void -StyledWriter::writeValue(const Value& value) +StyledWriter::writeValue(Value const& value) { switch (value.type()) { @@ -338,7 +338,7 @@ StyledWriter::writeValue(const Value& value) while (true) { std::string const& name = *it; - const Value& childValue = value[name]; + Value const& childValue = value[name]; writeWithIndent(valueToQuotedString(name.c_str())); document_ += " : "; writeValue(childValue); @@ -358,7 +358,7 @@ StyledWriter::writeValue(const Value& value) } void -StyledWriter::writeArrayValue(const Value& value) +StyledWriter::writeArrayValue(Value const& value) { unsigned size = value.size(); @@ -377,7 +377,7 @@ StyledWriter::writeArrayValue(const Value& value) while (true) { - const Value& childValue = value[index]; + Value const& childValue = value[index]; if (hasChildValue) writeWithIndent(childValues_[index]); @@ -417,7 +417,7 @@ StyledWriter::writeArrayValue(const Value& value) } bool -StyledWriter::isMultineArray(const Value& value) +StyledWriter::isMultineArray(Value const& value) { int size = value.size(); bool isMultiLine = size * 3 >= rightMargin_; @@ -425,7 +425,7 @@ StyledWriter::isMultineArray(const Value& value) for (int index = 0; index < size && !isMultiLine; ++index) { - const Value& childValue = value[index]; + Value const& childValue = value[index]; isMultiLine = isMultiLine || ((childValue.isArray() || childValue.isObject()) && childValue.size() > 0); @@ -507,7 +507,7 @@ StyledStreamWriter::StyledStreamWriter(std::string indentation) } void -StyledStreamWriter::write(std::ostream& out, const Value& root) +StyledStreamWriter::write(std::ostream& out, Value const& root) { document_ = &out; addChildValues_ = false; @@ -518,7 +518,7 @@ StyledStreamWriter::write(std::ostream& out, const Value& root) } void -StyledStreamWriter::writeValue(const Value& value) +StyledStreamWriter::writeValue(Value const& value) { switch (value.type()) { @@ -564,7 +564,7 @@ StyledStreamWriter::writeValue(const Value& value) while (true) { std::string const& name = *it; - const Value& childValue = value[name]; + Value const& childValue = value[name]; writeWithIndent(valueToQuotedString(name.c_str())); *document_ << " : "; writeValue(childValue); @@ -584,7 +584,7 @@ StyledStreamWriter::writeValue(const Value& value) } void -StyledStreamWriter::writeArrayValue(const Value& value) +StyledStreamWriter::writeArrayValue(Value const& value) { unsigned size = value.size(); @@ -603,7 +603,7 @@ StyledStreamWriter::writeArrayValue(const Value& value) while (true) { - const Value& childValue = value[index]; + 
Value const& childValue = value[index]; if (hasChildValue) writeWithIndent(childValues_[index]); @@ -643,7 +643,7 @@ StyledStreamWriter::writeArrayValue(const Value& value) } bool -StyledStreamWriter::isMultineArray(const Value& value) +StyledStreamWriter::isMultineArray(Value const& value) { int size = value.size(); bool isMultiLine = size * 3 >= rightMargin_; @@ -651,7 +651,7 @@ StyledStreamWriter::isMultineArray(const Value& value) for (int index = 0; index < size && !isMultiLine; ++index) { - const Value& childValue = value[index]; + Value const& childValue = value[index]; isMultiLine = isMultiLine || ((childValue.isArray() || childValue.isObject()) && childValue.size() > 0); @@ -726,7 +726,7 @@ StyledStreamWriter::unindent() } std::ostream& -operator<<(std::ostream& sout, const Value& root) +operator<<(std::ostream& sout, Value const& root) { Json::StyledStreamWriter writer; writer.write(sout, root); diff --git a/src/libxrpl/protocol/Feature.cpp b/src/libxrpl/protocol/Feature.cpp index 3065bdf41c..eeeee1c185 100644 --- a/src/libxrpl/protocol/Feature.cpp +++ b/src/libxrpl/protocol/Feature.cpp @@ -139,27 +139,27 @@ class FeatureCollections { if (i >= features.size()) LogicError("Invalid FeatureBitset index"); - const auto& sequence = features.get(); + auto const& sequence = features.get(); return sequence[i]; } size_t getIndex(Feature const& feature) const { - const auto& sequence = features.get(); + auto const& sequence = features.get(); auto const it_to = sequence.iterator_to(feature); return it_to - sequence.begin(); } Feature const* getByFeature(uint256 const& feature) const { - const auto& feature_index = features.get(); + auto const& feature_index = features.get(); auto const feature_it = feature_index.find(feature); return feature_it == feature_index.end() ? nullptr : &*feature_it; } Feature const* getByName(std::string const& name) const { - const auto& name_index = features.get(); + auto const& name_index = features.get(); auto const name_it = name_index.find(name); return name_it == name_index.end() ? nullptr : &*name_it; } @@ -240,7 +240,7 @@ FeatureCollections::getRegisteredFeature(std::string const& name) const } void -check(bool condition, const char* logicErrorMessage) +check(bool condition, char const* logicErrorMessage) { if (!condition) LogicError(logicErrorMessage); @@ -437,9 +437,13 @@ featureToName(uint256 const& f) uint256 const feature##name = registerFeature(#name, supported, vote); #define XRPL_FIX(name, supported, vote) \ uint256 const fix##name = registerFeature("fix" #name, supported, vote); -#define XRPL_RETIRE(name) \ - [[deprecated("The referenced amendment has been retired"), maybe_unused]] \ + +// clang-format off +#define XRPL_RETIRE(name) \ + [[deprecated("The referenced amendment has been retired")]] \ + [[maybe_unused]] \ uint256 const retired##name = retireFeature(#name); +// clang-format on #include @@ -455,7 +459,7 @@ featureToName(uint256 const& f) // // Use initialization of one final static variable to set // featureCollections::readOnly. 
-[[maybe_unused]] static const bool readOnlySet =
+[[maybe_unused]] static bool const readOnlySet =
     featureCollections.registrationIsDone();
 
 } // namespace ripple
diff --git a/src/libxrpl/protocol/LedgerFormats.cpp b/src/libxrpl/protocol/LedgerFormats.cpp
index 9755eedf3b..94c6d65c88 100644
--- a/src/libxrpl/protocol/LedgerFormats.cpp
+++ b/src/libxrpl/protocol/LedgerFormats.cpp
@@ -29,7 +29,7 @@ namespace ripple {
 LedgerFormats::LedgerFormats()
 {
     // Fields shared by all ledger formats:
-    static const std::initializer_list<SOElement> commonFields{
+    static std::initializer_list<SOElement> const commonFields{
         {sfLedgerIndex, soeOPTIONAL},
         {sfLedgerEntryType, soeREQUIRED},
         {sfFlags, soeREQUIRED},
diff --git a/src/libxrpl/protocol/Quality.cpp b/src/libxrpl/protocol/Quality.cpp
index f7a970a3b9..18649db561 100644
--- a/src/libxrpl/protocol/Quality.cpp
+++ b/src/libxrpl/protocol/Quality.cpp
@@ -183,7 +183,7 @@ Quality
 Quality::round(int digits) const
 {
     // Modulus for mantissa
-    static const std::uint64_t mod[17] = {
+    static std::uint64_t const mod[17] = {
         /* 0 */ 10000000000000000,
         /* 1 */ 1000000000000000,
         /* 2 */ 100000000000000,
diff --git a/src/libxrpl/protocol/SField.cpp b/src/libxrpl/protocol/SField.cpp
index b59e2a2b69..1ffce099b8 100644
--- a/src/libxrpl/protocol/SField.cpp
+++ b/src/libxrpl/protocol/SField.cpp
@@ -87,7 +87,7 @@ SField::SField(
     private_access_tag_t,
     SerializedTypeID tid,
     int fv,
-    const char* fn,
+    char const* fn,
     int meta,
     IsSigning signing)
     : fieldCode(field_code(tid, fv))
diff --git a/src/libxrpl/protocol/STAccount.cpp b/src/libxrpl/protocol/STAccount.cpp
index ba9a2d1919..7229c85240 100644
--- a/src/libxrpl/protocol/STAccount.cpp
+++ b/src/libxrpl/protocol/STAccount.cpp
@@ -106,7 +106,7 @@ STAccount::add(Serializer& s) const
 }
 
 bool
-STAccount::isEquivalent(const STBase& t) const
+STAccount::isEquivalent(STBase const& t) const
 {
     auto const* const tPtr = dynamic_cast<STAccount const*>(&t);
     return tPtr && (default_ == tPtr->default_) && (value_ == tPtr->value_);
 }
diff --git a/src/libxrpl/protocol/STAmount.cpp b/src/libxrpl/protocol/STAmount.cpp
index e0815fbef3..f02042bc2c 100644
--- a/src/libxrpl/protocol/STAmount.cpp
+++ b/src/libxrpl/protocol/STAmount.cpp
@@ -90,13 +90,13 @@ setSTAmountCanonicalizeSwitchover(bool v)
     *getStaticSTAmountCanonicalizeSwitchover() = v;
 }
 
-static const std::uint64_t tenTo14 = 100000000000000ull;
-static const std::uint64_t tenTo14m1 = tenTo14 - 1;
-static const std::uint64_t tenTo17 = tenTo14 * 1000;
+static std::uint64_t const tenTo14 = 100000000000000ull;
+static std::uint64_t const tenTo14m1 = tenTo14 - 1;
+static std::uint64_t const tenTo17 = tenTo14 * 1000;
 
 //------------------------------------------------------------------------------
 static std::int64_t
-getInt64Value(STAmount const& amount, bool valid, const char* error)
+getInt64Value(STAmount const& amount, bool valid, char const* error)
 {
     if (!valid)
         Throw(error);
@@ -680,9 +680,9 @@ STAmount::add(Serializer& s) const
 }
 
 bool
-STAmount::isEquivalent(const STBase& t) const
+STAmount::isEquivalent(STBase const& t) const
 {
-    const STAmount* v = dynamic_cast<const STAmount*>(&t);
+    STAmount const* v = dynamic_cast<STAmount const*>(&t);
     return v && (*v == *this);
 }
 
@@ -1065,7 +1065,7 @@ amountFromJsonNoThrow(STAmount& result, Json::Value const& jvSource)
         result = amountFromJson(sfGeneric, jvSource);
         return true;
     }
-    catch (const std::exception& e)
+    catch (std::exception const& e)
     {
         JLOG(debugLog().warn())
             << "amountFromJsonNoThrow: caught: " << e.what();
diff --git a/src/libxrpl/protocol/STArray.cpp b/src/libxrpl/protocol/STArray.cpp
index 0c5b4e198c..bbc890ffda 100644
--- a/src/libxrpl/protocol/STArray.cpp
+++ b/src/libxrpl/protocol/STArray.cpp
@@ -181,9 +181,9 @@ STArray::getSType() const
 }
 
 bool
-STArray::isEquivalent(const STBase& t) const
+STArray::isEquivalent(STBase const& t) const
 {
-    auto v = dynamic_cast<const STArray*>(&t);
+    auto v = dynamic_cast<STArray const*>(&t);
     return v != nullptr && v_ == v->v_;
 }
 
@@ -194,7 +194,7 @@ STArray::isDefault() const
 }
 
 void
-STArray::sort(bool (*compare)(const STObject&, const STObject&))
+STArray::sort(bool (*compare)(STObject const&, STObject const&))
 {
     std::sort(v_.begin(), v_.end(), compare);
 }
diff --git a/src/libxrpl/protocol/STBase.cpp b/src/libxrpl/protocol/STBase.cpp
index 86e52a1f73..417b7e2302 100644
--- a/src/libxrpl/protocol/STBase.cpp
+++ b/src/libxrpl/protocol/STBase.cpp
@@ -40,7 +40,7 @@ STBase::STBase(SField const& n) : fName(&n)
 }
 
 STBase&
-STBase::operator=(const STBase& t)
+STBase::operator=(STBase const& t)
 {
     if (!fName->isUseful())
         fName = t.fName;
@@ -48,13 +48,13 @@ STBase::operator=(const STBase& t)
 }
 
 bool
-STBase::operator==(const STBase& t) const
+STBase::operator==(STBase const& t) const
 {
     return (getSType() == t.getSType()) && isEquivalent(t);
 }
 
 bool
-STBase::operator!=(const STBase& t) const
+STBase::operator!=(STBase const& t) const
 {
     return (getSType() != t.getSType()) || !isEquivalent(t);
 }
@@ -116,7 +116,7 @@ STBase::add(Serializer& s) const
 }
 
 bool
-STBase::isEquivalent(const STBase& t) const
+STBase::isEquivalent(STBase const& t) const
 {
     XRPL_ASSERT(
         getSType() == STI_NOTPRESENT,
@@ -154,7 +154,7 @@ STBase::addFieldID(Serializer& s) const
 //------------------------------------------------------------------------------
 
 std::ostream&
-operator<<(std::ostream& out, const STBase& t)
+operator<<(std::ostream& out, STBase const& t)
 {
     return out << t.getFullText();
 }
diff --git a/src/libxrpl/protocol/STBlob.cpp b/src/libxrpl/protocol/STBlob.cpp
index 33245a1e16..3d62cb5ee4 100644
--- a/src/libxrpl/protocol/STBlob.cpp
+++ b/src/libxrpl/protocol/STBlob.cpp
@@ -71,9 +71,9 @@ STBlob::add(Serializer& s) const
 }
 
 bool
-STBlob::isEquivalent(const STBase& t) const
+STBlob::isEquivalent(STBase const& t) const
 {
-    const STBlob* v = dynamic_cast<const STBlob*>(&t);
+    STBlob const* v = dynamic_cast<STBlob const*>(&t);
     return v && (value_ == v->value_);
 }
 
diff --git a/src/libxrpl/protocol/STCurrency.cpp b/src/libxrpl/protocol/STCurrency.cpp
index a17ed2e0d2..68186bbfda 100644
--- a/src/libxrpl/protocol/STCurrency.cpp
+++ b/src/libxrpl/protocol/STCurrency.cpp
@@ -72,9 +72,9 @@ STCurrency::add(Serializer& s) const
 }
 
 bool
-STCurrency::isEquivalent(const STBase& t) const
+STCurrency::isEquivalent(STBase const& t) const
 {
-    const STCurrency* v = dynamic_cast<const STCurrency*>(&t);
+    STCurrency const* v = dynamic_cast<STCurrency const*>(&t);
     return v && (*v == *this);
 }
 
diff --git a/src/libxrpl/protocol/STIssue.cpp b/src/libxrpl/protocol/STIssue.cpp
index 8483b8cfe3..821e17f6a7 100644
--- a/src/libxrpl/protocol/STIssue.cpp
+++ b/src/libxrpl/protocol/STIssue.cpp
@@ -128,9 +128,9 @@ STIssue::add(Serializer& s) const
 }
 
 bool
-STIssue::isEquivalent(const STBase& t) const
+STIssue::isEquivalent(STBase const& t) const
 {
-    const STIssue* v = dynamic_cast<const STIssue*>(&t);
+    STIssue const* v = dynamic_cast<STIssue const*>(&t);
     return v && (*v == *this);
 }
 
diff --git a/src/libxrpl/protocol/STObject.cpp b/src/libxrpl/protocol/STObject.cpp
index d0873561e3..9c23898a74 100644
--- a/src/libxrpl/protocol/STObject.cpp
+++ b/src/libxrpl/protocol/STObject.cpp
@@ -153,7 +153,7 @@ STObject::operator=(STObject&& other)
 }
 
 void
-STObject::set(const SOTemplate& type)
+STObject::set(SOTemplate const& type)
 {
     v_.clear();
v_.reserve(type.size()); @@ -169,7 +169,7 @@ STObject::set(const SOTemplate& type) } void -STObject::applyTemplate(const SOTemplate& type) +STObject::applyTemplate(SOTemplate const& type) { auto throwFieldErr = [](std::string const& field, char const* description) { std::stringstream ss; @@ -296,9 +296,9 @@ STObject::set(SerialIter& sit, int depth) } bool -STObject::hasMatchingEntry(const STBase& t) +STObject::hasMatchingEntry(STBase const& t) { - const STBase* o = peekAtPField(t.getFName()); + STBase const* o = peekAtPField(t.getFName()); if (!o) return false; @@ -357,9 +357,9 @@ STObject::getText() const } bool -STObject::isEquivalent(const STBase& t) const +STObject::isEquivalent(STBase const& t) const { - const STObject* v = dynamic_cast(&t); + STObject const* v = dynamic_cast(&t); if (!v) return false; @@ -425,7 +425,7 @@ STObject::getFieldIndex(SField const& field) const return -1; } -const STBase& +STBase const& STObject::peekAtField(SField const& field) const { int index = getFieldIndex(field); @@ -453,7 +453,7 @@ STObject::getFieldSType(int index) const return v_[index]->getFName(); } -const STBase* +STBase const* STObject::peekAtPField(SField const& field) const { int index = getFieldIndex(field); @@ -536,7 +536,7 @@ STObject::isFlag(std::uint32_t f) const std::uint32_t STObject::getFlags(void) const { - const STUInt32* t = dynamic_cast(peekAtPField(sfFlags)); + STUInt32 const* t = dynamic_cast(peekAtPField(sfFlags)); if (!t) return 0; @@ -574,7 +574,7 @@ STObject::makeFieldAbsent(SField const& field) if (index == -1) throwFieldNotFound(field); - const STBase& f = peekAtIndex(index); + STBase const& f = peekAtIndex(index); if (f.getSType() == STI_NOTPRESENT) return; @@ -675,14 +675,14 @@ STObject::getFieldPathSet(SField const& field) const return getFieldByConstRef(field, empty); } -const STVector256& +STVector256 const& STObject::getFieldV256(SField const& field) const { static STVector256 const empty{}; return getFieldByConstRef(field, empty); } -const STArray& +STArray const& STObject::getFieldArray(SField const& field) const { static STArray const empty{}; @@ -835,7 +835,7 @@ STObject::getJson(JsonOptions options) const } bool -STObject::operator==(const STObject& obj) const +STObject::operator==(STObject const& obj) const { // This is not particularly efficient, and only compares data elements // with binary representations diff --git a/src/libxrpl/protocol/STParsedJSON.cpp b/src/libxrpl/protocol/STParsedJSON.cpp index e7568c6818..1437ed922b 100644 --- a/src/libxrpl/protocol/STParsedJSON.cpp +++ b/src/libxrpl/protocol/STParsedJSON.cpp @@ -851,7 +851,7 @@ parseLeaf( return ret; } -static const int maxDepth = 64; +static int const maxDepth = 64; // Forward declaration since parseObject() and parseArray() call each other. static std::optional @@ -1037,7 +1037,8 @@ parseArray( Json::Value const objectFields(json[i][objectName]); std::stringstream ss; - ss << json_name << "." << "[" << i << "]." << objectName; + ss << json_name << "." + << "[" << i << "]." 
<< objectName; auto ret = parseObject( ss.str(), objectFields, nameField, depth + 1, error); diff --git a/src/libxrpl/protocol/STPathSet.cpp b/src/libxrpl/protocol/STPathSet.cpp index d2bd20cfe8..1252ca7c6c 100644 --- a/src/libxrpl/protocol/STPathSet.cpp +++ b/src/libxrpl/protocol/STPathSet.cpp @@ -145,9 +145,9 @@ STPathSet::assembleAdd(STPath const& base, STPathElement const& tail) } bool -STPathSet::isEquivalent(const STBase& t) const +STPathSet::isEquivalent(STBase const& t) const { - const STPathSet* v = dynamic_cast(&t); + STPathSet const* v = dynamic_cast(&t); return v && (value == v->value); } diff --git a/src/libxrpl/protocol/STVector256.cpp b/src/libxrpl/protocol/STVector256.cpp index 50b022d294..3612b0cc4d 100644 --- a/src/libxrpl/protocol/STVector256.cpp +++ b/src/libxrpl/protocol/STVector256.cpp @@ -86,9 +86,9 @@ STVector256::add(Serializer& s) const } bool -STVector256::isEquivalent(const STBase& t) const +STVector256::isEquivalent(STBase const& t) const { - const STVector256* v = dynamic_cast(&t); + STVector256 const* v = dynamic_cast(&t); return v && (mValue == v->mValue); } diff --git a/src/libxrpl/protocol/STXChainBridge.cpp b/src/libxrpl/protocol/STXChainBridge.cpp index 1499d790cb..fb192d82d6 100644 --- a/src/libxrpl/protocol/STXChainBridge.cpp +++ b/src/libxrpl/protocol/STXChainBridge.cpp @@ -197,9 +197,9 @@ STXChainBridge::getSType() const } bool -STXChainBridge::isEquivalent(const STBase& t) const +STXChainBridge::isEquivalent(STBase const& t) const { - const STXChainBridge* v = dynamic_cast(&t); + STXChainBridge const* v = dynamic_cast(&t); return v && (*v == *this); } diff --git a/src/libxrpl/protocol/Serializer.cpp b/src/libxrpl/protocol/Serializer.cpp index 339e25db1d..b8a68d28b8 100644 --- a/src/libxrpl/protocol/Serializer.cpp +++ b/src/libxrpl/protocol/Serializer.cpp @@ -101,7 +101,7 @@ Serializer::addRaw(Slice slice) } int -Serializer::addRaw(const Serializer& s) +Serializer::addRaw(Serializer const& s) { int ret = mData.size(); mData.insert(mData.end(), s.begin(), s.end()); @@ -109,10 +109,10 @@ Serializer::addRaw(const Serializer& s) } int -Serializer::addRaw(const void* ptr, int len) +Serializer::addRaw(void const* ptr, int len) { int ret = mData.size(); - mData.insert(mData.end(), (const char*)ptr, ((const char*)ptr) + len); + mData.insert(mData.end(), (char const*)ptr, ((char const*)ptr) + len); return ret; } @@ -208,7 +208,7 @@ Serializer::addVL(Slice const& slice) } int -Serializer::addVL(const void* ptr, int len) +Serializer::addVL(void const* ptr, int len) { int ret = addEncoded(len); diff --git a/src/libxrpl/protocol/TxFormats.cpp b/src/libxrpl/protocol/TxFormats.cpp index a23475553d..5edffeb666 100644 --- a/src/libxrpl/protocol/TxFormats.cpp +++ b/src/libxrpl/protocol/TxFormats.cpp @@ -29,7 +29,7 @@ namespace ripple { TxFormats::TxFormats() { // Fields shared by all txFormats: - static const std::initializer_list commonFields{ + static std::initializer_list const commonFields{ {sfTransactionType, soeREQUIRED}, {sfFlags, soeOPTIONAL}, {sfSourceTag, soeOPTIONAL}, diff --git a/src/test/app/AMMExtended_test.cpp b/src/test/app/AMMExtended_test.cpp index a9d0514e3c..d7caed9601 100644 --- a/src/test/app/AMMExtended_test.cpp +++ b/src/test/app/AMMExtended_test.cpp @@ -3845,7 +3845,7 @@ private: int const signerListOwners{features[featureMultiSignReserve] ? 
2 : 5}; env.require(owners(alice, signerListOwners + 0)); - const msig ms{becky, bogie}; + msig const ms{becky, bogie}; // Multisign all AMM transactions AMM ammAlice( diff --git a/src/test/app/AMM_test.cpp b/src/test/app/AMM_test.cpp index a0be79913b..87988315f4 100644 --- a/src/test/app/AMM_test.cpp +++ b/src/test/app/AMM_test.cpp @@ -1323,14 +1323,14 @@ private: // equal asset deposit: unit test to exercise the rounding-down of // LPTokens in the AMMHelpers.cpp: adjustLPTokens calculations // The LPTokens need to have 16 significant digits and a fractional part - for (const Number deltaLPTokens : + for (Number const deltaLPTokens : {Number{UINT64_C(100000'0000000009), -10}, Number{UINT64_C(100000'0000000001), -10}}) { testAMM([&](AMM& ammAlice, Env& env) { // initial LPToken balance IOUAmount const initLPToken = ammAlice.getLPTokensBalance(); - const IOUAmount newLPTokens{ + IOUAmount const newLPTokens{ deltaLPTokens.mantissa(), deltaLPTokens.exponent()}; // carol performs a two-asset deposit @@ -1349,17 +1349,17 @@ private: // fraction of newLPTokens/(existing LPToken balance). The // existing LPToken balance is 1e7 - const Number fr = deltaLPTokens / 1e7; + Number const fr = deltaLPTokens / 1e7; // The below equations are based on Equation 1, 2 from XLS-30d // specification, Section: 2.3.1.2 - const Number deltaXRP = fr * 1e10; - const Number deltaUSD = fr * 1e4; + Number const deltaXRP = fr * 1e10; + Number const deltaUSD = fr * 1e4; - const STAmount depositUSD = + STAmount const depositUSD = STAmount{USD, deltaUSD.mantissa(), deltaUSD.exponent()}; - const STAmount depositXRP = + STAmount const depositXRP = STAmount{XRP, deltaXRP.mantissa(), deltaXRP.exponent()}; // initial LPTokens (1e7) + newLPTokens @@ -6682,11 +6682,11 @@ private: testcase("swapRounding"); using namespace jtx; - const STAmount xrpPool{XRP, UINT64_C(51600'000981)}; - const STAmount iouPool{USD, UINT64_C(803040'9987141784), -10}; + STAmount const xrpPool{XRP, UINT64_C(51600'000981)}; + STAmount const iouPool{USD, UINT64_C(803040'9987141784), -10}; - const STAmount xrpBob{XRP, UINT64_C(1092'878933)}; - const STAmount iouBob{ + STAmount const xrpBob{XRP, UINT64_C(1092'878933)}; + STAmount const iouBob{ USD, UINT64_C(3'988035892323031), -28}; // 3.9...e-13 testAMM( diff --git a/src/test/app/AccountDelete_test.cpp b/src/test/app/AccountDelete_test.cpp index c903f95f77..4ae18d9d28 100644 --- a/src/test/app/AccountDelete_test.cpp +++ b/src/test/app/AccountDelete_test.cpp @@ -927,7 +927,7 @@ public: Account const carol{"carol"}; Account const daria{"daria"}; - const char credType[] = "abcd"; + char const credType[] = "abcd"; Env env{*this}; env.fund(XRP(100000), alice, becky, carol, daria); @@ -1193,7 +1193,7 @@ public: Account const becky{"becky"}; Account const carol{"carol"}; - const char credType[] = "abcd"; + char const credType[] = "abcd"; Env env{*this}; env.fund(XRP(100000), alice, becky, carol); @@ -1237,7 +1237,7 @@ public: Account const becky{"becky"}; Account const carol{"carol"}; - const char credType[] = "abcd"; + char const credType[] = "abcd"; Env env{*this}; env.fund(XRP(100000), alice, becky, carol); diff --git a/src/test/app/Credentials_test.cpp b/src/test/app/Credentials_test.cpp index 24e656672d..87946c13bb 100644 --- a/src/test/app/Credentials_test.cpp +++ b/src/test/app/Credentials_test.cpp @@ -60,8 +60,8 @@ struct Credentials_test : public beast::unit_test::suite { using namespace test::jtx; - const char credType[] = "abcde"; - const char uri[] = "uri"; + char const credType[] = "abcde"; + char 
const uri[] = "uri"; Account const issuer{"issuer"}; Account const subject{"subject"}; @@ -209,7 +209,7 @@ struct Credentials_test : public beast::unit_test::suite { using namespace test::jtx; - const char credType[] = "abcde"; + char const credType[] = "abcde"; Account const issuer{"issuer"}; Account const subject{"subject"}; @@ -458,7 +458,7 @@ struct Credentials_test : public beast::unit_test::suite { using namespace test::jtx; - const char credType[] = "abcde"; + char const credType[] = "abcde"; Account const issuer{"issuer"}; Account const subject{"subject"}; @@ -616,7 +616,7 @@ struct Credentials_test : public beast::unit_test::suite { using namespace jtx; - const char credType[] = "abcde"; + char const credType[] = "abcde"; Account const issuer{"issuer"}; Account const subject{"subject"}; Account const other{"other"}; @@ -726,7 +726,7 @@ struct Credentials_test : public beast::unit_test::suite } { - const char credType2[] = "efghi"; + char const credType2[] = "efghi"; testcase("CredentialsAccept fail, expired credentials."); auto jv = credentials::create(subject, issuer, credType2); @@ -797,7 +797,7 @@ struct Credentials_test : public beast::unit_test::suite { using namespace test::jtx; - const char credType[] = "abcde"; + char const credType[] = "abcde"; Account const issuer{"issuer"}; Account const subject{"subject"}; Account const other{"other"}; @@ -842,7 +842,7 @@ struct Credentials_test : public beast::unit_test::suite } { - const char credType2[] = "fghij"; + char const credType2[] = "fghij"; env(credentials::create(subject, issuer, credType2)); env.close(); @@ -944,7 +944,7 @@ struct Credentials_test : public beast::unit_test::suite { using namespace test::jtx; - const char credType[] = "abcde"; + char const credType[] = "abcde"; Account const issuer{"issuer"}; Account const subject{"subject"}; @@ -972,7 +972,7 @@ struct Credentials_test : public beast::unit_test::suite { using namespace test::jtx; - const char credType[] = "abcde"; + char const credType[] = "abcde"; Account const issuer{"issuer"}; Account const subject{"subject"}; @@ -1069,7 +1069,7 @@ struct Credentials_test : public beast::unit_test::suite std::string("Test flag, fix ") + (enabled ? 
"enabled" : "disabled")); - const char credType[] = "abcde"; + char const credType[] = "abcde"; Account const issuer{"issuer"}; Account const subject{"subject"}; diff --git a/src/test/app/DID_test.cpp b/src/test/app/DID_test.cpp index 34aa54f234..c885ed0861 100644 --- a/src/test/app/DID_test.cpp +++ b/src/test/app/DID_test.cpp @@ -149,7 +149,7 @@ struct DID_test : public beast::unit_test::suite BEAST_EXPECT(ownerCount(env, alice) == 0); // uri is too long - const std::string longString(257, 'a'); + std::string const longString(257, 'a'); env(did::set(alice), did::uri(longString), ter(temMALFORMED)); env.close(); BEAST_EXPECT(ownerCount(env, alice) == 0); diff --git a/src/test/app/DepositAuth_test.cpp b/src/test/app/DepositAuth_test.cpp index 18f7b410b7..c8dc3c00eb 100644 --- a/src/test/app/DepositAuth_test.cpp +++ b/src/test/app/DepositAuth_test.cpp @@ -664,7 +664,7 @@ struct DepositPreauth_test : public beast::unit_test::suite { // becky setup depositpreauth with credentials - const char credType[] = "abcde"; + char const credType[] = "abcde"; Account const carol{"carol"}; env.fund(XRP(5000), carol); env.close(); @@ -820,7 +820,7 @@ struct DepositPreauth_test : public beast::unit_test::suite { using namespace jtx; - const char credType[] = "abcde"; + char const credType[] = "abcde"; Account const issuer{"issuer"}; Account const alice{"alice"}; Account const bob{"bob"}; @@ -993,7 +993,7 @@ struct DepositPreauth_test : public beast::unit_test::suite { // create another valid credential - const char credType2[] = "fghij"; + char const credType2[] = "fghij"; env(credentials::create(alice, issuer, credType2)); env.close(); env(credentials::accept(alice, issuer, credType2)); @@ -1025,7 +1025,7 @@ struct DepositPreauth_test : public beast::unit_test::suite { using namespace jtx; - const char credType[] = "abcde"; + char const credType[] = "abcde"; Account const issuer{"issuer"}; Account const alice{"alice"}; Account const bob{"bob"}; @@ -1196,8 +1196,8 @@ struct DepositPreauth_test : public beast::unit_test::suite testExpiredCreds() { using namespace jtx; - const char credType[] = "abcde"; - const char credType2[] = "fghijkl"; + char const credType[] = "abcde"; + char const credType2[] = "fghijkl"; Account const issuer{"issuer"}; Account const alice{"alice"}; Account const bob{"bob"}; diff --git a/src/test/app/Escrow_test.cpp b/src/test/app/Escrow_test.cpp index 2b7bf4619a..1129019aab 100644 --- a/src/test/app/Escrow_test.cpp +++ b/src/test/app/Escrow_test.cpp @@ -1551,7 +1551,7 @@ struct Escrow_test : public beast::unit_test::suite Account const dillon{"dillon "}; Account const zelda{"zelda"}; - const char credType[] = "abcde"; + char const credType[] = "abcde"; { // Credentials amendment not enabled @@ -1657,7 +1657,7 @@ struct Escrow_test : public beast::unit_test::suite env.close(); { - const char credType2[] = "fghijk"; + char const credType2[] = "fghijk"; env(credentials::create(bob, zelda, credType2)); env.close(); diff --git a/src/test/app/FeeVote_test.cpp b/src/test/app/FeeVote_test.cpp index bf293a93fb..1cf2e67f83 100644 --- a/src/test/app/FeeVote_test.cpp +++ b/src/test/app/FeeVote_test.cpp @@ -76,7 +76,7 @@ class FeeVote_test : public beast::unit_test::suite setup.owner_reserve == static_cast(-1234)); } { - const auto big64 = std::to_string( + auto const big64 = std::to_string( static_cast( std::numeric_limits::max()) + 1); diff --git a/src/test/app/LedgerReplay_test.cpp b/src/test/app/LedgerReplay_test.cpp index 15f51889bb..76ab5b3218 100644 --- a/src/test/app/LedgerReplay_test.cpp 
+++ b/src/test/app/LedgerReplay_test.cpp @@ -312,11 +312,11 @@ public: { } void - addTxQueue(const uint256&) override + addTxQueue(uint256 const&) override { } void - removeTxQueue(const uint256&) override + removeTxQueue(uint256 const&) override { } bool @@ -414,7 +414,7 @@ struct TestPeerSet : public PeerSet } } - const std::set& + std::set const& getPeerIds() const override { static std::set emptyPeers; diff --git a/src/test/app/MPToken_test.cpp b/src/test/app/MPToken_test.cpp index a20ce56c6d..0f29e22dd9 100644 --- a/src/test/app/MPToken_test.cpp +++ b/src/test/app/MPToken_test.cpp @@ -1328,7 +1328,7 @@ class MPToken_test : public beast::unit_test::suite Account const diana("diana"); Account const dpIssuer("dpIssuer"); // holder - const char credType[] = "abcde"; + char const credType[] = "abcde"; { Env env(*this); diff --git a/src/test/app/PayChan_test.cpp b/src/test/app/PayChan_test.cpp index f2fcf344da..7cb1542453 100644 --- a/src/test/app/PayChan_test.cpp +++ b/src/test/app/PayChan_test.cpp @@ -886,7 +886,7 @@ struct PayChan_test : public beast::unit_test::suite using namespace jtx; using namespace std::literals::chrono_literals; - const char credType[] = "abcde"; + char const credType[] = "abcde"; Account const alice("alice"); Account const bob("bob"); diff --git a/src/test/app/PermissionedDomains_test.cpp b/src/test/app/PermissionedDomains_test.cpp index 65f75897ae..e33a88fa08 100644 --- a/src/test/app/PermissionedDomains_test.cpp +++ b/src/test/app/PermissionedDomains_test.cpp @@ -285,7 +285,7 @@ class PermissionedDomains_test : public beast::unit_test::suite Env env(*this, withFeature_); env.set_parse_failure_expected(true); - const int accNum = 12; + int const accNum = 12; Account const alice[accNum] = { "alice", "alice2", diff --git a/src/test/app/SHAMapStore_test.cpp b/src/test/app/SHAMapStore_test.cpp index d3b5917434..1e0ec4bcf0 100644 --- a/src/test/app/SHAMapStore_test.cpp +++ b/src/test/app/SHAMapStore_test.cpp @@ -72,21 +72,21 @@ class SHAMapStore_test : public beast::unit_test::suite env.app().getRelationalDatabase().getLedgerInfoByIndex(seq); if (!oinfo) return false; - const LedgerInfo& info = oinfo.value(); + LedgerInfo const& info = oinfo.value(); - const std::string outHash = to_string(info.hash); - const LedgerIndex outSeq = info.seq; - const std::string outParentHash = to_string(info.parentHash); - const std::string outDrops = to_string(info.drops); - const std::uint64_t outCloseTime = + std::string const outHash = to_string(info.hash); + LedgerIndex const outSeq = info.seq; + std::string const outParentHash = to_string(info.parentHash); + std::string const outDrops = to_string(info.drops); + std::uint64_t const outCloseTime = info.closeTime.time_since_epoch().count(); - const std::uint64_t outParentCloseTime = + std::uint64_t const outParentCloseTime = info.parentCloseTime.time_since_epoch().count(); - const std::uint64_t outCloseTimeResolution = + std::uint64_t const outCloseTimeResolution = info.closeTimeResolution.count(); - const std::uint64_t outCloseFlags = info.closeFlags; - const std::string outAccountHash = to_string(info.accountHash); - const std::string outTxHash = to_string(info.txHash); + std::uint64_t const outCloseFlags = info.closeFlags; + std::string const outAccountHash = to_string(info.accountHash); + std::string const outTxHash = to_string(info.txHash); auto const& ledger = json[jss::result][jss::ledger]; return outHash == ledger[jss::ledger_hash].asString() && @@ -124,7 +124,7 @@ class SHAMapStore_test : public beast::unit_test::suite void 
ledgerCheck(jtx::Env& env, int const rows, int const first) { - const auto [actualRows, actualFirst, actualLast] = + auto const [actualRows, actualFirst, actualLast] = dynamic_cast(&env.app().getRelationalDatabase()) ->getLedgerCountMinMax(); diff --git a/src/test/app/SetTrust_test.cpp b/src/test/app/SetTrust_test.cpp index c99a1dafa1..9b4048bf9c 100644 --- a/src/test/app/SetTrust_test.cpp +++ b/src/test/app/SetTrust_test.cpp @@ -469,7 +469,7 @@ public: auto& tx1 = createQuality ? txWithQuality : txWithoutQuality; auto& tx2 = createQuality ? txWithoutQuality : txWithQuality; - auto check_quality = [&](const bool exists) { + auto check_quality = [&](bool const exists) { Json::Value jv; jv["account"] = toAcct.human(); auto const lines = env.rpc("json", "account_lines", to_string(jv)); diff --git a/src/test/app/TxQ_test.cpp b/src/test/app/TxQ_test.cpp index 2ce05095fc..7b69cee1ce 100644 --- a/src/test/app/TxQ_test.cpp +++ b/src/test/app/TxQ_test.cpp @@ -177,7 +177,7 @@ class TxQPosNegFlows_test : public beast::unit_test::suite auto calcMedFeeLevel(FeeLevel64 const feeLevel1, FeeLevel64 const feeLevel2) { - const FeeLevel64 expectedMedFeeLevel = + FeeLevel64 const expectedMedFeeLevel = (feeLevel1 + feeLevel2 + FeeLevel64{1}) / 2; return std::max(expectedMedFeeLevel, minEscalationFeeLevel).fee(); @@ -382,7 +382,7 @@ public: ////////////////////////////////////////////////////////////// constexpr auto largeFeeMultiplier = 700; - const auto largeFee = baseFee * largeFeeMultiplier; + auto const largeFee = baseFee * largeFeeMultiplier; // Stuff the ledger and queue so we can verify that // stuff gets kicked out. @@ -878,7 +878,7 @@ public: env(noop(alice), json(R"({"LastLedgerSequence":8})"), queued); constexpr auto largeFeeMultiplier = 700; - const auto largeFee = baseFee * largeFeeMultiplier; + auto const largeFee = baseFee * largeFeeMultiplier; // Queue items with higher fees to force the previous // txn to wait. @@ -928,7 +928,7 @@ public: BEAST_EXPECT(env.seq(alice) == 3); constexpr auto anotherLargeFeeMultiplier = 800; - const auto anotherLargeFee = baseFee * anotherLargeFeeMultiplier; + auto const anotherLargeFee = baseFee * anotherLargeFeeMultiplier; // Keep alice's transaction waiting. 
// clang-format off env(noop(bob), fee(anotherLargeFee), queued); diff --git a/src/test/app/ValidatorKeys_test.cpp b/src/test/app/ValidatorKeys_test.cpp index 7c8584698e..427ada132c 100644 --- a/src/test/app/ValidatorKeys_test.cpp +++ b/src/test/app/ValidatorKeys_test.cpp @@ -35,13 +35,13 @@ namespace test { class ValidatorKeys_test : public beast::unit_test::suite { // Used with [validation_seed] - const std::string seed = "shUwVw52ofnCUX5m7kPTKzJdr4HEH"; + std::string const seed = "shUwVw52ofnCUX5m7kPTKzJdr4HEH"; // Used with [validation_token] - const std::string tokenSecretStr = + std::string const tokenSecretStr = "paQmjZ37pKKPMrgadBLsuf9ab7Y7EUNzh27LQrZqoexpAs31nJi"; - const std::vector tokenBlob = { + std::vector const tokenBlob = { " " "eyJ2YWxpZGF0aW9uX3NlY3JldF9rZXkiOiI5ZWQ0NWY4NjYyNDFjYzE4YTI3NDdiNT\n", " \tQzODdjMDYyNTkwNzk3MmY0ZTcxOTAyMzFmYWE5Mzc0NTdmYTlkYWY2IiwibWFuaWZl " @@ -56,7 +56,7 @@ class ValidatorKeys_test : public beast::unit_test::suite "NmluOEhBU1FLUHVnQkQ2N2tNYVJGR3ZtcEFUSGxHS0pkdkRGbFdQWXk1QXFEZWRGdj\n", "VUSmEydzBpMjFlcTNNWXl3TFZKWm5GT3I3QzBrdzJBaVR6U0NqSXpkaXRROD0ifQ==\n"}; - const std::string tokenManifest = + std::string const tokenManifest = "JAAAAAFxIe1FtwmimvGtH2iCcMJqC9gVFKilGfw1/vCxHXXLplc2GnMhAkE1agqXxBwD" "wDbID6OMSYuM0FDAlpAgNk8SKFn7MO2fdkcwRQIhAOngu9sAKqXYouJ+l2V0W+sAOkVB" "+ZRS6PShlJAfUsXfAiBsVJGesaadOJc/aAZokS1vymGmVrlHPKWX3Yywu6in8HASQKPu" @@ -64,7 +64,7 @@ class ValidatorKeys_test : public beast::unit_test::suite "2AiTzSCjIzditQ8="; // Manifest does not match private key - const std::vector invalidTokenBlob = { + std::vector const invalidTokenBlob = { "eyJtYW5pZmVzdCI6IkpBQUFBQVZ4SWUyOVVBdzViZFJudHJ1elVkREk4aDNGV1JWZl\n", "k3SXVIaUlKQUhJd3MxdzZzM01oQWtsa1VXQWR2RnFRVGRlSEpvS1pNY0hlS0RzOExo\n", "b3d3bDlHOEdkVGNJbmFka1l3UkFJZ0h2Q01lQU1aSzlqQnV2aFhlaFRLRzVDQ3BBR1\n", diff --git a/src/test/app/ValidatorList_test.cpp b/src/test/app/ValidatorList_test.cpp index 819340bfa4..a3b62bd4f7 100644 --- a/src/test/app/ValidatorList_test.cpp +++ b/src/test/app/ValidatorList_test.cpp @@ -551,7 +551,7 @@ private: auto const version, std::vector> const& expected) { - const auto available = trustedKeys->getAvailable(hexPublic); + auto const available = trustedKeys->getAvailable(hexPublic); BEAST_EXPECT(!version || available); if (available) @@ -621,7 +621,7 @@ private: auto const publisherSecret = randomSecretKey(); auto const publisherPublic = derivePublicKey(KeyType::ed25519, publisherSecret); - const auto hexPublic = + auto const hexPublic = strHex(publisherPublic.begin(), publisherPublic.end()); auto const pubSigningKeys1 = randomKeyPair(KeyType::secp256k1); auto const manifest1 = base64_encode(makeManifestString( @@ -1005,7 +1005,7 @@ private: auto const publisherSecret = randomSecretKey(); auto const publisherPublic = derivePublicKey(KeyType::ed25519, publisherSecret); - const auto hexPublic = + auto const hexPublic = strHex(publisherPublic.begin(), publisherPublic.end()); auto const pubSigningKeys1 = randomKeyPair(KeyType::secp256k1); auto const manifest = base64_encode(makeManifestString( @@ -1057,7 +1057,7 @@ private: // unknown public key auto const badSecret = randomSecretKey(); auto const badPublic = derivePublicKey(KeyType::ed25519, badSecret); - const auto hexBad = strHex(badPublic.begin(), badPublic.end()); + auto const hexBad = strHex(badPublic.begin(), badPublic.end()); auto const available = trustedKeys->getAvailable(hexBad, 1); BEAST_EXPECT(!available); diff --git a/src/test/app/ValidatorSite_test.cpp b/src/test/app/ValidatorSite_test.cpp index 
f7db0463c1..7a7511e6f0 100644 --- a/src/test/app/ValidatorSite_test.cpp +++ b/src/test/app/ValidatorSite_test.cpp @@ -39,7 +39,7 @@ namespace ripple { namespace test { namespace detail { -constexpr const char* +constexpr char const* realValidatorContents() { return R"vl({ diff --git a/src/test/app/XChain_test.cpp b/src/test/app/XChain_test.cpp index 32a37f5e27..85cd636b3d 100644 --- a/src/test/app/XChain_test.cpp +++ b/src/test/app/XChain_test.cpp @@ -316,7 +316,7 @@ struct BalanceTransfer return std::all_of( reward_accounts.begin(), reward_accounts.end(), - [&](const balance& b) { return b.diff() == reward; }); + [&](balance const& b) { return b.diff() == reward; }); } bool @@ -4582,8 +4582,8 @@ private: { public: SmBase( - const std::shared_ptr& chainstate, - const BridgeDef& bridge) + std::shared_ptr const& chainstate, + BridgeDef const& bridge) : bridge_(bridge), st_(chainstate) { } @@ -4613,7 +4613,7 @@ private: } protected: - const BridgeDef& bridge_; + BridgeDef const& bridge_; std::shared_ptr st_; }; @@ -4624,8 +4624,8 @@ private: using Base = SmBase; SmCreateAccount( - const std::shared_ptr& chainstate, - const BridgeDef& bridge, + std::shared_ptr const& chainstate, + BridgeDef const& bridge, AccountCreate create) : Base(chainstate, bridge) , sm_state(st_initial) @@ -4756,8 +4756,8 @@ private: using Base = SmBase; SmTransfer( - const std::shared_ptr& chainstate, - const BridgeDef& bridge, + std::shared_ptr const& chainstate, + BridgeDef const& bridge, Transfer xfer) : Base(chainstate, bridge) , xfer(std::move(xfer)) @@ -4926,7 +4926,7 @@ private: void xfer( uint64_t time, - const std::shared_ptr& chainstate, + std::shared_ptr const& chainstate, BridgeDef const& bridge, Transfer transfer) { @@ -4936,7 +4936,7 @@ private: void ac(uint64_t time, - const std::shared_ptr& chainstate, + std::shared_ptr const& chainstate, BridgeDef const& bridge, AccountCreate ac) { diff --git a/src/test/app/tx/apply_test.cpp b/src/test/app/tx/apply_test.cpp index 63fecde65f..44a2c10b4e 100644 --- a/src/test/app/tx/apply_test.cpp +++ b/src/test/app/tx/apply_test.cpp @@ -40,7 +40,7 @@ public: testFullyCanonicalSigs() { // Construct a payments w/out a fully-canonical tx - const std::string non_fully_canonical_tx = + std::string const non_fully_canonical_tx = "12000022000000002400000001201B00497D9C6140000000000F6950684000000" "00000000C732103767C7B2C13AD90050A4263745E4BAB2B975417FA22E87780E1" "506DDAF21139BE74483046022100E95670988A34C4DB0FA73A8BFD6383872AF43" diff --git a/src/test/basics/FileUtilities_test.cpp b/src/test/basics/FileUtilities_test.cpp index b78173f35f..4b4cbe70c8 100644 --- a/src/test/basics/FileUtilities_test.cpp +++ b/src/test/basics/FileUtilities_test.cpp @@ -34,7 +34,7 @@ public: using namespace ripple::test::detail; using namespace boost::system; - constexpr const char* expectedContents = + constexpr char const* expectedContents = "This file is very short. That's all we need."; FileDirGuard file( diff --git a/src/test/basics/PerfLog_test.cpp b/src/test/basics/PerfLog_test.cpp index 7ec62a1701..05678e699d 100644 --- a/src/test/basics/PerfLog_test.cpp +++ b/src/test/basics/PerfLog_test.cpp @@ -776,7 +776,7 @@ public: // Verify values in jss::total are what we expect. 
Json::Value const& total{jobQueue[jss::total]}; - const int finished = jobs.size() * 2; + int const finished = jobs.size() * 2; BEAST_EXPECT(jsonToUint64(total[jss::queued]) == jobs.size()); BEAST_EXPECT(jsonToUint64(total[jss::started]) == finished); BEAST_EXPECT(jsonToUint64(total[jss::finished]) == finished); diff --git a/src/test/basics/mulDiv_test.cpp b/src/test/basics/mulDiv_test.cpp index 47332fd45b..61521577d9 100644 --- a/src/test/basics/mulDiv_test.cpp +++ b/src/test/basics/mulDiv_test.cpp @@ -28,8 +28,8 @@ struct mulDiv_test : beast::unit_test::suite void run() override { - const auto max = std::numeric_limits::max(); - const std::uint64_t max32 = std::numeric_limits::max(); + auto const max = std::numeric_limits::max(); + std::uint64_t const max32 = std::numeric_limits::max(); auto result = mulDiv(85, 20, 5); BEAST_EXPECT(result && *result == 340); diff --git a/src/test/consensus/ByzantineFailureSim_test.cpp b/src/test/consensus/ByzantineFailureSim_test.cpp index b979943477..887a060a5b 100644 --- a/src/test/consensus/ByzantineFailureSim_test.cpp +++ b/src/test/consensus/ByzantineFailureSim_test.cpp @@ -68,8 +68,8 @@ class ByzantineFailureSim_test : public beast::unit_test::suite for (TrustGraph::ForkInfo const& fi : sim.trustGraph.forkablePairs(0.8)) { - std::cout << "Can fork " << PeerGroup{fi.unlA} << " " << " " - << PeerGroup{fi.unlB} << " overlap " << fi.overlap + std::cout << "Can fork " << PeerGroup{fi.unlA} << " " + << " " << PeerGroup{fi.unlB} << " overlap " << fi.overlap << " required " << fi.required << "\n"; }; diff --git a/src/test/core/SociDB_test.cpp b/src/test/core/SociDB_test.cpp index 8f2a3c6646..9a3666f072 100644 --- a/src/test/core/SociDB_test.cpp +++ b/src/test/core/SociDB_test.cpp @@ -320,7 +320,7 @@ public: { soci::session s; sc.open(s); - const char* dbInit[] = { + char const* dbInit[] = { "BEGIN TRANSACTION;", "CREATE TABLE Ledgers ( \ LedgerHash CHARACTER(64) PRIMARY KEY, \ diff --git a/src/test/csf/Peer.h b/src/test/csf/Peer.h index 678924112e..1cb2d03cc6 100644 --- a/src/test/csf/Peer.h +++ b/src/test/csf/Peer.h @@ -553,11 +553,11 @@ struct Peer ConsensusCloseTimes const& rawCloseTimes, ConsensusMode const& mode, Json::Value&& consensusJson, - const bool validating) + bool const validating) { schedule(delays.ledgerAccept, [=, this]() { - const bool proposing = mode == ConsensusMode::proposing; - const bool consensusFail = result.state == ConsensusState::MovedOn; + bool const proposing = mode == ConsensusMode::proposing; + bool const consensusFail = result.state == ConsensusState::MovedOn; TxSet const acceptedTxs = injectTxs(prevLedger, result.txns); Ledger const newLedger = oracle.accept( diff --git a/src/test/csf/Tx.h b/src/test/csf/Tx.h index 066aee2268..7f37d60d70 100644 --- a/src/test/csf/Tx.h +++ b/src/test/csf/Tx.h @@ -188,7 +188,7 @@ private: // Helper functions for debug printing inline std::ostream& -operator<<(std::ostream& o, const Tx& t) +operator<<(std::ostream& o, Tx const& t) { return o << t.id(); } diff --git a/src/test/csf/collectors.h b/src/test/csf/collectors.h index 0494178ae9..7b91863cbd 100644 --- a/src/test/csf/collectors.h +++ b/src/test/csf/collectors.h @@ -720,4 +720,4 @@ struct JumpCollector } // namespace test } // namespace ripple -#endif +#endif \ No newline at end of file diff --git a/src/test/csf/ledgers.h b/src/test/csf/ledgers.h index a02adb9c87..45e255ffd5 100644 --- a/src/test/csf/ledgers.h +++ b/src/test/csf/ledgers.h @@ -151,7 +151,7 @@ private: }; // Single common genesis instance - static const Instance genesis; 
+ static Instance const genesis; Ledger(ID id, Instance const* i) : id_{id}, instance_{i} { diff --git a/src/test/jtx/Env.h b/src/test/jtx/Env.h index 399b176677..ef26ebf2ee 100644 --- a/src/test/jtx/Env.h +++ b/src/test/jtx/Env.h @@ -72,7 +72,7 @@ noripple(Account const& account, Args const&... args) inline FeatureBitset supported_amendments() { - static const FeatureBitset ids = [] { + static FeatureBitset const ids = [] { auto const& sa = ripple::detail::supportedAmendments(); std::vector feats; feats.reserve(sa.size()); diff --git a/src/test/jtx/TrustedPublisherServer.h b/src/test/jtx/TrustedPublisherServer.h index 6138673484..54538032f5 100644 --- a/src/test/jtx/TrustedPublisherServer.h +++ b/src/test/jtx/TrustedPublisherServer.h @@ -220,8 +220,9 @@ public: getList_ = [blob = blob, sig, manifest, version](int interval) { // Build the contents of a version 1 format UNL file std::stringstream l; - l << "{\"blob\":\"" << blob << "\"" << ",\"signature\":\"" << sig - << "\"" << ",\"manifest\":\"" << manifest << "\"" + l << "{\"blob\":\"" << blob << "\"" + << ",\"signature\":\"" << sig << "\"" + << ",\"manifest\":\"" << manifest << "\"" << ",\"refresh_interval\": " << interval << ",\"version\":" << version << '}'; return l.str(); @@ -256,14 +257,15 @@ public: std::stringstream l; for (auto const& info : blobInfo) { - l << "{\"blob\":\"" << info.blob << "\"" << ",\"signature\":\"" - << info.signature << "\"},"; + l << "{\"blob\":\"" << info.blob << "\"" + << ",\"signature\":\"" << info.signature << "\"},"; } std::string blobs = l.str(); blobs.pop_back(); l.str(std::string()); l << "{\"blobs_v2\": [ " << blobs << "],\"manifest\":\"" << manifest - << "\"" << ",\"refresh_interval\": " << interval + << "\"" + << ",\"refresh_interval\": " << interval << ",\"version\":" << (version + 1) << '}'; return l.str(); }; diff --git a/src/test/jtx/deposit.h b/src/test/jtx/deposit.h index 9bd73d383d..a09979b7ac 100644 --- a/src/test/jtx/deposit.h +++ b/src/test/jtx/deposit.h @@ -44,7 +44,7 @@ struct AuthorizeCredentials std::string credType; auto - operator<=>(const AuthorizeCredentials&) const = default; + operator<=>(AuthorizeCredentials const&) const = default; Json::Value toJson() const diff --git a/src/test/jtx/envconfig.h b/src/test/jtx/envconfig.h index bb1716fffb..f22c5743e7 100644 --- a/src/test/jtx/envconfig.h +++ b/src/test/jtx/envconfig.h @@ -32,7 +32,7 @@ namespace test { extern std::atomic envUseIPv4; -inline const char* +inline char const* getEnvLocalhostAddr() { return envUseIPv4 ? 
"127.0.0.1" : "::1"; diff --git a/src/test/jtx/impl/amount.cpp b/src/test/jtx/impl/amount.cpp index 9134122da9..a1dbd25652 100644 --- a/src/test/jtx/impl/amount.cpp +++ b/src/test/jtx/impl/amount.cpp @@ -54,7 +54,7 @@ PrettyAmount::operator AnyAmount() const template static std::string -to_places(const T d, std::uint8_t places) +to_places(T const d, std::uint8_t places) { assert(places <= std::numeric_limits::digits10); diff --git a/src/test/jtx/impl/mpt.cpp b/src/test/jtx/impl/mpt.cpp index 51490ad21e..c8ff167221 100644 --- a/src/test/jtx/impl/mpt.cpp +++ b/src/test/jtx/impl/mpt.cpp @@ -83,7 +83,7 @@ MPTTester::MPTTester(Env& env, Account const& issuer, MPTInit const& arg) } void -MPTTester::create(const MPTCreate& arg) +MPTTester::create(MPTCreate const& arg) { if (id_) Throw("MPT can't be reused"); @@ -413,7 +413,7 @@ MPTTester::getFlags(std::optional const& holder) const } MPT -MPTTester::operator[](const std::string& name) +MPTTester::operator[](std::string const& name) { return MPT(name, issuanceID()); } diff --git a/src/test/jtx/xchain_bridge.h b/src/test/jtx/xchain_bridge.h index 8ff19bd508..1b3841358f 100644 --- a/src/test/jtx/xchain_bridge.h +++ b/src/test/jtx/xchain_bridge.h @@ -196,12 +196,12 @@ struct XChainBridgeObjects STAmount const split_reward_quorum; // 250,000 drops STAmount const split_reward_everyone; // 200,000 drops - const STAmount tiny_reward; // 37 drops - const STAmount tiny_reward_split; // 9 drops - const STAmount tiny_reward_remainder; // 1 drops + STAmount const tiny_reward; // 37 drops + STAmount const tiny_reward_split; // 9 drops + STAmount const tiny_reward_remainder; // 1 drops - const STAmount one_xrp; - const STAmount xrp_dust; + STAmount const one_xrp; + STAmount const xrp_dust; static constexpr int drop_per_xrp = 1000000; diff --git a/src/test/overlay/reduce_relay_test.cpp b/src/test/overlay/reduce_relay_test.cpp index 9fc105262b..18aebbe194 100644 --- a/src/test/overlay/reduce_relay_test.cpp +++ b/src/test/overlay/reduce_relay_test.cpp @@ -179,11 +179,11 @@ public: { } void - addTxQueue(const uint256&) override + addTxQueue(uint256 const&) override { } void - removeTxQueue(const uint256&) override + removeTxQueue(uint256 const&) override { } }; @@ -196,7 +196,7 @@ public: typedef std::milli period; typedef std::chrono::duration duration; typedef std::chrono::time_point time_point; - inline static const bool is_steady = false; + inline static bool const is_steady = false; static void advance(duration d) noexcept @@ -890,11 +890,12 @@ class reduce_relay_test : public beast::unit_test::suite protected: void - printPeers(const std::string& msg, std::uint16_t validator = 0) + printPeers(std::string const& msg, std::uint16_t validator = 0) { auto peers = network_.overlay().getPeers(network_.validator(validator)); - std::cout << msg << " " << "num peers " - << (int)network_.overlay().getNumPeers() << std::endl; + std::cout << msg << " " + << "num peers " << (int)network_.overlay().getNumPeers() + << std::endl; for (auto& [k, v] : peers) std::cout << k << ":" << (int)std::get(v) << " "; @@ -1125,7 +1126,7 @@ protected: } void - doTest(const std::string& msg, bool log, std::function f) + doTest(std::string const& msg, bool log, std::function f) { testcase(msg); f(log); diff --git a/src/test/overlay/tx_reduce_relay_test.cpp b/src/test/overlay/tx_reduce_relay_test.cpp index 07ff2bb14a..7a6b36ecd2 100644 --- a/src/test/overlay/tx_reduce_relay_test.cpp +++ b/src/test/overlay/tx_reduce_relay_test.cpp @@ -41,7 +41,7 @@ public: private: void - doTest(const 
std::string& msg, bool log, std::function f) + doTest(std::string const& msg, bool log, std::function f) { testcase(msg); f(log); @@ -131,7 +131,7 @@ private: sendTx_++; } void - addTxQueue(const uint256& hash) override + addTxQueue(uint256 const& hash) override { queueTx_++; } diff --git a/src/test/protocol/MultiApiJson_test.cpp b/src/test/protocol/MultiApiJson_test.cpp index a5c37d257c..9105607ba4 100644 --- a/src/test/protocol/MultiApiJson_test.cpp +++ b/src/test/protocol/MultiApiJson_test.cpp @@ -45,7 +45,7 @@ Overload(Ts...) -> Overload; struct MultiApiJson_test : beast::unit_test::suite { static auto - makeJson(const char* key, int val) + makeJson(char const* key, int val) { Json::Value obj1(Json::objectValue); obj1[key] = val; @@ -80,7 +80,7 @@ struct MultiApiJson_test : beast::unit_test::suite testcase("forApiVersions, forAllApiVersions"); // Some static data for test inputs - static const int primes[] = {2, 3, 5, 7, 11, 13, 17, 19, 23, + static int const primes[] = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97}; static_assert(std::size(primes) > RPC::apiMaximumValidVersion); @@ -205,7 +205,7 @@ struct MultiApiJson_test : beast::unit_test::suite return !requires { forAllApiVersions( std::forward(v).visit(), // - [](auto, auto, const char*) {}, + [](auto, auto, char const*) {}, 1); // parameter type mismatch }; }(std::as_const(s1))); @@ -256,7 +256,7 @@ struct MultiApiJson_test : beast::unit_test::suite Json::Value const&, std::integral_constant, int, - const char*) {}, + char const*) {}, 0, ""); }; diff --git a/src/test/protocol/STAccount_test.cpp b/src/test/protocol/STAccount_test.cpp index 034e0e9e08..9476a47c5e 100644 --- a/src/test/protocol/STAccount_test.cpp +++ b/src/test/protocol/STAccount_test.cpp @@ -91,7 +91,7 @@ struct STAccount_test : public beast::unit_test::suite { // Construct from a VL that is not exactly 160 bits. Serializer s; - const std::uint8_t bits128[]{ + std::uint8_t const bits128[]{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; s.addVL(bits128, sizeof(bits128)); SerialIter sit(s.slice()); diff --git a/src/test/protocol/STAmount_test.cpp b/src/test/protocol/STAmount_test.cpp index 1836fa8595..712c91000e 100644 --- a/src/test/protocol/STAmount_test.cpp +++ b/src/test/protocol/STAmount_test.cpp @@ -300,7 +300,7 @@ public: unexpected(!to_currency(c, "USD"), "create USD currency"); unexpected(to_string(c) != "USD", "check USD currency"); - const std::string cur = "015841551A748AD2C1F76FF6ECB0CCCD00000000"; + std::string const cur = "015841551A748AD2C1F76FF6ECB0CCCD00000000"; unexpected(!to_currency(c, cur), "create custom currency"); unexpected(to_string(c) != cur, "check custom currency"); } diff --git a/src/test/protocol/TER_test.cpp b/src/test/protocol/TER_test.cpp index a43fd8758a..0107f1c7d2 100644 --- a/src/test/protocol/TER_test.cpp +++ b/src/test/protocol/TER_test.cpp @@ -149,7 +149,7 @@ struct TER_test : public beast::unit_test::suite terRETRY, tesSUCCESS, tecCLAIM); - static const int hiIndex{ + static int const hiIndex{ std::tuple_size::value - 1}; // Verify that enums cannot be converted to other enum types. @@ -277,7 +277,7 @@ struct TER_test : public beast::unit_test::suite tecCLAIM, NotTEC{telLOCAL_ERROR}, TER{tecCLAIM}); - static const int hiIndex{std::tuple_size::value - 1}; + static int const hiIndex{std::tuple_size::value - 1}; // Verify that all types in the ters tuple can be compared with all // the other types in ters. 
diff --git a/src/test/rpc/DepositAuthorized_test.cpp b/src/test/rpc/DepositAuthorized_test.cpp index 8162528ec2..647f9e25ed 100644 --- a/src/test/rpc/DepositAuthorized_test.cpp +++ b/src/test/rpc/DepositAuthorized_test.cpp @@ -339,7 +339,7 @@ public: using namespace jtx; - const char credType[] = "abcde"; + char const credType[] = "abcde"; Account const alice{"alice"}; Account const becky{"becky"}; @@ -477,7 +477,7 @@ public: } { - static const std::vector credIds = { + static std::vector const credIds = { "18004829F915654A81B11C4AB8218D96FED67F209B58328A72314FB6EA288B" "E4", "28004829F915654A81B11C4AB8218D96FED67F209B58328A72314FB6EA288B" @@ -571,7 +571,7 @@ public: testcase("deposit_authorized with expired credentials"); // check expired credentials - const char credType2[] = "fghijk"; + char const credType2[] = "fghijk"; std::uint32_t const x = env.current() ->info() .parentCloseTime.time_since_epoch() diff --git a/src/test/rpc/Feature_test.cpp b/src/test/rpc/Feature_test.cpp index bc789f9a74..40de395a71 100644 --- a/src/test/rpc/Feature_test.cpp +++ b/src/test/rpc/Feature_test.cpp @@ -519,7 +519,7 @@ class Feature_test : public beast::unit_test::suite using namespace test::jtx; Env env{*this, FeatureBitset(featureMultiSignReserve)}; - constexpr const char* featureName = "MultiSignReserve"; + constexpr char const* featureName = "MultiSignReserve"; auto jrr = env.rpc("feature", featureName)[jss::result]; if (!BEAST_EXPECTS(jrr[jss::status] == jss::success, "status")) @@ -570,7 +570,7 @@ class Feature_test : public beast::unit_test::suite using namespace test::jtx; Env env{*this}; - constexpr const char* featureName = "NonFungibleTokensV1"; + constexpr char const* featureName = "NonFungibleTokensV1"; auto jrr = env.rpc("feature", featureName)[jss::result]; if (!BEAST_EXPECTS(jrr[jss::status] == jss::success, "status")) diff --git a/src/test/rpc/Handler_test.cpp b/src/test/rpc/Handler_test.cpp index 4883cf664f..8eb0c8d01d 100644 --- a/src/test/rpc/Handler_test.cpp +++ b/src/test/rpc/Handler_test.cpp @@ -84,7 +84,7 @@ class Handler_test : public beast::unit_test::suite } } - const double mean_squared = (sum * sum) / (j * j); + double const mean_squared = (sum * sum) / (j * j); return std::make_tuple( clock::duration{static_cast(sum / j)}, clock::duration{ @@ -100,7 +100,7 @@ class Handler_test : public beast::unit_test::suite std::random_device dev; std::ranlux48 prng(dev()); - std::vector names = + std::vector names = test::jtx::make_vector(ripple::RPC::getHandlerNames()); std::uniform_int_distribution distr{0, names.size() - 1}; diff --git a/src/test/rpc/LedgerEntry_test.cpp b/src/test/rpc/LedgerEntry_test.cpp index 465d6c6631..cb6f6d45e2 100644 --- a/src/test/rpc/LedgerEntry_test.cpp +++ b/src/test/rpc/LedgerEntry_test.cpp @@ -253,7 +253,7 @@ class LedgerEntry_test : public beast::unit_test::suite Account const issuer{"issuer"}; Account const alice{"alice"}; Account const bob{"bob"}; - const char credType[] = "abcde"; + char const credType[] = "abcde"; env.fund(XRP(5000), issuer, alice, bob); env.close(); @@ -692,7 +692,7 @@ class LedgerEntry_test : public beast::unit_test::suite Account const issuer{"issuer"}; Account const alice{"alice"}; Account const bob{"bob"}; - const char credType[] = "abcde"; + char const credType[] = "abcde"; env.fund(XRP(5000), issuer, alice, bob); env.close(); @@ -885,7 +885,7 @@ class LedgerEntry_test : public beast::unit_test::suite { // Failed, authorized_credentials is too long - static const std::string_view credTypes[] = { + static std::string_view const 
credTypes[] = { "cred1", "cred2", "cred3", diff --git a/src/test/rpc/LedgerRPC_test.cpp b/src/test/rpc/LedgerRPC_test.cpp index 4e8d2964ca..5b26f43161 100644 --- a/src/test/rpc/LedgerRPC_test.cpp +++ b/src/test/rpc/LedgerRPC_test.cpp @@ -563,11 +563,11 @@ class LedgerRPC_test : public beast::unit_test::suite env.close(); jrr = env.rpc("json", "ledger", to_string(jv))[jss::result]; - const std::string txid0 = [&]() { + std::string const txid0 = [&]() { auto const& parentHash = env.current()->info().parentHash; if (BEAST_EXPECT(jrr[jss::queue_data].size() == 2)) { - const std::string txid1 = [&]() { + std::string const txid1 = [&]() { auto const& txj = jrr[jss::queue_data][1u]; BEAST_EXPECT(txj[jss::account] == alice.human()); BEAST_EXPECT(txj[jss::fee_level] == "256"); @@ -589,7 +589,7 @@ class LedgerRPC_test : public beast::unit_test::suite auto const& tx = txj[jss::tx]; BEAST_EXPECT(tx[jss::Account] == alice.human()); BEAST_EXPECT(tx[jss::TransactionType] == jss::OfferCreate); - const auto txid0 = tx[jss::hash].asString(); + auto const txid0 = tx[jss::hash].asString(); uint256 tx0, tx1; BEAST_EXPECT(tx0.parseHex(txid0)); BEAST_EXPECT(tx1.parseHex(txid1)); @@ -665,7 +665,7 @@ class LedgerRPC_test : public beast::unit_test::suite jv[jss::binary] = false; jrr = env.rpc("json", "ledger", to_string(jv))[jss::result]; - const std::string txid2 = [&]() { + std::string const txid2 = [&]() { if (BEAST_EXPECT(jrr[jss::queue_data].size() == 1)) { auto const& txj = jrr[jss::queue_data][0u]; diff --git a/src/test/rpc/RPCCall_test.cpp b/src/test/rpc/RPCCall_test.cpp index 8438ef533d..be0f32b5ce 100644 --- a/src/test/rpc/RPCCall_test.cpp +++ b/src/test/rpc/RPCCall_test.cpp @@ -5840,7 +5840,7 @@ static RPCCallTestData const rpcCallTestArray[] = { }; std::string -updateAPIVersionString(const char* const req, unsigned apiVersion) +updateAPIVersionString(char const* const req, unsigned apiVersion) { std::string const version_str = std::to_string(apiVersion); static auto const place_holder = "%API_VER%"; @@ -5883,7 +5883,7 @@ public: std::vector const args{ rpcCallTest.args.begin(), rpcCallTest.args.end()}; - const char* const expVersioned = + char const* const expVersioned = (apiVersion - RPC::apiMinimumSupportedVersion) < rpcCallTest.exp.size() ? 
rpcCallTest.exp[apiVersion - RPC::apiMinimumSupportedVersion] diff --git a/src/test/rpc/Transaction_test.cpp b/src/test/rpc/Transaction_test.cpp index 0a5821499a..577f731200 100644 --- a/src/test/rpc/Transaction_test.cpp +++ b/src/test/rpc/Transaction_test.cpp @@ -55,11 +55,11 @@ class Transaction_test : public beast::unit_test::suite using namespace test::jtx; using std::to_string; - const char* COMMAND = jss::tx.c_str(); - const char* BINARY = jss::binary.c_str(); - const char* NOT_FOUND = RPC::get_error_info(rpcTXN_NOT_FOUND).token; - const char* INVALID = RPC::get_error_info(rpcINVALID_LGR_RANGE).token; - const char* EXCESSIVE = + char const* COMMAND = jss::tx.c_str(); + char const* BINARY = jss::binary.c_str(); + char const* NOT_FOUND = RPC::get_error_info(rpcTXN_NOT_FOUND).token; + char const* INVALID = RPC::get_error_info(rpcINVALID_LGR_RANGE).token; + char const* EXCESSIVE = RPC::get_error_info(rpcEXCESSIVE_LGR_RANGE).token; Env env{*this, features}; @@ -135,7 +135,7 @@ class Transaction_test : public beast::unit_test::suite BEAST_EXPECT(!result[jss::result][jss::searched_all].asBool()); } - const auto deletedLedger = (startLegSeq + endLegSeq) / 2; + auto const deletedLedger = (startLegSeq + endLegSeq) / 2; { // Remove one of the ledgers from the database directly dynamic_cast(&env.app().getRelationalDatabase()) @@ -305,11 +305,11 @@ class Transaction_test : public beast::unit_test::suite using namespace test::jtx; using std::to_string; - const char* COMMAND = jss::tx.c_str(); - const char* BINARY = jss::binary.c_str(); - const char* NOT_FOUND = RPC::get_error_info(rpcTXN_NOT_FOUND).token; - const char* INVALID = RPC::get_error_info(rpcINVALID_LGR_RANGE).token; - const char* EXCESSIVE = + char const* COMMAND = jss::tx.c_str(); + char const* BINARY = jss::binary.c_str(); + char const* NOT_FOUND = RPC::get_error_info(rpcTXN_NOT_FOUND).token; + char const* INVALID = RPC::get_error_info(rpcINVALID_LGR_RANGE).token; + char const* EXCESSIVE = RPC::get_error_info(rpcEXCESSIVE_LGR_RANGE).token; Env env{*this, makeNetworkConfig(11111)}; @@ -393,7 +393,7 @@ class Transaction_test : public beast::unit_test::suite BEAST_EXPECT(!result[jss::result][jss::searched_all].asBool()); } - const auto deletedLedger = (startLegSeq + endLegSeq) / 2; + auto const deletedLedger = (startLegSeq + endLegSeq) / 2; { // Remove one of the ledgers from the database directly dynamic_cast(&env.app().getRelationalDatabase()) diff --git a/src/test/rpc/ValidatorInfo_test.cpp b/src/test/rpc/ValidatorInfo_test.cpp index 4904923e0b..78ff267e57 100644 --- a/src/test/rpc/ValidatorInfo_test.cpp +++ b/src/test/rpc/ValidatorInfo_test.cpp @@ -63,7 +63,7 @@ public: testcase("Lookup"); using namespace jtx; - const std::vector tokenBlob = { + std::vector const tokenBlob = { " " "eyJ2YWxpZGF0aW9uX3NlY3JldF9rZXkiOiI5ZWQ0NWY4NjYyNDFjYzE4YTI3NDdiNT" "\n", diff --git a/src/test/unit_test/multi_runner.h b/src/test/unit_test/multi_runner.h index 653bbead06..08512d1882 100644 --- a/src/test/unit_test/multi_runner.h +++ b/src/test/unit_test/multi_runner.h @@ -152,10 +152,10 @@ class multi_runner_base print_results(S& s); }; - static constexpr const char* shared_mem_name_ = "RippledUnitTestSharedMem"; + static constexpr char const* shared_mem_name_ = "RippledUnitTestSharedMem"; // name of the message queue a multi_runner_child will use to communicate // with multi_runner_parent - static constexpr const char* message_queue_name_ = + static constexpr char const* message_queue_name_ = "RippledUnitTestMessageQueue"; // `inner_` will be created 
in shared memory diff --git a/src/xrpld/app/consensus/RCLConsensus.cpp b/src/xrpld/app/consensus/RCLConsensus.cpp index 0a5cee4121..292ba7d483 100644 --- a/src/xrpld/app/consensus/RCLConsensus.cpp +++ b/src/xrpld/app/consensus/RCLConsensus.cpp @@ -319,8 +319,8 @@ RCLConsensus::Adaptor::onClose( NetClock::time_point const& closeTime, ConsensusMode mode) -> Result { - const bool wrongLCL = mode == ConsensusMode::wrongLedger; - const bool proposing = mode == ConsensusMode::proposing; + bool const wrongLCL = mode == ConsensusMode::wrongLedger; + bool const proposing = mode == ConsensusMode::proposing; notify(protocol::neCLOSING_LEDGER, ledger, !wrongLCL); @@ -437,7 +437,7 @@ RCLConsensus::Adaptor::onAccept( ConsensusCloseTimes const& rawCloseTimes, ConsensusMode const& mode, Json::Value&& consensusJson, - const bool validating) + bool const validating) { app_.getJobQueue().addJob( jtACCEPT, @@ -474,9 +474,9 @@ RCLConsensus::Adaptor::doAccept( bool closeTimeCorrect; - const bool proposing = mode == ConsensusMode::proposing; - const bool haveCorrectLCL = mode != ConsensusMode::wrongLedger; - const bool consensusFail = result.state == ConsensusState::MovedOn; + bool const proposing = mode == ConsensusMode::proposing; + bool const haveCorrectLCL = mode != ConsensusMode::wrongLedger; + bool const consensusFail = result.state == ConsensusState::MovedOn; auto consensusCloseTime = result.position.closeTime(); @@ -1020,7 +1020,7 @@ RCLConsensus::Adaptor::preStartRound( } } - const bool synced = app_.getOPs().getOperatingMode() == OperatingMode::FULL; + bool const synced = app_.getOPs().getOperatingMode() == OperatingMode::FULL; if (validating_) { @@ -1105,8 +1105,8 @@ RCLConsensus::startRound( } RclConsensusLogger::RclConsensusLogger( - const char* label, - const bool validating, + char const* label, + bool const validating, beast::Journal j) : j_(j) { diff --git a/src/xrpld/app/consensus/RCLConsensus.h b/src/xrpld/app/consensus/RCLConsensus.h index 735c67fd01..38481d2363 100644 --- a/src/xrpld/app/consensus/RCLConsensus.h +++ b/src/xrpld/app/consensus/RCLConsensus.h @@ -328,7 +328,7 @@ class RCLConsensus ConsensusCloseTimes const& rawCloseTimes, ConsensusMode const& mode, Json::Value&& consensusJson, - const bool validating); + bool const validating); /** Process the accepted ledger that was a result of simulation/force accept. 
@@ -556,7 +556,7 @@ class RclConsensusLogger public: explicit RclConsensusLogger( - const char* label, + char const* label, bool validating, beast::Journal j); ~RclConsensusLogger(); diff --git a/src/xrpld/app/ledger/BuildLedger.h b/src/xrpld/app/ledger/BuildLedger.h index 0805db6c8d..2ec571773c 100644 --- a/src/xrpld/app/ledger/BuildLedger.h +++ b/src/xrpld/app/ledger/BuildLedger.h @@ -53,7 +53,7 @@ std::shared_ptr buildLedger( std::shared_ptr const& parent, NetClock::time_point closeTime, - const bool closeTimeCorrect, + bool const closeTimeCorrect, NetClock::duration closeResolution, Application& app, CanonicalTXSet& txns, diff --git a/src/xrpld/app/ledger/Ledger.cpp b/src/xrpld/app/ledger/Ledger.cpp index c4965cded2..3cdf0ab1a7 100644 --- a/src/xrpld/app/ledger/Ledger.cpp +++ b/src/xrpld/app/ledger/Ledger.cpp @@ -1102,7 +1102,7 @@ finishLoadByIndexOrHash( std::tuple, std::uint32_t, uint256> getLatestLedger(Application& app) { - const std::optional info = + std::optional const info = app.getRelationalDatabase().getNewestLedgerInfo(); if (!info) return {std::shared_ptr(), {}, {}}; diff --git a/src/xrpld/app/ledger/LedgerHistory.cpp b/src/xrpld/app/ledger/LedgerHistory.cpp index bf866abf3f..ccec209bd4 100644 --- a/src/xrpld/app/ledger/LedgerHistory.cpp +++ b/src/xrpld/app/ledger/LedgerHistory.cpp @@ -65,7 +65,7 @@ LedgerHistory::insert( std::unique_lock sl(m_ledgers_by_hash.peekMutex()); - const bool alreadyHad = m_ledgers_by_hash.canonicalize_replace_cache( + bool const alreadyHad = m_ledgers_by_hash.canonicalize_replace_cache( ledger->info().hash, ledger); if (validated) mLedgersByIndex[ledger->info().seq] = ledger->info().hash; @@ -225,30 +225,30 @@ log_metadata_difference( { JLOG(j.debug()) << "MISMATCH on TX " << tx << ": Different result and index!"; - JLOG(j.debug()) - << " Built:" << " Result: " << builtMetaData->getResult() - << " Index: " << builtMetaData->getIndex(); - JLOG(j.debug()) - << " Valid:" << " Result: " << validMetaData->getResult() - << " Index: " << validMetaData->getIndex(); + JLOG(j.debug()) << " Built:" + << " Result: " << builtMetaData->getResult() + << " Index: " << builtMetaData->getIndex(); + JLOG(j.debug()) << " Valid:" + << " Result: " << validMetaData->getResult() + << " Index: " << validMetaData->getIndex(); } else if (result_diff) { JLOG(j.debug()) << "MISMATCH on TX " << tx << ": Different result!"; - JLOG(j.debug()) - << " Built:" << " Result: " << builtMetaData->getResult(); - JLOG(j.debug()) - << " Valid:" << " Result: " << validMetaData->getResult(); + JLOG(j.debug()) << " Built:" + << " Result: " << builtMetaData->getResult(); + JLOG(j.debug()) << " Valid:" + << " Result: " << validMetaData->getResult(); } else if (index_diff) { JLOG(j.debug()) << "MISMATCH on TX " << tx << ": Different index!"; - JLOG(j.debug()) - << " Built:" << " Index: " << builtMetaData->getIndex(); - JLOG(j.debug()) - << " Valid:" << " Index: " << validMetaData->getIndex(); + JLOG(j.debug()) << " Built:" + << " Index: " << builtMetaData->getIndex(); + JLOG(j.debug()) << " Valid:" + << " Index: " << validMetaData->getIndex(); } } else @@ -267,12 +267,12 @@ log_metadata_difference( JLOG(j.debug()) << "MISMATCH on TX " << tx << ": Different result and nodes!"; JLOG(j.debug()) - << " Built:" << " Result: " << builtMetaData->getResult() - << " Nodes:\n" + << " Built:" + << " Result: " << builtMetaData->getResult() << " Nodes:\n" << builtNodes.getJson(JsonOptions::none); JLOG(j.debug()) - << " Valid:" << " Result: " << validMetaData->getResult() - << " Nodes:\n" + << " Valid:" + 
<< " Result: " << validMetaData->getResult() << " Nodes:\n" << validNodes.getJson(JsonOptions::none); } else if (index_diff) @@ -280,21 +280,23 @@ log_metadata_difference( JLOG(j.debug()) << "MISMATCH on TX " << tx << ": Different index and nodes!"; JLOG(j.debug()) - << " Built:" << " Index: " << builtMetaData->getIndex() - << " Nodes:\n" + << " Built:" + << " Index: " << builtMetaData->getIndex() << " Nodes:\n" << builtNodes.getJson(JsonOptions::none); JLOG(j.debug()) - << " Valid:" << " Index: " << validMetaData->getIndex() - << " Nodes:\n" + << " Valid:" + << " Index: " << validMetaData->getIndex() << " Nodes:\n" << validNodes.getJson(JsonOptions::none); } else // nodes_diff { JLOG(j.debug()) << "MISMATCH on TX " << tx << ": Different nodes!"; - JLOG(j.debug()) << " Built:" << " Nodes:\n" + JLOG(j.debug()) << " Built:" + << " Nodes:\n" << builtNodes.getJson(JsonOptions::none); - JLOG(j.debug()) << " Valid:" << " Nodes:\n" + JLOG(j.debug()) << " Valid:" + << " Nodes:\n" << validNodes.getJson(JsonOptions::none); } } @@ -351,10 +353,10 @@ LedgerHistory::handleMismatch( if (!builtLedger || !validLedger) { - JLOG(j_.error()) << "MISMATCH cannot be analyzed:" << " builtLedger: " - << to_string(built) << " -> " << builtLedger - << " validLedger: " << to_string(valid) << " -> " - << validLedger; + JLOG(j_.error()) << "MISMATCH cannot be analyzed:" + << " builtLedger: " << to_string(built) << " -> " + << builtLedger << " validLedger: " << to_string(valid) + << " -> " << validLedger; return; } diff --git a/src/xrpld/app/ledger/LedgerMaster.h b/src/xrpld/app/ledger/LedgerMaster.h index f8d726ec8e..5e0598d78b 100644 --- a/src/xrpld/app/ledger/LedgerMaster.h +++ b/src/xrpld/app/ledger/LedgerMaster.h @@ -312,7 +312,7 @@ private: // Returns true if work started. Always called with m_mutex locked. // The passed lock is a reminder to callers. 
bool - newPFWork(const char* name, std::unique_lock&); + newPFWork(char const* name, std::unique_lock&); Application& app_; beast::Journal m_journal; diff --git a/src/xrpld/app/ledger/OrderBookDB.cpp b/src/xrpld/app/ledger/OrderBookDB.cpp index 5d3616ce20..b8a7b54008 100644 --- a/src/xrpld/app/ledger/OrderBookDB.cpp +++ b/src/xrpld/app/ledger/OrderBookDB.cpp @@ -252,7 +252,7 @@ OrderBookDB::getBookListeners(Book const& book) void OrderBookDB::processTxn( std::shared_ptr const& ledger, - const AcceptedLedgerTx& alTx, + AcceptedLedgerTx const& alTx, MultiApiJson const& jvObj) { std::lock_guard sl(mLock); diff --git a/src/xrpld/app/ledger/OrderBookDB.h b/src/xrpld/app/ledger/OrderBookDB.h index d120f43aea..bc36f8a301 100644 --- a/src/xrpld/app/ledger/OrderBookDB.h +++ b/src/xrpld/app/ledger/OrderBookDB.h @@ -65,7 +65,7 @@ public: void processTxn( std::shared_ptr const& ledger, - const AcceptedLedgerTx& alTx, + AcceptedLedgerTx const& alTx, MultiApiJson const& jvObj); private: diff --git a/src/xrpld/app/ledger/detail/BuildLedger.cpp b/src/xrpld/app/ledger/detail/BuildLedger.cpp index 3f099cd2ea..954507a006 100644 --- a/src/xrpld/app/ledger/detail/BuildLedger.cpp +++ b/src/xrpld/app/ledger/detail/BuildLedger.cpp @@ -39,7 +39,7 @@ std::shared_ptr buildLedgerImpl( std::shared_ptr const& parent, NetClock::time_point closeTime, - const bool closeTimeCorrect, + bool const closeTimeCorrect, NetClock::duration closeResolution, Application& app, beast::Journal j, @@ -182,7 +182,7 @@ std::shared_ptr buildLedger( std::shared_ptr const& parent, NetClock::time_point closeTime, - const bool closeTimeCorrect, + bool const closeTimeCorrect, NetClock::duration closeResolution, Application& app, CanonicalTXSet& txns, diff --git a/src/xrpld/app/ledger/detail/LedgerMaster.cpp b/src/xrpld/app/ledger/detail/LedgerMaster.cpp index 88f3de5b12..78f0375b16 100644 --- a/src/xrpld/app/ledger/detail/LedgerMaster.cpp +++ b/src/xrpld/app/ledger/detail/LedgerMaster.cpp @@ -1526,7 +1526,7 @@ LedgerMaster::newOrderBookDB() */ bool LedgerMaster::newPFWork( - const char* name, + char const* name, std::unique_lock&) { if (!app_.isStopping() && mPathFindThread < 2 && diff --git a/src/xrpld/app/ledger/detail/LedgerToJson.cpp b/src/xrpld/app/ledger/detail/LedgerToJson.cpp index 5f1e47e8b3..3e4f4b8f0a 100644 --- a/src/xrpld/app/ledger/detail/LedgerToJson.cpp +++ b/src/xrpld/app/ledger/detail/LedgerToJson.cpp @@ -168,7 +168,7 @@ fillJsonTx( if (!fill.ledger.open()) txJson[jss::ledger_hash] = to_string(fill.ledger.info().hash); - const bool validated = + bool const validated = fill.context->ledgerMaster.isValidated(fill.ledger); txJson[jss::validated] = validated; if (validated) diff --git a/src/xrpld/app/ledger/detail/TimeoutCounter.cpp b/src/xrpld/app/ledger/detail/TimeoutCounter.cpp index 0961488691..e81ec6574d 100644 --- a/src/xrpld/app/ledger/detail/TimeoutCounter.cpp +++ b/src/xrpld/app/ledger/detail/TimeoutCounter.cpp @@ -100,8 +100,8 @@ TimeoutCounter::invokeOnTimer() if (!progress_) { ++timeouts_; - JLOG(journal_.debug()) - << "Timeout(" << timeouts_ << ") " << " acquiring " << hash_; + JLOG(journal_.debug()) << "Timeout(" << timeouts_ << ") " + << " acquiring " << hash_; onTimer(false, sl); } else diff --git a/src/xrpld/app/main/Application.cpp b/src/xrpld/app/main/Application.cpp index 6e222858d8..5d495aaf06 100644 --- a/src/xrpld/app/main/Application.cpp +++ b/src/xrpld/app/main/Application.cpp @@ -1137,7 +1137,7 @@ public: return maxDisallowedLedger_; } - virtual const std::optional& + virtual std::optional const& 
trapTxID() const override { return trapTxID_; diff --git a/src/xrpld/app/main/Application.h b/src/xrpld/app/main/Application.h index 1bc4998aa1..f3cff35d4b 100644 --- a/src/xrpld/app/main/Application.h +++ b/src/xrpld/app/main/Application.h @@ -273,7 +273,7 @@ public: virtual LedgerIndex getMaxDisallowedLedger() = 0; - virtual const std::optional& + virtual std::optional const& trapTxID() const = 0; }; diff --git a/src/xrpld/app/main/GRPCServer.cpp b/src/xrpld/app/main/GRPCServer.cpp index 2ee811dc19..a4bbcda0a5 100644 --- a/src/xrpld/app/main/GRPCServer.cpp +++ b/src/xrpld/app/main/GRPCServer.cpp @@ -447,8 +447,8 @@ GRPCServerImpl::handleRpcs() if (!ok) { - JLOG(journal_.debug()) - << "Request listener cancelled. " << "Destroying object"; + JLOG(journal_.debug()) << "Request listener cancelled. " + << "Destroying object"; erase(ptr); } else diff --git a/src/xrpld/app/main/GRPCServer.h b/src/xrpld/app/main/GRPCServer.h index 2ecbd5e7da..5ed4ba8454 100644 --- a/src/xrpld/app/main/GRPCServer.h +++ b/src/xrpld/app/main/GRPCServer.h @@ -44,10 +44,10 @@ public: Processor() = default; - Processor(const Processor&) = delete; + Processor(Processor const&) = delete; Processor& - operator=(const Processor&) = delete; + operator=(Processor const&) = delete; // process a request that has arrived. Can only be called once per instance virtual void @@ -120,10 +120,10 @@ private: public: explicit GRPCServerImpl(Application& app); - GRPCServerImpl(const GRPCServerImpl&) = delete; + GRPCServerImpl(GRPCServerImpl const&) = delete; GRPCServerImpl& - operator=(const GRPCServerImpl&) = delete; + operator=(GRPCServerImpl const&) = delete; void shutdown(); @@ -214,10 +214,10 @@ private: Resource::Charge loadType, std::vector const& secureGatewayIPs); - CallData(const CallData&) = delete; + CallData(CallData const&) = delete; CallData& - operator=(const CallData&) = delete; + operator=(CallData const&) = delete; virtual void process() override; @@ -304,10 +304,10 @@ public: { } - GRPCServer(const GRPCServer&) = delete; + GRPCServer(GRPCServer const&) = delete; GRPCServer& - operator=(const GRPCServer&) = delete; + operator=(GRPCServer const&) = delete; bool start(); diff --git a/src/xrpld/app/main/Main.cpp b/src/xrpld/app/main/Main.cpp index 2fa0f68df4..e926a38563 100644 --- a/src/xrpld/app/main/Main.cpp +++ b/src/xrpld/app/main/Main.cpp @@ -115,7 +115,7 @@ adjustDescriptorLimit(int needed, beast::Journal j) } void -printHelp(const po::options_description& desc) +printHelp(po::options_description const& desc) { std::cerr << systemName() << "d [options] \n" diff --git a/src/xrpld/app/misc/AMMHelpers.h b/src/xrpld/app/misc/AMMHelpers.h index f27d542e32..97554b7e15 100644 --- a/src/xrpld/app/misc/AMMHelpers.h +++ b/src/xrpld/app/misc/AMMHelpers.h @@ -382,9 +382,9 @@ changeSpotPriceQuality( { JLOG(j.error()) << "changeSpotPriceQuality failed: " << to_string(pool.in) - << " " << to_string(pool.out) << " " << " " << quality - << " " << tfee << " " << to_string(amounts.in) << " " - << to_string(amounts.out); + << " " << to_string(pool.out) << " " + << " " << quality << " " << tfee << " " + << to_string(amounts.in) << " " << to_string(amounts.out); Throw("changeSpotPriceQuality failed"); } else diff --git a/src/xrpld/app/misc/FeeVoteImpl.cpp b/src/xrpld/app/misc/FeeVoteImpl.cpp index f9d5fbc58c..85b5791d67 100644 --- a/src/xrpld/app/misc/FeeVoteImpl.cpp +++ b/src/xrpld/app/misc/FeeVoteImpl.cpp @@ -130,7 +130,7 @@ FeeVoteImpl::doValidation( auto vote = [&v, this]( auto const current, XRPAmount target, - const char* name, + 
char const* name, auto const& sfield) { if (current != target) { @@ -164,7 +164,7 @@ FeeVoteImpl::doValidation( auto const current, XRPAmount target, auto const& convertCallback, - const char* name, + char const* name, auto const& sfield) { if (current != target) { diff --git a/src/xrpld/app/misc/HashRouter.cpp b/src/xrpld/app/misc/HashRouter.cpp index ac522487f5..dc87b2bce1 100644 --- a/src/xrpld/app/misc/HashRouter.cpp +++ b/src/xrpld/app/misc/HashRouter.cpp @@ -55,7 +55,7 @@ HashRouter::addSuppressionPeer(uint256 const& key, PeerShortID peer) } std::pair> -HashRouter::addSuppressionPeerWithStatus(const uint256& key, PeerShortID peer) +HashRouter::addSuppressionPeerWithStatus(uint256 const& key, PeerShortID peer) { std::lock_guard lock(mutex_); diff --git a/src/xrpld/app/misc/NetworkOPs.cpp b/src/xrpld/app/misc/NetworkOPs.cpp index b05f38f3ed..6f29f79384 100644 --- a/src/xrpld/app/misc/NetworkOPs.cpp +++ b/src/xrpld/app/misc/NetworkOPs.cpp @@ -373,7 +373,7 @@ public: std::shared_ptr& lpLedger, Book const&, AccountID const& uTakerID, - const bool bProof, + bool const bProof, unsigned int iLimit, Json::Value const& jvMarker, Json::Value& jvResult) override; @@ -397,7 +397,7 @@ private: void switchLastClosedLedger(std::shared_ptr const& newLCL); bool - checkLastClosedLedger(const Overlay::PeerSequence&, uint256& networkClosed); + checkLastClosedLedger(Overlay::PeerSequence const&, uint256& networkClosed); public: bool @@ -958,7 +958,7 @@ NetworkOPsImp::setStateTimer() void NetworkOPsImp::setTimer( boost::asio::steady_timer& timer, - const std::chrono::milliseconds& expiry_time, + std::chrono::milliseconds const& expiry_time, std::function onExpire, std::function onError) { @@ -1101,7 +1101,7 @@ NetworkOPsImp::processHeartbeatTimer() mConsensus.timerEntry(app_.timeKeeper().closeTime(), clog.ss()); CLOG(clog.ss()) << "consensus phase " << to_string(mLastConsensusPhase); - const ConsensusPhase currPhase = mConsensus.phase(); + ConsensusPhase const currPhase = mConsensus.phase(); if (mLastConsensusPhase != currPhase) { reportConsensusStateChange(currPhase); @@ -1844,7 +1844,7 @@ NetworkOPsImp::clearUNLBlocked() bool NetworkOPsImp::checkLastClosedLedger( - const Overlay::PeerSequence& peerList, + Overlay::PeerSequence const& peerList, uint256& networkClosed) { // Returns true if there's an *abnormal* ledger issue, normal changing in @@ -2066,7 +2066,7 @@ NetworkOPsImp::beginConsensus( changes.added, clog); - const ConsensusPhase currPhase = mConsensus.phase(); + ConsensusPhase const currPhase = mConsensus.phase(); if (mLastConsensusPhase != currPhase) { reportConsensusStateChange(currPhase); @@ -3315,7 +3315,7 @@ NetworkOPsImp::transJson( void NetworkOPsImp::pubValidatedTransaction( std::shared_ptr const& ledger, - const AcceptedLedgerTx& transaction, + AcceptedLedgerTx const& transaction, bool last) { auto const& stTxn = transaction.getTxn(); @@ -3460,8 +3460,8 @@ NetworkOPsImp::pubAccountTransaction( } JLOG(m_journal.trace()) - << "pubAccountTransaction: " << "proposed=" << iProposed - << ", accepted=" << iAccepted; + << "pubAccountTransaction: " + << "proposed=" << iProposed << ", accepted=" << iAccepted; if (!notify.empty() || !accountHistoryNotify.empty()) { @@ -3666,7 +3666,7 @@ void NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo) { enum DatabaseType { Sqlite, None }; - static const auto databaseType = [&]() -> DatabaseType { + static auto const databaseType = [&]() -> DatabaseType { // Use a dynamic_cast to return DatabaseType::None // on failure. 
if (dynamic_cast(&app_.getRelationalDatabase())) @@ -3722,7 +3722,7 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo) if (node.isFieldPresent(sfNewFields)) { - if (auto inner = dynamic_cast( + if (auto inner = dynamic_cast( node.peekAtPField(sfNewFields)); inner) { @@ -4058,7 +4058,7 @@ NetworkOPsImp::unsubAccountHistory( void NetworkOPsImp::unsubAccountHistoryInternal( std::uint64_t seq, - const AccountID& account, + AccountID const& account, bool historyOnly) { std::lock_guard sl(mSubLock); @@ -4395,8 +4395,8 @@ NetworkOPsImp::getBookPage( (jvResult[jss::offers] = Json::Value(Json::arrayValue)); std::unordered_map umBalance; - const uint256 uBookBase = getBookBase(book); - const uint256 uBookEnd = getQualityNext(uBookBase); + uint256 const uBookBase = getBookBase(book); + uint256 const uBookEnd = getQualityNext(uBookBase); uint256 uTipIndex = uBookBase; if (auto stream = m_journal.trace()) @@ -4607,7 +4607,7 @@ NetworkOPsImp::getBookPage( auto const rate = transferRate(lesActive, book.out.account); - const bool bGlobalFreeze = lesActive.isGlobalFrozen(book.out.account) || + bool const bGlobalFreeze = lesActive.isGlobalFrozen(book.out.account) || lesActive.isGlobalFrozen(book.in.account); while (iLimit-- > 0 && obIterator.nextOffer()) diff --git a/src/xrpld/app/misc/Transaction.h b/src/xrpld/app/misc/Transaction.h index 817e68817c..005ff16993 100644 --- a/src/xrpld/app/misc/Transaction.h +++ b/src/xrpld/app/misc/Transaction.h @@ -64,7 +64,7 @@ class Transaction : public std::enable_shared_from_this, { public: using pointer = std::shared_ptr; - using ref = const pointer&; + using ref = pointer const&; Transaction( std::shared_ptr const&, diff --git a/src/xrpld/app/misc/TxQ.h b/src/xrpld/app/misc/TxQ.h index 6fc61055f1..f6ac2c6861 100644 --- a/src/xrpld/app/misc/TxQ.h +++ b/src/xrpld/app/misc/TxQ.h @@ -650,7 +650,7 @@ private: * */ bool - operator()(const MaybeTx& lhs, const MaybeTx& rhs) const + operator()(MaybeTx const& lhs, MaybeTx const& rhs) const { if (lhs.feeLevel == rhs.feeLevel) return (lhs.txID ^ MaybeTx::parentHashComp) < @@ -690,7 +690,7 @@ private: /// Construct from a transaction explicit TxQAccount(std::shared_ptr const& txn); /// Construct from an account - explicit TxQAccount(const AccountID& account); + explicit TxQAccount(AccountID const& account); /// Return the number of transactions currently queued for this account std::size_t diff --git a/src/xrpld/app/misc/ValidatorList.h b/src/xrpld/app/misc/ValidatorList.h index 4e18aa5db3..4cb32282db 100644 --- a/src/xrpld/app/misc/ValidatorList.h +++ b/src/xrpld/app/misc/ValidatorList.h @@ -271,7 +271,7 @@ class ValidatorList // collection with more than 5 entries will be considered malformed. static constexpr std::size_t maxSupportedBlobs = 5; // Prefix of the file name used to store cache files. 
- static const std::string filePrefix_; + static std::string const filePrefix_; public: ValidatorList( diff --git a/src/xrpld/app/misc/ValidatorSite.h b/src/xrpld/app/misc/ValidatorSite.h index 88e30e28ab..58f9eaaeff 100644 --- a/src/xrpld/app/misc/ValidatorSite.h +++ b/src/xrpld/app/misc/ValidatorSite.h @@ -86,7 +86,7 @@ private: struct Resource { explicit Resource(std::string uri_); - const std::string uri; + std::string const uri; parsedURL pUrl; }; @@ -136,7 +136,7 @@ private: std::vector sites_; // time to allow for requests to complete - const std::chrono::seconds requestTimeout_; + std::chrono::seconds const requestTimeout_; public: ValidatorSite( diff --git a/src/xrpld/app/misc/detail/AMMUtils.cpp b/src/xrpld/app/misc/detail/AMMUtils.cpp index 5078049a4a..ba4c741300 100644 --- a/src/xrpld/app/misc/detail/AMMUtils.cpp +++ b/src/xrpld/app/misc/detail/AMMUtils.cpp @@ -73,7 +73,7 @@ ammHolds( auto const singleIssue = [&issue1, &issue2, &j]( Issue checkIssue, - const char* label) -> std::optional> { + char const* label) -> std::optional> { if (checkIssue == issue1) return std::make_optional(std::make_pair(issue1, issue2)); else if (checkIssue == issue2) @@ -150,8 +150,8 @@ ammLPHolds( } amount.setIssuer(ammAccount); - JLOG(j.trace()) << "ammLPHolds:" << " lpAccount=" - << to_string(lpAccount) + JLOG(j.trace()) << "ammLPHolds:" + << " lpAccount=" << to_string(lpAccount) << " amount=" << amount.getFullText(); } diff --git a/src/xrpld/app/misc/detail/AmendmentTable.cpp b/src/xrpld/app/misc/detail/AmendmentTable.cpp index ae41a2aa7e..b13e40c3ae 100644 --- a/src/xrpld/app/misc/detail/AmendmentTable.cpp +++ b/src/xrpld/app/misc/detail/AmendmentTable.cpp @@ -998,8 +998,8 @@ AmendmentTableImpl::trustChanged(hash_set const& allTrusted) void AmendmentTableImpl::injectJson( Json::Value& v, - const uint256& id, - const AmendmentState& fs, + uint256 const& id, + AmendmentState const& fs, bool isAdmin, std::lock_guard const&) const { diff --git a/src/xrpld/app/misc/detail/TxQ.cpp b/src/xrpld/app/misc/detail/TxQ.cpp index 11d81fb8ae..adf96d0e14 100644 --- a/src/xrpld/app/misc/detail/TxQ.cpp +++ b/src/xrpld/app/misc/detail/TxQ.cpp @@ -323,7 +323,7 @@ TxQ::TxQAccount::TxQAccount(std::shared_ptr const& txn) { } -TxQ::TxQAccount::TxQAccount(const AccountID& account_) : account(account_) +TxQ::TxQAccount::TxQAccount(AccountID const& account_) : account(account_) { } @@ -1504,11 +1504,11 @@ TxQ::accept(Application& app, OpenView& view) } else { - JLOG(j_.debug()) - << "Queued transaction " << candidateIter->txID - << " failed with " << transToken(txnResult) - << ". Leave in queue." << " Applied: " << didApply - << ". Flags: " << candidateIter->flags; + JLOG(j_.debug()) << "Queued transaction " << candidateIter->txID + << " failed with " << transToken(txnResult) + << ". Leave in queue." + << " Applied: " << didApply + << ". 
Flags: " << candidateIter->flags; if (account.retryPenalty && candidateIter->retriesRemaining > 2) candidateIter->retriesRemaining = 1; else diff --git a/src/xrpld/app/misc/detail/ValidatorList.cpp b/src/xrpld/app/misc/detail/ValidatorList.cpp index 282c3c9e19..1ddb51c9dd 100644 --- a/src/xrpld/app/misc/detail/ValidatorList.cpp +++ b/src/xrpld/app/misc/detail/ValidatorList.cpp @@ -115,7 +115,7 @@ ValidatorList::MessageWithHash::MessageWithHash( { } -const std::string ValidatorList::filePrefix_ = "cache."; +std::string const ValidatorList::filePrefix_ = "cache."; ValidatorList::ValidatorList( ManifestCache& validatorManifests, diff --git a/src/xrpld/app/paths/AMMLiquidity.h b/src/xrpld/app/paths/AMMLiquidity.h index b41e26ac84..ee745b4a8a 100644 --- a/src/xrpld/app/paths/AMMLiquidity.h +++ b/src/xrpld/app/paths/AMMLiquidity.h @@ -52,7 +52,7 @@ template class AMMLiquidity { private: - inline static const Number InitialFibSeqPct = Number(5) / 20000; + inline static Number const InitialFibSeqPct = Number(5) / 20000; AMMContext& ammContext_; AccountID const ammAccountID_; std::uint32_t const tradingFee_; diff --git a/src/xrpld/app/paths/Flow.cpp b/src/xrpld/app/paths/Flow.cpp index 66793fc74c..08f8ec3f25 100644 --- a/src/xrpld/app/paths/Flow.cpp +++ b/src/xrpld/app/paths/Flow.cpp @@ -124,8 +124,8 @@ flow( } } - const bool srcIsXRP = isXRP(srcIssue.currency); - const bool dstIsXRP = isXRP(dstIssue.currency); + bool const srcIsXRP = isXRP(srcIssue.currency); + bool const dstIsXRP = isXRP(dstIssue.currency); auto const asDeliver = toAmountSpec(deliver); diff --git a/src/xrpld/app/paths/PathRequest.cpp b/src/xrpld/app/paths/PathRequest.cpp index dc2868eaf2..ed090d25aa 100644 --- a/src/xrpld/app/paths/PathRequest.cpp +++ b/src/xrpld/app/paths/PathRequest.cpp @@ -41,7 +41,7 @@ namespace ripple { PathRequest::PathRequest( Application& app, - const std::shared_ptr& subscriber, + std::shared_ptr const& subscriber, int id, PathRequests& owner, beast::Journal journal) diff --git a/src/xrpld/app/paths/PathRequest.h b/src/xrpld/app/paths/PathRequest.h index 3fdbecf7ed..e480c2b812 100644 --- a/src/xrpld/app/paths/PathRequest.h +++ b/src/xrpld/app/paths/PathRequest.h @@ -52,8 +52,8 @@ class PathRequest final : public InfoSubRequest, public: using wptr = std::weak_ptr; using pointer = std::shared_ptr; - using ref = const pointer&; - using wref = const wptr&; + using ref = pointer const&; + using wref = wptr const&; public: // path_find semantics diff --git a/src/xrpld/app/paths/Pathfinder.cpp b/src/xrpld/app/paths/Pathfinder.cpp index 379bb07e4b..e02c3ed089 100644 --- a/src/xrpld/app/paths/Pathfinder.cpp +++ b/src/xrpld/app/paths/Pathfinder.cpp @@ -77,7 +77,7 @@ struct AccountCandidate int priority; AccountID account; - static const int highPriority = 10000; + static int const highPriority = 10000; }; bool @@ -236,7 +236,8 @@ Pathfinder::findPaths( mSource = STPathElement(account, mSrcCurrency, issuer); auto issuerString = mSrcIssuer ? 
to_string(*mSrcIssuer) : std::string("none"); - JLOG(j_.trace()) << "findPaths>" << " mSrcAccount=" << mSrcAccount + JLOG(j_.trace()) << "findPaths>" + << " mSrcAccount=" << mSrcAccount << " mDstAccount=" << mDstAccount << " mDstAmount=" << mDstAmount.getFullText() << " mSrcCurrency=" << mSrcCurrency @@ -582,7 +583,7 @@ Pathfinder::getBestPaths( XRPL_ASSERT( fullLiquidityPath.empty(), "ripple::Pathfinder::getBestPaths : first empty path result"); - const bool issuerIsSender = + bool const issuerIsSender = isXRP(mSrcCurrency) || (srcIssuer == mSrcAccount); std::vector extraPathRanks; @@ -943,7 +944,7 @@ addUniquePath(STPathSet& pathSet, STPath const& path) void Pathfinder::addLink( - const STPath& currentPath, // The path to build from + STPath const& currentPath, // The path to build from STPathSet& incompletePaths, // The set of partial paths we add to int addFlags, std::function const& continueCallback) diff --git a/src/xrpld/app/paths/detail/BookStep.cpp b/src/xrpld/app/paths/detail/BookStep.cpp index 5e650230fe..4024ca190d 100644 --- a/src/xrpld/app/paths/detail/BookStep.cpp +++ b/src/xrpld/app/paths/detail/BookStep.cpp @@ -190,7 +190,8 @@ protected: logStringImpl(char const* name) const { std::ostringstream ostr; - ostr << name << ": " << "\ninIss: " << book_.in.account + ostr << name << ": " + << "\ninIss: " << book_.in.account << "\noutIss: " << book_.out.account << "\ninCur: " << book_.in.currency << "\noutCur: " << book_.out.currency; diff --git a/src/xrpld/app/paths/detail/DirectStep.cpp b/src/xrpld/app/paths/detail/DirectStep.cpp index 4e5ccea3f1..4dc9cbf20d 100644 --- a/src/xrpld/app/paths/detail/DirectStep.cpp +++ b/src/xrpld/app/paths/detail/DirectStep.cpp @@ -205,7 +205,8 @@ protected: logStringImpl(char const* name) const { std::ostringstream ostr; - ostr << name << ": " << "\nSrc: " << src_ << "\nDst: " << dst_; + ostr << name << ": " + << "\nSrc: " << src_ << "\nDst: " << dst_; return ostr.str(); } diff --git a/src/xrpld/app/paths/detail/XRPEndpointStep.cpp b/src/xrpld/app/paths/detail/XRPEndpointStep.cpp index 4f38a7b422..7fdfb3749d 100644 --- a/src/xrpld/app/paths/detail/XRPEndpointStep.cpp +++ b/src/xrpld/app/paths/detail/XRPEndpointStep.cpp @@ -132,7 +132,8 @@ protected: logStringImpl(char const* name) const { std::ostringstream ostr; - ostr << name << ": " << "\nAcc: " << acc_; + ostr << name << ": " + << "\nAcc: " << acc_; return ostr.str(); } diff --git a/src/xrpld/app/rdb/RelationalDatabase.h b/src/xrpld/app/rdb/RelationalDatabase.h index 927b08d385..25b16f04a1 100644 --- a/src/xrpld/app/rdb/RelationalDatabase.h +++ b/src/xrpld/app/rdb/RelationalDatabase.h @@ -238,8 +238,8 @@ rangeCheckedCast(C c) /* This should never happen */ UNREACHABLE("ripple::rangeCheckedCast : domain error"); JLOG(debugLog().error()) - << "rangeCheckedCast domain error:" << " value = " << c - << " min = " << std::numeric_limits::lowest() + << "rangeCheckedCast domain error:" + << " value = " << c << " min = " << std::numeric_limits::lowest() << " max: " << std::numeric_limits::max(); } diff --git a/src/xrpld/app/rdb/backend/detail/Node.cpp b/src/xrpld/app/rdb/backend/detail/Node.cpp index 019d00ed36..6a0544091b 100644 --- a/src/xrpld/app/rdb/backend/detail/Node.cpp +++ b/src/xrpld/app/rdb/backend/detail/Node.cpp @@ -1059,7 +1059,7 @@ accountTxPage( // SQL's BETWEEN uses a closed interval ([a,b]) - const char* const order = forward ? "ASC" : "DESC"; + char const* const order = forward ? 
"ASC" : "DESC"; if (findLedger == 0) { @@ -1074,10 +1074,10 @@ accountTxPage( } else { - const char* const compare = forward ? ">=" : "<="; - const std::uint32_t minLedger = + char const* const compare = forward ? ">=" : "<="; + std::uint32_t const minLedger = forward ? findLedger + 1 : options.minLedger; - const std::uint32_t maxLedger = + std::uint32_t const maxLedger = forward ? options.maxLedger : findLedger - 1; auto b58acct = toBase58(options.account); diff --git a/src/xrpld/app/rdb/detail/RelationalDatabase.cpp b/src/xrpld/app/rdb/detail/RelationalDatabase.cpp index 4a95134d70..72edbec79d 100644 --- a/src/xrpld/app/rdb/detail/RelationalDatabase.cpp +++ b/src/xrpld/app/rdb/detail/RelationalDatabase.cpp @@ -34,7 +34,7 @@ RelationalDatabase::init( { bool use_sqlite = false; - const Section& rdb_section{config.section(SECTION_RELATIONAL_DB)}; + Section const& rdb_section{config.section(SECTION_RELATIONAL_DB)}; if (!rdb_section.empty()) { if (boost::iequals(get(rdb_section, "backend"), "sqlite")) diff --git a/src/xrpld/app/tx/detail/CancelOffer.cpp b/src/xrpld/app/tx/detail/CancelOffer.cpp index 6d8c077a62..004ae1e8b9 100644 --- a/src/xrpld/app/tx/detail/CancelOffer.cpp +++ b/src/xrpld/app/tx/detail/CancelOffer.cpp @@ -35,8 +35,8 @@ CancelOffer::preflight(PreflightContext const& ctx) if (uTxFlags & tfUniversalMask) { - JLOG(ctx.j.trace()) - << "Malformed transaction: " << "Invalid flags set."; + JLOG(ctx.j.trace()) << "Malformed transaction: " + << "Invalid flags set."; return temINVALID_FLAG; } @@ -63,8 +63,8 @@ CancelOffer::preclaim(PreclaimContext const& ctx) if ((*sle)[sfSequence] <= offerSequence) { - JLOG(ctx.j.trace()) << "Malformed transaction: " << "Sequence " - << offerSequence << " is invalid."; + JLOG(ctx.j.trace()) << "Malformed transaction: " + << "Sequence " << offerSequence << " is invalid."; return temBAD_SEQUENCE; } diff --git a/src/xrpld/app/tx/detail/Change.cpp b/src/xrpld/app/tx/detail/Change.cpp index 760d5b3d98..1392d84c08 100644 --- a/src/xrpld/app/tx/detail/Change.cpp +++ b/src/xrpld/app/tx/detail/Change.cpp @@ -268,8 +268,8 @@ Change::applyAmendment() auto flags = ctx_.tx.getFlags(); - const bool gotMajority = (flags & tfGotMajority) != 0; - const bool lostMajority = (flags & tfLostMajority) != 0; + bool const gotMajority = (flags & tfGotMajority) != 0; + bool const lostMajority = (flags & tfLostMajority) != 0; if (gotMajority && lostMajority) return temINVALID_FLAG; @@ -279,7 +279,7 @@ Change::applyAmendment() bool found = false; if (amendmentObject->isFieldPresent(sfMajorities)) { - const STArray& oldMajorities = + STArray const& oldMajorities = amendmentObject->getFieldArray(sfMajorities); for (auto const& majority : oldMajorities) { diff --git a/src/xrpld/app/tx/detail/Transactor.cpp b/src/xrpld/app/tx/detail/Transactor.cpp index 390f32e02b..baba7d131e 100644 --- a/src/xrpld/app/tx/detail/Transactor.cpp +++ b/src/xrpld/app/tx/detail/Transactor.cpp @@ -283,9 +283,9 @@ Transactor::checkFee(PreclaimContext const& ctx, XRPAmount baseFee) if (balance < feePaid) { - JLOG(ctx.j.trace()) - << "Insufficient balance:" << " balance=" << to_string(balance) - << " paid=" << to_string(feePaid); + JLOG(ctx.j.trace()) << "Insufficient balance:" + << " balance=" << to_string(balance) + << " paid=" << to_string(feePaid); if ((balance > beast::zero) && !ctx.view.open()) { diff --git a/src/xrpld/conditions/detail/error.cpp b/src/xrpld/conditions/detail/error.cpp index 10a3a43921..1ba1a48186 100644 --- a/src/xrpld/conditions/detail/error.cpp +++ 
b/src/xrpld/conditions/detail/error.cpp @@ -32,7 +32,7 @@ class cryptoconditions_error_category : public std::error_category public: explicit cryptoconditions_error_category() = default; - const char* + char const* name() const noexcept override { return "cryptoconditions"; diff --git a/src/xrpld/conditions/detail/utils.h b/src/xrpld/conditions/detail/utils.h index 28943bd640..93e444a17a 100644 --- a/src/xrpld/conditions/detail/utils.h +++ b/src/xrpld/conditions/detail/utils.h @@ -188,9 +188,9 @@ parseInteger(Slice& s, std::size_t count, std::error_code& ec) return v; } - const bool isSigned = std::numeric_limits::is_signed; + bool const isSigned = std::numeric_limits::is_signed; // unsigned types may have a leading zero octet - const size_t maxLength = isSigned ? sizeof(Integer) : sizeof(Integer) + 1; + size_t const maxLength = isSigned ? sizeof(Integer) : sizeof(Integer) + 1; if (count > maxLength) { ec = error::large_size; diff --git a/src/xrpld/consensus/Consensus.h b/src/xrpld/consensus/Consensus.h index 948c00a8b2..f3265cf381 100644 --- a/src/xrpld/consensus/Consensus.h +++ b/src/xrpld/consensus/Consensus.h @@ -603,7 +603,7 @@ private: Ledger_t previousLedger_; // Transaction Sets, indexed by hash of transaction tree - hash_map acquired_; + hash_map acquired_; std::optional result_; ConsensusCloseTimes rawCloseTimes_; @@ -872,7 +872,7 @@ Consensus::timerEntry( CLOG(clog) << "Set network adjusted time to " << to_string(now) << ". "; // Check we are on the proper ledger (this may change phase_) - const auto phaseOrig = phase_; + auto const phaseOrig = phase_; CLOG(clog) << "Phase " << to_string(phaseOrig) << ". "; checkLedger(clog); if (phaseOrig != phase_) @@ -1121,8 +1121,8 @@ Consensus::checkLedger(std::unique_ptr const& clog) auto netLgr = adaptor_.getPrevLedger(prevLedgerID_, previousLedger_, mode_.get()); - CLOG(clog) << "network ledgerid " << netLgr << ", " << "previous ledger " - << prevLedgerID_ << ". "; + CLOG(clog) << "network ledgerid " << netLgr << ", " + << "previous ledger " << prevLedgerID_ << ". "; if (netLgr != prevLedgerID_) { @@ -1213,7 +1213,8 @@ Consensus::phaseOpen(std::unique_ptr const& clog) adaptor_.parms().ledgerIDLE_INTERVAL, 2 * previousLedger_.closeTimeResolution()); CLOG(clog) << "idle interval set to " << idleInterval.count() - << "ms based on " << "ledgerIDLE_INTERVAL: " + << "ms based on " + << "ledgerIDLE_INTERVAL: " << adaptor_.parms().ledgerIDLE_INTERVAL.count() << ", previous ledger close time resolution: " << previousLedger_.closeTimeResolution().count() << "ms. 
"; @@ -1261,7 +1262,8 @@ Consensus::shouldPause( << "roundTime: " << result_->roundTime.read().count() << ", " << "max consensus time: " << parms.ledgerMAX_CONSENSUS.count() << ", " << "validators: " << totalValidators << ", " - << "laggards: " << laggards << ", " << "offline: " << offline << ", " + << "laggards: " << laggards << ", " + << "offline: " << offline << ", " << "quorum: " << quorum << ")"; if (!ahead || !laggards || !totalValidators || !adaptor_.validator() || @@ -1447,7 +1449,7 @@ Consensus::closeLedger(std::unique_ptr const& clog) if (acquired_.emplace(result_->txns.id(), result_->txns).second) adaptor_.share(result_->txns); - const auto mode = mode_.get(); + auto const mode = mode_.get(); CLOG(clog) << "closeLedger transitioned to ConsensusPhase::establish, mode: " << to_string(mode) @@ -1622,8 +1624,8 @@ Consensus::updateOurPositions( if (!haveCloseTimeConsensus_) { JLOG(j_.debug()) - << "No CT consensus:" << " Proposers:" - << currPeerPositions_.size() + << "No CT consensus:" + << " Proposers:" << currPeerPositions_.size() << " Mode:" << to_string(mode_.get()) << " Thresh:" << threshConsensus << " Pos:" << consensusCloseTime.time_since_epoch().count(); diff --git a/src/xrpld/consensus/DisputedTx.h b/src/xrpld/consensus/DisputedTx.h index 513f240829..4ed31b77ca 100644 --- a/src/xrpld/consensus/DisputedTx.h +++ b/src/xrpld/consensus/DisputedTx.h @@ -149,8 +149,7 @@ public: @return bool Whether the peer changed its vote. (A new vote counts as a change.) */ - [[nodiscard]] - bool + [[nodiscard]] bool setVote(NodeID_t const& peer, bool votesYes); /** Remove a peer's vote diff --git a/src/xrpld/core/Job.h b/src/xrpld/core/Job.h index 66048f4f21..11b0b9b72b 100644 --- a/src/xrpld/core/Job.h +++ b/src/xrpld/core/Job.h @@ -132,13 +132,13 @@ public: // These comparison operators make the jobs sort in priority order // in the job set bool - operator<(const Job& j) const; + operator<(Job const& j) const; bool - operator>(const Job& j) const; + operator>(Job const& j) const; bool - operator<=(const Job& j) const; + operator<=(Job const& j) const; bool - operator>=(const Job& j) const; + operator>=(Job const& j) const; private: JobType mType; diff --git a/src/xrpld/core/detail/Config.cpp b/src/xrpld/core/detail/Config.cpp index 60a359b580..b132987d08 100644 --- a/src/xrpld/core/detail/Config.cpp +++ b/src/xrpld/core/detail/Config.cpp @@ -158,7 +158,7 @@ static_assert( #define SECTION_DEFAULT_NAME "" IniFileSections -parseIniFile(std::string const& strInput, const bool bTrim) +parseIniFile(std::string const& strInput, bool const bTrim) { std::string strData(strInput); std::vector vLines; @@ -490,7 +490,7 @@ Config::loadFromString(std::string const& fileContents) // if the user has specified ip:port then replace : with a space. 
{ auto replaceColons = [](std::vector& strVec) { - const static std::regex e(":([0-9]+)$"); + static std::regex const e(":([0-9]+)$"); for (auto& line : strVec) { // skip anything that might be an ipv6 address diff --git a/src/xrpld/core/detail/Job.cpp b/src/xrpld/core/detail/Job.cpp index d62f49ff49..8e9a74b535 100644 --- a/src/xrpld/core/detail/Job.cpp +++ b/src/xrpld/core/detail/Job.cpp @@ -73,7 +73,7 @@ Job::doJob() } bool -Job::operator>(const Job& j) const +Job::operator>(Job const& j) const { if (mType < j.mType) return true; @@ -85,7 +85,7 @@ Job::operator>(const Job& j) const } bool -Job::operator>=(const Job& j) const +Job::operator>=(Job const& j) const { if (mType < j.mType) return true; @@ -97,7 +97,7 @@ Job::operator>=(const Job& j) const } bool -Job::operator<(const Job& j) const +Job::operator<(Job const& j) const { if (mType < j.mType) return false; @@ -109,7 +109,7 @@ Job::operator<(const Job& j) const } bool -Job::operator<=(const Job& j) const +Job::operator<=(Job const& j) const { if (mType < j.mType) return false; diff --git a/src/xrpld/core/detail/SociDB.cpp b/src/xrpld/core/detail/SociDB.cpp index 96b1f97977..5b298dac43 100644 --- a/src/xrpld/core/detail/SociDB.cpp +++ b/src/xrpld/core/detail/SociDB.cpp @@ -311,7 +311,7 @@ protected: sqliteWALHook( void* cpId, sqlite_api::sqlite3* conn, - const char* dbName, + char const* dbName, int walSize) { if (walSize >= checkpointPageCount) diff --git a/src/xrpld/ledger/ReadView.h b/src/xrpld/ledger/ReadView.h index 53e12083f3..4c1986be4e 100644 --- a/src/xrpld/ledger/ReadView.h +++ b/src/xrpld/ledger/ReadView.h @@ -258,7 +258,7 @@ public: using digest_type = uint256; DigestAwareReadView() = default; - DigestAwareReadView(const DigestAwareReadView&) = default; + DigestAwareReadView(DigestAwareReadView const&) = default; /** Return the digest associated with the key. diff --git a/src/xrpld/ledger/View.h b/src/xrpld/ledger/View.h index bb04fa8b87..4c70f5dc48 100644 --- a/src/xrpld/ledger/View.h +++ b/src/xrpld/ledger/View.h @@ -339,7 +339,7 @@ areCompatible( ReadView const& validLedger, ReadView const& testLedger, beast::Journal::Stream& s, - const char* reason); + char const* reason); [[nodiscard]] bool areCompatible( @@ -347,7 +347,7 @@ areCompatible( LedgerIndex validIndex, ReadView const& testLedger, beast::Journal::Stream& s, - const char* reason); + char const* reason); //------------------------------------------------------------------------------ // @@ -440,14 +440,14 @@ describeOwnerDir(AccountID const& account); [[nodiscard]] TER trustCreate( ApplyView& view, - const bool bSrcHigh, + bool const bSrcHigh, AccountID const& uSrcAccountID, AccountID const& uDstAccountID, uint256 const& uIndex, // --> ripple state entry SLE::ref sleAccount, // --> the account being set. - const bool bAuth, // --> authorize account. - const bool bNoRipple, // --> others cannot ripple through - const bool bFreeze, // --> funds cannot leave + bool const bAuth, // --> authorize account. + bool const bNoRipple, // --> others cannot ripple through + bool const bFreeze, // --> funds cannot leave bool bDeepFreeze, // --> can neither receive nor send funds STAmount const& saBalance, // --> balance of account being set. 
// Issuer should be noAccount() diff --git a/src/xrpld/ledger/detail/RawStateTable.h b/src/xrpld/ledger/detail/RawStateTable.h index 2db37d9833..37597aa678 100644 --- a/src/xrpld/ledger/detail/RawStateTable.h +++ b/src/xrpld/ledger/detail/RawStateTable.h @@ -126,7 +126,7 @@ private: sleAction, std::less, boost::container::pmr::polymorphic_allocator< - std::pair>>; + std::pair>>; // monotonic_resource_ must outlive `items_`. Make a pointer so it may be // easily moved. std::unique_ptr diff --git a/src/xrpld/ledger/detail/View.cpp b/src/xrpld/ledger/detail/View.cpp index 2a5224ebf1..af81a6b7bb 100644 --- a/src/xrpld/ledger/detail/View.cpp +++ b/src/xrpld/ledger/detail/View.cpp @@ -381,7 +381,8 @@ accountHolds( amount.clear(Issue{currency, issuer}); } - JLOG(j.trace()) << "accountHolds:" << " account=" << to_string(account) + JLOG(j.trace()) << "accountHolds:" + << " account=" << to_string(account) << " amount=" << amount.getFullText(); return view.balanceHook(account, issuer, amount); @@ -530,7 +531,8 @@ xrpLiquid( STAmount const amount = (balance < reserve) ? STAmount{0} : balance - reserve; - JLOG(j.trace()) << "accountHolds:" << " account=" << to_string(id) + JLOG(j.trace()) << "accountHolds:" + << " account=" << to_string(id) << " amount=" << amount.getFullText() << " fullBalance=" << fullBalance.getFullText() << " balance=" << balance.getFullText() @@ -675,7 +677,7 @@ areCompatible( ReadView const& validLedger, ReadView const& testLedger, beast::Journal::Stream& s, - const char* reason) + char const* reason) { bool ret = true; @@ -739,7 +741,7 @@ areCompatible( LedgerIndex validIndex, ReadView const& testLedger, beast::Journal::Stream& s, - const char* reason) + char const* reason) { bool ret = true; @@ -932,14 +934,14 @@ describeOwnerDir(AccountID const& account) TER trustCreate( ApplyView& view, - const bool bSrcHigh, + bool const bSrcHigh, AccountID const& uSrcAccountID, AccountID const& uDstAccountID, uint256 const& uIndex, // --> ripple state entry SLE::ref sleAccount, // --> the account being set. - const bool bAuth, // --> authorize account. - const bool bNoRipple, // --> others cannot ripple through - const bool bFreeze, // --> funds cannot leave + bool const bAuth, // --> authorize account. + bool const bNoRipple, // --> others cannot ripple through + bool const bFreeze, // --> funds cannot leave bool bDeepFreeze, // --> can neither receive nor send funds STAmount const& saBalance, // --> balance of account being set. 
// Issuer should be noAccount() @@ -975,8 +977,8 @@ trustCreate( if (!highNode) return tecDIR_FULL; - const bool bSetDst = saLimit.getIssuer() == uDstAccountID; - const bool bSetHigh = bSrcHigh ^ bSetDst; + bool const bSetDst = saLimit.getIssuer() == uDstAccountID; + bool const bSetHigh = bSrcHigh ^ bSetDst; XRPL_ASSERT(sleAccount, "ripple::trustCreate : non-null SLE"); if (!sleAccount) diff --git a/src/xrpld/net/AutoSocket.h b/src/xrpld/net/AutoSocket.h index 7486d6128d..d06787340b 100644 --- a/src/xrpld/net/AutoSocket.h +++ b/src/xrpld/net/AutoSocket.h @@ -168,7 +168,7 @@ public: template void - async_read_some(const Seq& buffers, Handler handler) + async_read_some(Seq const& buffers, Handler handler) { if (isSecure()) mSocket->async_read_some(buffers, handler); @@ -178,7 +178,7 @@ public: template void - async_read_until(const Seq& buffers, Condition condition, Handler handler) + async_read_until(Seq const& buffers, Condition condition, Handler handler) { if (isSecure()) boost::asio::async_read_until( @@ -218,7 +218,7 @@ public: template void - async_write(const Buf& buffers, Handler handler) + async_write(Buf const& buffers, Handler handler) { if (isSecure()) boost::asio::async_write(*mSocket, buffers, handler); @@ -240,7 +240,7 @@ public: template void - async_read(const Buf& buffers, Condition cond, Handler handler) + async_read(Buf const& buffers, Condition cond, Handler handler) { if (isSecure()) boost::asio::async_read(*mSocket, buffers, cond, handler); @@ -263,7 +263,7 @@ public: template void - async_read(const Buf& buffers, Handler handler) + async_read(Buf const& buffers, Handler handler) { if (isSecure()) boost::asio::async_read(*mSocket, buffers, handler); @@ -273,7 +273,7 @@ public: template void - async_write_some(const Seq& buffers, Handler handler) + async_write_some(Seq const& buffers, Handler handler) { if (isSecure()) mSocket->async_write_some(buffers, handler); @@ -285,7 +285,7 @@ protected: void handle_autodetect( callback cbFunc, - const error_code& ec, + error_code const& ec, size_t bytesTransferred) { using namespace ripple; diff --git a/src/xrpld/net/HTTPClient.h b/src/xrpld/net/HTTPClient.h index 2a33500aa4..a11b885290 100644 --- a/src/xrpld/net/HTTPClient.h +++ b/src/xrpld/net/HTTPClient.h @@ -50,12 +50,12 @@ public: get(bool bSSL, boost::asio::io_service& io_service, std::deque deqSites, - const unsigned short port, + unsigned short const port, std::string const& strPath, std::size_t responseMax, // if no Content-Length header std::chrono::seconds timeout, std::function complete, beast::Journal& j); @@ -64,12 +64,12 @@ public: get(bool bSSL, boost::asio::io_service& io_service, std::string strSite, - const unsigned short port, + unsigned short const port, std::string const& strPath, std::size_t responseMax, // if no Content-Length header std::chrono::seconds timeout, std::function complete, beast::Journal& j); @@ -79,13 +79,13 @@ public: bool bSSL, boost::asio::io_service& io_service, std::string strSite, - const unsigned short port, + unsigned short const port, std::function< void(boost::asio::streambuf& sb, std::string const& strHost)> build, std::size_t responseMax, // if no Content-Length header std::chrono::seconds timeout, std::function complete, beast::Journal& j); diff --git a/src/xrpld/net/HTTPClientSSLContext.h b/src/xrpld/net/HTTPClientSSLContext.h index 2da5ac38de..68f91b18b0 100644 --- a/src/xrpld/net/HTTPClientSSLContext.h +++ b/src/xrpld/net/HTTPClientSSLContext.h @@ -191,7 +191,7 @@ public: private: boost::asio::ssl::context ssl_context_; 
beast::Journal const j_; - const bool verify_; + bool const verify_; }; } // namespace ripple diff --git a/src/xrpld/net/InfoSub.h b/src/xrpld/net/InfoSub.h index c8762c31fd..bc6c6e781c 100644 --- a/src/xrpld/net/InfoSub.h +++ b/src/xrpld/net/InfoSub.h @@ -57,7 +57,7 @@ public: // aliases. using wptr = std::weak_ptr; - using ref = const std::shared_ptr&; + using ref = std::shared_ptr const&; using Consumer = Resource::Consumer; @@ -224,7 +224,7 @@ public: clearRequest(); void - setRequest(const std::shared_ptr& req); + setRequest(std::shared_ptr const& req); std::shared_ptr const& getRequest(); diff --git a/src/xrpld/net/RPCCall.h b/src/xrpld/net/RPCCall.h index 612d80c663..4c6d25ca57 100644 --- a/src/xrpld/net/RPCCall.h +++ b/src/xrpld/net/RPCCall.h @@ -46,20 +46,20 @@ namespace RPCCall { int fromCommandLine( Config const& config, - const std::vector& vCmd, + std::vector const& vCmd, Logs& logs); void fromNetwork( boost::asio::io_service& io_service, std::string const& strIp, - const std::uint16_t iPort, + std::uint16_t const iPort, std::string const& strUsername, std::string const& strPassword, std::string const& strPath, std::string const& strMethod, Json::Value const& jvParams, - const bool bSSL, + bool const bSSL, bool quiet, Logs& logs, std::function callbackFuncP = diff --git a/src/xrpld/net/detail/HTTPClient.cpp b/src/xrpld/net/detail/HTTPClient.cpp index 0ead4cf0dc..901237e1e3 100644 --- a/src/xrpld/net/detail/HTTPClient.cpp +++ b/src/xrpld/net/detail/HTTPClient.cpp @@ -53,7 +53,7 @@ class HTTPClientImp : public std::enable_shared_from_this, public: HTTPClientImp( boost::asio::io_service& io_service, - const unsigned short port, + unsigned short const port, std::size_t maxResponseSize, beast::Journal& j) : mSocket(io_service, httpClientSSLContext->context()) @@ -95,7 +95,7 @@ public: void(boost::asio::streambuf& sb, std::string const& strHost)> build, std::chrono::seconds timeout, std::function complete) { @@ -116,7 +116,7 @@ public: std::string const& strPath, std::chrono::seconds timeout, std::function complete) { @@ -179,7 +179,7 @@ public: } void - handleDeadline(const boost::system::error_code& ecResult) + handleDeadline(boost::system::error_code const& ecResult) { if (ecResult == boost::asio::error::operation_aborted) { @@ -218,7 +218,7 @@ public: } void - handleShutdown(const boost::system::error_code& ecResult) + handleShutdown(boost::system::error_code const& ecResult) { if (ecResult) { @@ -229,7 +229,7 @@ public: void handleResolve( - const boost::system::error_code& ecResult, + boost::system::error_code const& ecResult, boost::asio::ip::tcp::resolver::iterator itrEndpoint) { if (!mShutdown) @@ -261,7 +261,7 @@ public: } void - handleConnect(const boost::system::error_code& ecResult) + handleConnect(boost::system::error_code const& ecResult) { if (!mShutdown) mShutdown = ecResult; @@ -305,7 +305,7 @@ public: } void - handleRequest(const boost::system::error_code& ecResult) + handleRequest(boost::system::error_code const& ecResult) { if (!mShutdown) mShutdown = ecResult; @@ -334,7 +334,7 @@ public: void handleWrite( - const boost::system::error_code& ecResult, + boost::system::error_code const& ecResult, std::size_t bytes_transferred) { if (!mShutdown) @@ -363,7 +363,7 @@ public: void handleHeader( - const boost::system::error_code& ecResult, + boost::system::error_code const& ecResult, std::size_t bytes_transferred) { std::string strHeader{ @@ -435,7 +435,7 @@ public: void handleData( - const boost::system::error_code& ecResult, + boost::system::error_code const& 
ecResult, std::size_t bytes_transferred) { if (!mShutdown) @@ -467,7 +467,7 @@ public: // Call cancel the deadline timer and invoke the completion routine. void invokeComplete( - const boost::system::error_code& ecResult, + boost::system::error_code const& ecResult, int iStatus = 0, std::string const& strData = "") { @@ -517,13 +517,13 @@ private: boost::asio::streambuf mHeader; boost::asio::streambuf mResponse; std::string mBody; - const unsigned short mPort; + unsigned short const mPort; std::size_t const maxResponseSize_; int mStatus; std::function mBuild; std::function mComplete; @@ -545,12 +545,12 @@ HTTPClient::get( bool bSSL, boost::asio::io_service& io_service, std::deque deqSites, - const unsigned short port, + unsigned short const port, std::string const& strPath, std::size_t responseMax, std::chrono::seconds timeout, std::function complete, beast::Journal& j) @@ -565,12 +565,12 @@ HTTPClient::get( bool bSSL, boost::asio::io_service& io_service, std::string strSite, - const unsigned short port, + unsigned short const port, std::string const& strPath, std::size_t responseMax, std::chrono::seconds timeout, std::function complete, beast::Journal& j) @@ -587,13 +587,13 @@ HTTPClient::request( bool bSSL, boost::asio::io_service& io_service, std::string strSite, - const unsigned short port, + unsigned short const port, std::function setRequest, std::size_t responseMax, std::chrono::seconds timeout, std::function complete, beast::Journal& j) diff --git a/src/xrpld/net/detail/InfoSub.cpp b/src/xrpld/net/detail/InfoSub.cpp index 7acd7f07a7..9f394cf08e 100644 --- a/src/xrpld/net/detail/InfoSub.cpp +++ b/src/xrpld/net/detail/InfoSub.cpp @@ -124,12 +124,12 @@ InfoSub::clearRequest() } void -InfoSub::setRequest(const std::shared_ptr& req) +InfoSub::setRequest(std::shared_ptr const& req) { request_ = req; } -const std::shared_ptr& +std::shared_ptr const& InfoSub::getRequest() { return request_; diff --git a/src/xrpld/net/detail/RPCCall.cpp b/src/xrpld/net/detail/RPCCall.cpp index 92f48f8812..dd1208aa24 100644 --- a/src/xrpld/net/detail/RPCCall.cpp +++ b/src/xrpld/net/detail/RPCCall.cpp @@ -1041,7 +1041,7 @@ private: if (jvParams.size() >= 3) { - const auto offset = jvParams.size() == 3 ? 0 : 1; + auto const offset = jvParams.size() == 3 ? 
0 : 1; jvRequest[jss::min_ledger] = jvParams[1u + offset].asString(); jvRequest[jss::max_ledger] = jvParams[2u + offset].asString(); @@ -1105,7 +1105,7 @@ private: parseGatewayBalances(Json::Value const& jvParams) { unsigned int index = 0; - const unsigned int size = jvParams.size(); + unsigned int const size = jvParams.size(); Json::Value jvRequest{Json::objectValue}; @@ -1189,7 +1189,7 @@ public: struct Command { - const char* name; + char const* name; parseFuncPtr parse; int minParams; int maxParams; @@ -1351,7 +1351,7 @@ struct RPCCallImp static bool onResponse( std::function callbackFuncP, - const boost::system::error_code& ecResult, + boost::system::error_code const& ecResult, int iStatus, std::string const& strData, beast::Journal j) @@ -1614,7 +1614,7 @@ namespace RPCCall { int fromCommandLine( Config const& config, - const std::vector& vCmd, + std::vector const& vCmd, Logs& logs) { auto const result = @@ -1631,14 +1631,14 @@ void fromNetwork( boost::asio::io_service& io_service, std::string const& strIp, - const std::uint16_t iPort, + std::uint16_t const iPort, std::string const& strUsername, std::string const& strPassword, std::string const& strPath, std::string const& strMethod, Json::Value const& jvParams, - const bool bSSL, - const bool quiet, + bool const bSSL, + bool const quiet, Logs& logs, std::function callbackFuncP, std::unordered_map headers) diff --git a/src/xrpld/net/detail/RPCSub.cpp b/src/xrpld/net/detail/RPCSub.cpp index 994292e7b8..3f0c923e13 100644 --- a/src/xrpld/net/detail/RPCSub.cpp +++ b/src/xrpld/net/detail/RPCSub.cpp @@ -167,7 +167,7 @@ private: true, logs_); } - catch (const std::exception& e) + catch (std::exception const& e) { JLOG(j_.info()) << "RPCCall::fromNetwork exception: " << e.what(); diff --git a/src/xrpld/net/detail/RegisterSSLCerts.cpp b/src/xrpld/net/detail/RegisterSSLCerts.cpp index 0dbf036e01..5a710323ad 100644 --- a/src/xrpld/net/detail/RegisterSSLCerts.cpp +++ b/src/xrpld/net/detail/RegisterSSLCerts.cpp @@ -80,7 +80,7 @@ registerSSLCerts( while ((pContext = CertEnumCertificatesInStore(hStore.get(), pContext)) != NULL) { - const unsigned char* pbCertEncoded = pContext->pbCertEncoded; + unsigned char const* pbCertEncoded = pContext->pbCertEncoded; std::unique_ptr x509{ d2i_X509(NULL, &pbCertEncoded, pContext->cbCertEncoded), X509_free}; if (!x509) diff --git a/src/xrpld/nodestore/detail/DummyScheduler.cpp b/src/xrpld/nodestore/detail/DummyScheduler.cpp index 76cd89610c..9df1374189 100644 --- a/src/xrpld/nodestore/detail/DummyScheduler.cpp +++ b/src/xrpld/nodestore/detail/DummyScheduler.cpp @@ -30,12 +30,12 @@ DummyScheduler::scheduleTask(Task& task) } void -DummyScheduler::onFetch(const FetchReport& report) +DummyScheduler::onFetch(FetchReport const& report) { } void -DummyScheduler::onBatchWrite(const BatchWriteReport& report) +DummyScheduler::onBatchWrite(BatchWriteReport const& report) { } diff --git a/src/xrpld/overlay/PeerSet.h b/src/xrpld/overlay/PeerSet.h index 6fb4b65643..4ee6b8d7c0 100644 --- a/src/xrpld/overlay/PeerSet.h +++ b/src/xrpld/overlay/PeerSet.h @@ -68,7 +68,7 @@ public: std::shared_ptr const& peer) = 0; /** get the set of ids of previously added peers */ - virtual const std::set& + virtual std::set const& getPeerIds() const = 0; }; diff --git a/src/xrpld/overlay/Slot.h b/src/xrpld/overlay/Slot.h index b2db772f5b..6ae3c9a142 100644 --- a/src/xrpld/overlay/Slot.h +++ b/src/xrpld/overlay/Slot.h @@ -157,7 +157,7 @@ private: deletePeer(PublicKey const& validator, id_t id, bool erase); /** Get the time of the last peer 
selection round */ - const time_point& + time_point const& getLastSelected() const { return lastSelected_; diff --git a/src/xrpld/overlay/detail/PeerImp.cpp b/src/xrpld/overlay/detail/PeerImp.cpp index 372ad9de53..bca2cfd8c7 100644 --- a/src/xrpld/overlay/detail/PeerImp.cpp +++ b/src/xrpld/overlay/detail/PeerImp.cpp @@ -130,7 +130,7 @@ PeerImp::PeerImp( PeerImp::~PeerImp() { - const bool inCluster{cluster()}; + bool const inCluster{cluster()}; overlay_.deletePeer(id_); overlay_.onPeerDeactivate(id_); @@ -1270,8 +1270,8 @@ PeerImp::handleTransaction( { // If we've never been in synch, there's nothing we can do // with a transaction - JLOG(p_journal_.debug()) - << "Ignoring incoming transaction: " << "Need network ledger"; + JLOG(p_journal_.debug()) << "Ignoring incoming transaction: " + << "Need network ledger"; return; } @@ -2536,7 +2536,7 @@ PeerImp::onMessage(std::shared_ptr const& m) for (int i = 0; i < packet.objects_size(); ++i) { - const protocol::TMIndexedObject& obj = packet.objects(i); + protocol::TMIndexedObject const& obj = packet.objects(i); if (obj.has_hash() && stringIsUint256Sized(obj.hash())) { @@ -2740,7 +2740,7 @@ PeerImp::addLedger( } void -PeerImp::doFetchPack(const std::shared_ptr& packet) +PeerImp::doFetchPack(std::shared_ptr const& packet) { // VFALCO TODO Invert this dependency using an observer and shared state // object. Don't queue fetch pack jobs if we're under load or we already @@ -3441,19 +3441,19 @@ PeerImp::getScore(bool haveItem) const { // Random component of score, used to break ties and avoid // overloading the "best" peer - static const int spRandomMax = 9999; + static int const spRandomMax = 9999; // Score for being very likely to have the thing we are // look for; should be roughly spRandomMax - static const int spHaveItem = 10000; + static int const spHaveItem = 10000; // Score reduction for each millisecond of latency; should // be roughly spRandomMax divided by the maximum reasonable // latency - static const int spLatency = 30; + static int const spLatency = 30; // Penalty for unknown latency; should be roughly spRandomMax - static const int spNoLatency = 8000; + static int const spNoLatency = 8000; int score = rand_int(spRandomMax); diff --git a/src/xrpld/overlay/detail/PeerImp.h b/src/xrpld/overlay/detail/PeerImp.h index 9835f4c6f4..8fbafa1ee9 100644 --- a/src/xrpld/overlay/detail/PeerImp.h +++ b/src/xrpld/overlay/detail/PeerImp.h @@ -602,7 +602,7 @@ private: std::lock_guard const& lockedRecentLock); void - doFetchPack(const std::shared_ptr& packet); + doFetchPack(std::shared_ptr const& packet); void onValidatorListMessage( diff --git a/src/xrpld/overlay/detail/PeerSet.cpp b/src/xrpld/overlay/detail/PeerSet.cpp index 909b20c307..611728839c 100644 --- a/src/xrpld/overlay/detail/PeerSet.cpp +++ b/src/xrpld/overlay/detail/PeerSet.cpp @@ -42,7 +42,7 @@ public: protocol::MessageType type, std::shared_ptr const& peer) override; - const std::set& + std::set const& getPeerIds() const override; private: @@ -117,7 +117,7 @@ PeerSetImpl::sendRequest( } } -const std::set& +std::set const& PeerSetImpl::getPeerIds() const { return peers_; @@ -171,7 +171,7 @@ public: JLOG(j_.error()) << "DummyPeerSet sendRequest should not be called"; } - const std::set& + std::set const& getPeerIds() const override { static std::set emptyPeers; diff --git a/src/xrpld/overlay/detail/TrafficCount.h b/src/xrpld/overlay/detail/TrafficCount.h index 9d1cce503b..e93163683b 100644 --- a/src/xrpld/overlay/detail/TrafficCount.h +++ b/src/xrpld/overlay/detail/TrafficCount.h @@ -246,7 
+246,7 @@ public: static std::string to_string(category cat) { - static const std::unordered_map category_map = { + static std::unordered_map const category_map = { {base, "overhead"}, {cluster, "overhead_cluster"}, {overlay, "overhead_overlay"}, diff --git a/src/xrpld/overlay/detail/ZeroCopyStream.h b/src/xrpld/overlay/detail/ZeroCopyStream.h index 41d94578b0..87a5e10bc2 100644 --- a/src/xrpld/overlay/detail/ZeroCopyStream.h +++ b/src/xrpld/overlay/detail/ZeroCopyStream.h @@ -49,7 +49,7 @@ public: explicit ZeroCopyInputStream(Buffers const& buffers); bool - Next(const void** data, int* size) override; + Next(void const** data, int* size) override; void BackUp(int count) override; @@ -76,7 +76,7 @@ ZeroCopyInputStream::ZeroCopyInputStream(Buffers const& buffers) template bool -ZeroCopyInputStream::Next(const void** data, int* size) +ZeroCopyInputStream::Next(void const** data, int* size) { *data = boost::asio::buffer_cast(pos_); *size = boost::asio::buffer_size(pos_); diff --git a/src/xrpld/peerfinder/detail/Logic.h b/src/xrpld/peerfinder/detail/Logic.h index b3922f63b3..e23bbc29e1 100644 --- a/src/xrpld/peerfinder/detail/Logic.h +++ b/src/xrpld/peerfinder/detail/Logic.h @@ -1132,9 +1132,9 @@ public: } else { - JLOG(m_journal.error()) - << beast::leftw(18) << "Logic failed " << "'" << source->name() - << "' fetch, " << results.error.message(); + JLOG(m_journal.error()) << beast::leftw(18) << "Logic failed " + << "'" << source->name() << "' fetch, " + << results.error.message(); } } diff --git a/src/xrpld/perflog/PerfLog.h b/src/xrpld/perflog/PerfLog.h index 58df185e09..5212752ec7 100644 --- a/src/xrpld/perflog/PerfLog.h +++ b/src/xrpld/perflog/PerfLog.h @@ -186,9 +186,9 @@ template auto measureDurationAndLog( Func&& func, - const std::string& actionDescription, + std::string const& actionDescription, std::chrono::duration maxDelay, - const beast::Journal& journal) + beast::Journal const& journal) { auto start_time = std::chrono::high_resolution_clock::now(); diff --git a/src/xrpld/rpc/CTID.h b/src/xrpld/rpc/CTID.h index 042b79b527..be531c536a 100644 --- a/src/xrpld/rpc/CTID.h +++ b/src/xrpld/rpc/CTID.h @@ -57,12 +57,12 @@ encodeCTID(uint32_t ledgerSeq, uint32_t txnIndex, uint32_t networkID) noexcept template inline std::optional> -decodeCTID(const T ctid) noexcept +decodeCTID(T const ctid) noexcept { uint64_t ctidValue{0}; if constexpr ( std::is_same_v || std::is_same_v || - std::is_same_v || std::is_same_v) + std::is_same_v || std::is_same_v) { std::string const ctidString(ctid); diff --git a/src/xrpld/rpc/detail/Handler.cpp b/src/xrpld/rpc/detail/Handler.cpp index abdfa921bb..dd670529a5 100644 --- a/src/xrpld/rpc/detail/Handler.cpp +++ b/src/xrpld/rpc/detail/Handler.cpp @@ -224,7 +224,7 @@ private: } template - explicit HandlerTable(const Handler (&entries)[N]) + explicit HandlerTable(Handler const (&entries)[N]) { for (auto const& entry : entries) { diff --git a/src/xrpld/rpc/detail/Handler.h b/src/xrpld/rpc/detail/Handler.h index e0fb66f6fd..8c263a90ab 100644 --- a/src/xrpld/rpc/detail/Handler.h +++ b/src/xrpld/rpc/detail/Handler.h @@ -47,7 +47,7 @@ struct Handler template using Method = std::function; - const char* name_; + char const* name_; Method valueMethod_; Role role_; RPC::Condition condition_; diff --git a/src/xrpld/rpc/detail/RPCHelpers.cpp b/src/xrpld/rpc/detail/RPCHelpers.cpp index 3449a744d6..347a984d15 100644 --- a/src/xrpld/rpc/detail/RPCHelpers.cpp +++ b/src/xrpld/rpc/detail/RPCHelpers.cpp @@ -962,7 +962,7 @@ chooseLedgerEntryType(Json::Value const& params) // against 
the canonical name (case-insensitive) or the RPC name // (case-sensitive). auto const filter = p.asString(); - const auto iter = + auto const iter = std::ranges::find_if(types, [&filter](decltype(types.front())& t) { return boost::iequals(std::get<0>(t), filter) || std::get<1>(t) == filter; diff --git a/src/xrpld/rpc/detail/RPCHelpers.h b/src/xrpld/rpc/detail/RPCHelpers.h index 89af005292..31b9761058 100644 --- a/src/xrpld/rpc/detail/RPCHelpers.h +++ b/src/xrpld/rpc/detail/RPCHelpers.h @@ -257,7 +257,7 @@ isAccountObjectsValidType(LedgerEntryType const& type); * @return the api version number */ unsigned int -getAPIVersionNumber(const Json::Value& value, bool betaEnabled); +getAPIVersionNumber(Json::Value const& value, bool betaEnabled); /** Return a ledger based on ledger_hash or ledger_index, or an RPC error */ diff --git a/src/xrpld/rpc/detail/ServerHandler.cpp b/src/xrpld/rpc/detail/ServerHandler.cpp index a5aca59657..0c84e59413 100644 --- a/src/xrpld/rpc/detail/ServerHandler.cpp +++ b/src/xrpld/rpc/detail/ServerHandler.cpp @@ -1027,7 +1027,7 @@ ServerHandler::processRequest( if (auto stream = m_journal.debug()) { - static const int maxSize = 10000; + static int const maxSize = 10000; if (response.size() <= maxSize) stream << "Reply: " << response; else diff --git a/src/xrpld/rpc/detail/TransactionSign.cpp b/src/xrpld/rpc/detail/TransactionSign.cpp index 3f388e636f..9387aba505 100644 --- a/src/xrpld/rpc/detail/TransactionSign.cpp +++ b/src/xrpld/rpc/detail/TransactionSign.cpp @@ -746,8 +746,7 @@ transactionFormatResultImpl(Transaction::pointer tpTrans, unsigned apiVersion) //------------------------------------------------------------------------------ -[[nodiscard]] -static XRPAmount +[[nodiscard]] static XRPAmount getTxFee(Application const& app, Config const& config, Json::Value tx) { // autofilling only needed in this function so that the `STParsedJSONObject` @@ -945,7 +944,7 @@ transactionSign( if (!preprocResult.second) return preprocResult.first; - std::shared_ptr ledger = app.openLedger().current(); + std::shared_ptr ledger = app.openLedger().current(); // Make sure the STTx makes a legitimate Transaction. std::pair txn = transactionConstructImpl(preprocResult.second, ledger->rules(), app); @@ -1101,7 +1100,7 @@ transactionSignFor( JLOG(j.debug()) << "transactionSignFor: " << jvRequest; // Verify presence of the signer's account field. - const char accountField[] = "account"; + char const accountField[] = "account"; if (!jvRequest.isMember(accountField)) return RPC::missing_field_error(accountField); diff --git a/src/xrpld/rpc/handlers/GetAggregatePrice.cpp b/src/xrpld/rpc/handlers/GetAggregatePrice.cpp index 8001071f85..33a88ba78f 100644 --- a/src/xrpld/rpc/handlers/GetAggregatePrice.cpp +++ b/src/xrpld/rpc/handlers/GetAggregatePrice.cpp @@ -108,8 +108,8 @@ iteratePriceData( return; oracle = isNew - ? &static_cast(node.peekAtField(sfNewFields)) - : &static_cast( + ? 
&static_cast(node.peekAtField(sfNewFields)) + : &static_cast( node.peekAtField(sfFinalFields)); break; } diff --git a/src/xrpld/rpc/handlers/GetCounts.cpp b/src/xrpld/rpc/handlers/GetCounts.cpp index 9f57f08a24..3c1d8cccdd 100644 --- a/src/xrpld/rpc/handlers/GetCounts.cpp +++ b/src/xrpld/rpc/handlers/GetCounts.cpp @@ -37,7 +37,7 @@ static void textTime( std::string& text, UptimeClock::time_point& seconds, - const char* unitName, + char const* unitName, std::chrono::seconds unitVal) { auto i = seconds.time_since_epoch() / unitVal; diff --git a/src/xrpld/rpc/handlers/LedgerEntry.cpp b/src/xrpld/rpc/handlers/LedgerEntry.cpp index ade9b9578b..d2f188aef3 100644 --- a/src/xrpld/rpc/handlers/LedgerEntry.cpp +++ b/src/xrpld/rpc/handlers/LedgerEntry.cpp @@ -959,7 +959,7 @@ doLedgerEntry(RPC::JsonContext& context) try { bool found = false; - for (const auto& ledgerEntry : ledgerEntryParsers) + for (auto const& ledgerEntry : ledgerEntryParsers) { if (context.params.isMember(ledgerEntry.fieldName)) { diff --git a/src/xrpld/rpc/handlers/ServerInfo.cpp b/src/xrpld/rpc/handlers/ServerInfo.cpp index b303402e2d..eac7f2021f 100644 --- a/src/xrpld/rpc/handlers/ServerInfo.cpp +++ b/src/xrpld/rpc/handlers/ServerInfo.cpp @@ -287,7 +287,7 @@ ServerDefinitions::ServerDefinitions() : defs_{Json::objectValue} // generate hash { - const std::string out = Json::FastWriter().write(defs_); + std::string const out = Json::FastWriter().write(defs_); defsHash_ = ripple::sha512Half(ripple::Slice{out.data(), out.size()}); defs_[jss::hash] = to_string(defsHash_); } @@ -308,7 +308,7 @@ doServerDefinitions(RPC::JsonContext& context) return RPC::invalid_field_error(jss::hash); } - static const detail::ServerDefinitions defs{}; + static detail::ServerDefinitions const defs{}; if (defs.hashMatches(hash)) { Json::Value jv = Json::objectValue; diff --git a/src/xrpld/rpc/handlers/Simulate.cpp b/src/xrpld/rpc/handlers/Simulate.cpp index 3c7340ece3..5f69c203ff 100644 --- a/src/xrpld/rpc/handlers/Simulate.cpp +++ b/src/xrpld/rpc/handlers/Simulate.cpp @@ -233,7 +233,7 @@ simulateTxn(RPC::JsonContext& context, std::shared_ptr transaction) jvResult[jss::applied] = result.applied; jvResult[jss::ledger_index] = view.seq(); - const bool isBinaryOutput = context.params.get(jss::binary, false).asBool(); + bool const isBinaryOutput = context.params.get(jss::binary, false).asBool(); // Convert the TER to human-readable values std::string token; diff --git a/src/xrpld/rpc/handlers/Subscribe.cpp b/src/xrpld/rpc/handlers/Subscribe.cpp index 35b82edb3f..deac6e18ad 100644 --- a/src/xrpld/rpc/handlers/Subscribe.cpp +++ b/src/xrpld/rpc/handlers/Subscribe.cpp @@ -330,7 +330,7 @@ doSubscribe(RPC::JsonContext& context) context.app.getLedgerMaster().getPublishedLedger(); if (lpLedger) { - const Json::Value jvMarker = Json::Value(Json::nullValue); + Json::Value const jvMarker = Json::Value(Json::nullValue); Json::Value jvOffers(Json::objectValue); auto add = [&](Json::StaticString field) { diff --git a/src/xrpld/shamap/SHAMap.h b/src/xrpld/shamap/SHAMap.h index 5771f3ec1d..33c42c2d23 100644 --- a/src/xrpld/shamap/SHAMap.h +++ b/src/xrpld/shamap/SHAMap.h @@ -513,9 +513,9 @@ private: struct MissingNodes { MissingNodes() = delete; - MissingNodes(const MissingNodes&) = delete; + MissingNodes(MissingNodes const&) = delete; MissingNodes& - operator=(const MissingNodes&) = delete; + operator=(MissingNodes const&) = delete; // basic parameters int max_; diff --git a/src/xrpld/shamap/SHAMapLeafNode.h b/src/xrpld/shamap/SHAMapLeafNode.h index c0f9422a38..383da38fd4 
100644 --- a/src/xrpld/shamap/SHAMapLeafNode.h +++ b/src/xrpld/shamap/SHAMapLeafNode.h @@ -42,9 +42,9 @@ protected: SHAMapHash const& hash); public: - SHAMapLeafNode(const SHAMapLeafNode&) = delete; + SHAMapLeafNode(SHAMapLeafNode const&) = delete; SHAMapLeafNode& - operator=(const SHAMapLeafNode&) = delete; + operator=(SHAMapLeafNode const&) = delete; bool isLeaf() const final override diff --git a/src/xrpld/shamap/detail/SHAMap.cpp b/src/xrpld/shamap/detail/SHAMap.cpp index ab511f343f..d2415a2ff2 100644 --- a/src/xrpld/shamap/detail/SHAMap.cpp +++ b/src/xrpld/shamap/detail/SHAMap.cpp @@ -521,7 +521,7 @@ SHAMap::firstBelow( return belowHelper(node, stack, branch, {init, cmp, incr}); } -static const boost::intrusive_ptr no_item; +static boost::intrusive_ptr const no_item; boost::intrusive_ptr const& SHAMap::onlyBelow(SHAMapTreeNode* node) const @@ -757,7 +757,7 @@ SHAMap::delItem(uint256 const& id) { // we may have made this a node with 1 or 0 children // And, if so, we need to remove this branch - const int bc = node->getBranchCount(); + int const bc = node->getBranchCount(); if (bc == 0) { // no children below this branch diff --git a/src/xrpld/shamap/detail/SHAMapInnerNode.cpp b/src/xrpld/shamap/detail/SHAMapInnerNode.cpp index 8ec581b475..6e9d447cf6 100644 --- a/src/xrpld/shamap/detail/SHAMapInnerNode.cpp +++ b/src/xrpld/shamap/detail/SHAMapInnerNode.cpp @@ -266,7 +266,7 @@ SHAMapInnerNode::serializeWithPrefix(Serializer& s) const } std::string -SHAMapInnerNode::getString(const SHAMapNodeID& id) const +SHAMapInnerNode::getString(SHAMapNodeID const& id) const { std::string ret = SHAMapTreeNode::getString(id); auto hashes = hashesAndChildren_.getHashes(); diff --git a/src/xrpld/shamap/detail/SHAMapLeafNode.cpp b/src/xrpld/shamap/detail/SHAMapLeafNode.cpp index cdf0f80a84..10d61ff138 100644 --- a/src/xrpld/shamap/detail/SHAMapLeafNode.cpp +++ b/src/xrpld/shamap/detail/SHAMapLeafNode.cpp @@ -65,7 +65,7 @@ SHAMapLeafNode::setItem(boost::intrusive_ptr item) } std::string -SHAMapLeafNode::getString(const SHAMapNodeID& id) const +SHAMapLeafNode::getString(SHAMapNodeID const& id) const { std::string ret = SHAMapTreeNode::getString(id); diff --git a/src/xrpld/shamap/detail/SHAMapSync.cpp b/src/xrpld/shamap/detail/SHAMapSync.cpp index d43b1ff024..176c9f2a3a 100644 --- a/src/xrpld/shamap/detail/SHAMapSync.cpp +++ b/src/xrpld/shamap/detail/SHAMapSync.cpp @@ -577,7 +577,7 @@ SHAMap::addRootNode( SHAMapAddNode SHAMap::addKnownNode( - const SHAMapNodeID& node, + SHAMapNodeID const& node, Slice const& rawNode, SHAMapSyncFilter* filter) { diff --git a/src/xrpld/shamap/detail/SHAMapTreeNode.cpp b/src/xrpld/shamap/detail/SHAMapTreeNode.cpp index 6acf3f3bfc..d1e74fd6a7 100644 --- a/src/xrpld/shamap/detail/SHAMapTreeNode.cpp +++ b/src/xrpld/shamap/detail/SHAMapTreeNode.cpp @@ -179,7 +179,7 @@ SHAMapTreeNode::makeFromPrefix(Slice rawNode, SHAMapHash const& hash) } std::string -SHAMapTreeNode::getString(const SHAMapNodeID& id) const +SHAMapTreeNode::getString(SHAMapNodeID const& id) const { return to_string(id); } From 28f50cb7cffce7579cdd60f1fbbbe7b8d7f50a9d Mon Sep 17 00:00:00 2001 From: yinyiqian1 Date: Sat, 10 May 2025 10:36:11 -0400 Subject: [PATCH 021/244] fix: enable LedgerStateFix for delegation (#5427) --- include/xrpl/protocol/detail/transactions.macro | 2 +- src/test/app/Delegate_test.cpp | 8 +++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/include/xrpl/protocol/detail/transactions.macro b/include/xrpl/protocol/detail/transactions.macro index 61479611aa..54f97f942f 100644 --- 
a/include/xrpl/protocol/detail/transactions.macro +++ b/include/xrpl/protocol/detail/transactions.macro @@ -396,7 +396,7 @@ TRANSACTION(ttORACLE_DELETE, 52, OracleDelete, Delegation::delegatable, ({ })) /** This transaction type fixes a problem in the ledger state */ -TRANSACTION(ttLEDGER_STATE_FIX, 53, LedgerStateFix, Delegation::notDelegatable, ({ +TRANSACTION(ttLEDGER_STATE_FIX, 53, LedgerStateFix, Delegation::delegatable, ({ {sfLedgerFixType, soeREQUIRED}, {sfOwner, soeOPTIONAL}, })) diff --git a/src/test/app/Delegate_test.cpp b/src/test/app/Delegate_test.cpp index c8415a558a..5136627148 100644 --- a/src/test/app/Delegate_test.cpp +++ b/src/test/app/Delegate_test.cpp @@ -215,9 +215,7 @@ class Delegate_test : public beast::unit_test::suite ter(terNO_ACCOUNT)); } - // for security reasons, AccountSet, SetRegularKey, SignerListSet, - // AccountDelete, DelegateSet are prohibited to be delegated to - // other accounts. + // non-delegatable transaction { env(delegate::set(gw, alice, {"SetRegularKey"}), ter(tecNO_PERMISSION)); @@ -229,6 +227,10 @@ class Delegate_test : public beast::unit_test::suite ter(tecNO_PERMISSION)); env(delegate::set(gw, alice, {"SetRegularKey"}), ter(tecNO_PERMISSION)); + env(delegate::set(gw, alice, {"EnableAmendment"}), + ter(tecNO_PERMISSION)); + env(delegate::set(gw, alice, {"UNLModify"}), ter(tecNO_PERMISSION)); + env(delegate::set(gw, alice, {"SetFee"}), ter(tecNO_PERMISSION)); } } From c6c7c843559c31954a473c51f3b9b32447a85d4c Mon Sep 17 00:00:00 2001 From: Bart Date: Mon, 12 May 2025 13:42:03 +0100 Subject: [PATCH 022/244] Configure CODEOWNERS for changes to RPC code (#5266) To ensure changes to any RPC-related code are compatible with other services, such as Clio, the RPC team will be required to review them. --- .github/CODEOWNERS | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 .github/CODEOWNERS diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000000..b6ecc5b7d4 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,8 @@ +# Allow anyone to review any change by default. +* + +# Require the rpc-reviewers team to review changes to the rpc code. +include/libxrpl/protocol/ @xrplf/rpc-reviewers +src/libxrpl/protocol/ @xrplf/rpc-reviewers +src/xrpld/rpc/ @xrplf/rpc-reviewers +src/xrpld/app/misc/ @xrplf/rpc-reviewers From e3ebc253faaf6acd0f0678451d9ca361c9c8b53a Mon Sep 17 00:00:00 2001 From: Jingchen Date: Mon, 12 May 2025 15:54:01 +0100 Subject: [PATCH 023/244] fix: Ensure that coverage file generation is atomic. (#5426) When unit tests run in parallel, multiple threads can write into one coverage file and corrupt it, and then gcovr won't be able to parse the corrupted file. This change adds -fprofile-update=atomic as instructed by https://gcc.gnu.org/bugzilla/show_bug.cgi?id=68080. 
--- .github/workflows/nix.yml | 2 +- cmake/CodeCoverage.cmake | 17 ++++++++++++++++- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml index 8a8ba94e2d..de59e07761 100644 --- a/.github/workflows/nix.yml +++ b/.github/workflows/nix.yml @@ -247,7 +247,7 @@ jobs: mkdir -p ~/.conan tar -xzf conan.tar -C ~/.conan - name: install gcovr - run: pip install "gcovr>=7,<8" + run: pip install "gcovr>=7,<9" - name: check environment run: | echo ${PATH} | tr ':' '\n' diff --git a/cmake/CodeCoverage.cmake b/cmake/CodeCoverage.cmake index 323303c92d..ce1733988b 100644 --- a/cmake/CodeCoverage.cmake +++ b/cmake/CodeCoverage.cmake @@ -98,6 +98,9 @@ # 2024-04-03, Bronek Kozicki # - add support for output formats: jacoco, clover, lcov # +# 2025-05-12, Jingchen Wu +# - add -fprofile-update=atomic to ensure atomic profile generation +# # USAGE: # # 1. Copy this file into your cmake modules path. @@ -200,15 +203,27 @@ set(COVERAGE_COMPILER_FLAGS "-g --coverage" CACHE INTERNAL "") if(CMAKE_CXX_COMPILER_ID MATCHES "(GNU|Clang)") include(CheckCXXCompilerFlag) + include(CheckCCompilerFlag) + check_cxx_compiler_flag(-fprofile-abs-path HAVE_cxx_fprofile_abs_path) if(HAVE_cxx_fprofile_abs_path) set(COVERAGE_CXX_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-abs-path") endif() - include(CheckCCompilerFlag) + check_c_compiler_flag(-fprofile-abs-path HAVE_c_fprofile_abs_path) if(HAVE_c_fprofile_abs_path) set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-abs-path") endif() + + check_cxx_compiler_flag(-fprofile-update HAVE_cxx_fprofile_update) + if(HAVE_cxx_fprofile_update) + set(COVERAGE_CXX_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-update=atomic") + endif() + + check_c_compiler_flag(-fprofile-update HAVE_c_fprofile_update) + if(HAVE_c_fprofile_update) + set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-update=atomic") + endif() endif() set(CMAKE_Fortran_FLAGS_COVERAGE From c3e9380fb49f6ca057eead71b2e42da18efe0786 Mon Sep 17 00:00:00 2001 From: brettmollin Date: Fri, 16 May 2025 05:49:14 -0400 Subject: [PATCH 024/244] fix: Update validators-example.txt fix xrplf example URL (#5384) --- cfg/validators-example.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cfg/validators-example.txt b/cfg/validators-example.txt index df9757caba..dbcff90f12 100644 --- a/cfg/validators-example.txt +++ b/cfg/validators-example.txt @@ -26,7 +26,7 @@ # # Examples: # https://vl.ripple.com -# https://vl.xrplf.org +# https://unl.xrplf.org # http://127.0.0.1:8000 # file:///etc/opt/ripple/vl.txt # From 380ba9f1c16cd1214462f1caa2370e73d7530f81 Mon Sep 17 00:00:00 2001 From: Valentin Balaschenko <13349202+vlntb@users.noreply.github.com> Date: Fri, 16 May 2025 11:31:51 +0100 Subject: [PATCH 025/244] Fix: Resolve slow test on macOS pipeline (#5392) Using std::barrier performs extremely poorly (~1 hour vs ~1 minute to run the test suite) in certain macOS environments. To unblock our macOS CI pipeline, std::barrier has been replaced with a custom mutex-based barrier (Barrier) that significantly improves performance without compromising correctness. 
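For readers unfamiliar with the pattern, here is a minimal standalone sketch (not part of this patch) of how such a mutex-based barrier gates the phases of a multi-threaded test. It assumes only the Barrier interface introduced in the diff below (a constructor taking the thread count, plus arrive_and_wait()); the thread count and phase names are illustrative, and the Barrier body is repeated here only so the sketch compiles on its own.

// Minimal demo: no thread enters a phase until every thread has
// finished the previous one.
#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

// Same shape as the Barrier added to IntrusiveShared_test.cpp below.
struct Barrier
{
    std::mutex mtx;
    std::condition_variable cv;
    int count;
    int const initial;

    Barrier(int n) : count(n), initial(n)
    {
    }

    void
    arrive_and_wait()
    {
        std::unique_lock lock(mtx);
        if (--count == 0)
        {
            count = initial;
            cv.notify_all();
        }
        else
        {
            cv.wait(lock, [&] { return count == initial; });
        }
    }
};

int
main()
{
    constexpr int numThreads = 4;
    Barrier postCreateSyncPoint{numThreads};  // closes the "create" phase
    Barrier postMutateSyncPoint{numThreads};  // closes the "mutate" phase

    std::vector<std::thread> threads;
    for (int t = 0; t < numThreads; ++t)
    {
        threads.emplace_back([&] {
            // Phase 1: each thread builds its local objects.
            postCreateSyncPoint.arrive_and_wait();
            // Phase 2: runs only after all threads finished phase 1.
            postMutateSyncPoint.arrive_and_wait();
            // Phase 3: runs only after all threads finished phase 2.
        });
    }
    for (auto& th : threads)
        th.join();
}

Because each Barrier object above is passed through exactly once per thread, the last arriving thread wakes all waiters and no thread can run ahead of the group, which is the property the per-phase synchronization points in the test rely on.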
--- .github/workflows/macos.yml | 14 +++-- src/test/basics/IntrusiveShared_test.cpp | 70 ++++++++++++++++++++---- 2 files changed, 68 insertions(+), 16 deletions(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 905df7e83d..63d54175ea 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -71,6 +71,9 @@ jobs: nproc --version echo -n "nproc returns: " nproc + system_profiler SPHardwareDataType + sysctl -n hw.logicalcpu + clang --version - name: configure Conan run : | conan profile new default --detect || true @@ -89,9 +92,8 @@ jobs: generator: ${{ matrix.generator }} configuration: ${{ matrix.configuration }} cmake-args: "-Dassert=TRUE -Dwerr=TRUE ${{ matrix.cmake-args }}" - # TODO: Temporary disabled tests - # - name: test - # run: | - # n=$(nproc) - # echo "Using $n test jobs" - # ${build_dir}/rippled --unittest --unittest-jobs $n + - name: test + run: | + n=$(nproc) + echo "Using $n test jobs" + ${build_dir}/rippled --unittest --unittest-jobs $n diff --git a/src/test/basics/IntrusiveShared_test.cpp b/src/test/basics/IntrusiveShared_test.cpp index fe0cdba777..736cc47345 100644 --- a/src/test/basics/IntrusiveShared_test.cpp +++ b/src/test/basics/IntrusiveShared_test.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -19,6 +20,55 @@ namespace ripple { namespace tests { +/** +Experimentally, we discovered that using std::barrier performs extremely +poorly (~1 hour vs ~1 minute to run the test suite) in certain macOS +environments. To unblock our macOS CI pipeline, we replaced std::barrier with a +custom mutex-based barrier (Barrier) that significantly improves performance +without compromising correctness. For future reference, if we ever consider +reintroducing std::barrier, the following configuration is known to exhibit the +problem: + + Model Name: Mac mini + Model Identifier: Mac14,3 + Model Number: Z16K000R4LL/A + Chip: Apple M2 + Total Number of Cores: 8 (4 performance and 4 efficiency) + Memory: 24 GB + System Firmware Version: 11881.41.5 + OS Loader Version: 11881.1.1 + Apple clang version 16.0.0 (clang-1600.0.26.3) + Target: arm64-apple-darwin24.0.0 + Thread model: posix + + */ +struct Barrier +{ + std::mutex mtx; + std::condition_variable cv; + int count; + int const initial; + + Barrier(int n) : count(n), initial(n) + { + } + + void + arrive_and_wait() + { + std::unique_lock lock(mtx); + if (--count == 0) + { + count = initial; + cv.notify_all(); + } + else + { + cv.wait(lock, [&] { return count == initial; }); + } + } +}; + namespace { enum class TrackedState : std::uint8_t { uninitialized, @@ -500,9 +550,9 @@ public: constexpr int loopIters = 2 * 1024; constexpr int numThreads = 16; std::vector> toClone; - std::barrier loopStartSyncPoint{numThreads}; - std::barrier postCreateToCloneSyncPoint{numThreads}; - std::barrier postCreateVecOfPointersSyncPoint{numThreads}; + Barrier loopStartSyncPoint{numThreads}; + Barrier postCreateToCloneSyncPoint{numThreads}; + Barrier postCreateVecOfPointersSyncPoint{numThreads}; auto engines = [&]() -> std::vector { std::random_device rd; std::vector result; @@ -628,10 +678,10 @@ public: constexpr int flipPointersLoopIters = 256; constexpr int numThreads = 16; std::vector> toClone; - std::barrier loopStartSyncPoint{numThreads}; - std::barrier postCreateToCloneSyncPoint{numThreads}; - std::barrier postCreateVecOfPointersSyncPoint{numThreads}; - std::barrier postFlipPointersLoopSyncPoint{numThreads}; + Barrier loopStartSyncPoint{numThreads}; + Barrier 
postCreateToCloneSyncPoint{numThreads}; + Barrier postCreateVecOfPointersSyncPoint{numThreads}; + Barrier postFlipPointersLoopSyncPoint{numThreads}; auto engines = [&]() -> std::vector { std::random_device rd; std::vector result; @@ -761,9 +811,9 @@ public: constexpr int lockWeakLoopIters = 256; constexpr int numThreads = 16; std::vector> toLock; - std::barrier loopStartSyncPoint{numThreads}; - std::barrier postCreateToLockSyncPoint{numThreads}; - std::barrier postLockWeakLoopSyncPoint{numThreads}; + Barrier loopStartSyncPoint{numThreads}; + Barrier postCreateToLockSyncPoint{numThreads}; + Barrier postLockWeakLoopSyncPoint{numThreads}; // lockAndDestroy creates weak pointers from the strong pointer // and runs a loop that locks the weak pointer. At the end of the loop From 09690f1b38eb9e476992cc92b8937d4aa4836762 Mon Sep 17 00:00:00 2001 From: Michael Legleux Date: Sun, 18 May 2025 20:39:18 +0100 Subject: [PATCH 026/244] Set version to 2.5.0-b1 --- src/libxrpl/protocol/BuildInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libxrpl/protocol/BuildInfo.cpp b/src/libxrpl/protocol/BuildInfo.cpp index 39d34c43e7..1f061cebdc 100644 --- a/src/libxrpl/protocol/BuildInfo.cpp +++ b/src/libxrpl/protocol/BuildInfo.cpp @@ -36,7 +36,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "2.4.0" +char const* const versionString = "2.5.0-b1" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From dd62cfcc22971405f1d424a77159d6e5b5c636e1 Mon Sep 17 00:00:00 2001 From: Bart Date: Tue, 20 May 2025 11:24:07 -0400 Subject: [PATCH 027/244] fix: Update path in CODEOWNERS (#5440) --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index b6ecc5b7d4..bc4fe2febd 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -2,7 +2,7 @@ * # Require the rpc-reviewers team to review changes to the rpc code. -include/libxrpl/protocol/ @xrplf/rpc-reviewers +include/xrpl/protocol/ @xrplf/rpc-reviewers src/libxrpl/protocol/ @xrplf/rpc-reviewers src/xrpld/rpc/ @xrplf/rpc-reviewers src/xrpld/app/misc/ @xrplf/rpc-reviewers From e514de76ed4eb9b1ad55c94170a56d03e52562e0 Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Tue, 20 May 2025 19:06:41 +0100 Subject: [PATCH 028/244] Add single asset vault (XLS-65d) (#5224) - Specification: XRPLF/XRPL-Standards#239 - Amendment: `SingleAssetVault` - Implements a vault feature used to store a fungible asset (XRP, IOU, or MPT, but not NFT) and to receive shares in the vault (an MPT) in exchange. - A vault can be private or public. - A private vault can use permissioned domains, subject to the `PermissionedDomains` amendment. - Shares can be exchanged back into asset with `VaultWithdraw`. - Permissions on the asset in the vault are transitively applied on shares in the vault. - Issuer of the asset in the vault can clawback with `VaultClawback`. - Extended `MPTokenIssuance` with `DomainID`, used by the permissioned domain on the vault shares. 
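For orientation, a condensed sketch of the vault lifecycle, adapted from the Vault_test.cpp added below. It is illustrative only: it assumes a funded jtx::Env named env with issuer, owner, and depositor accounts set up as in that test (issuer with asfAllowTrustLineClawback, depositor already holding the IOU), and it omits the error paths the test exercises.

    // Condensed vault lifecycle, adapted from Vault_test.cpp in this patch.
    // Assumes the jtx fixtures described above are already in place.
    using namespace test::jtx;

    Vault vault{env};
    PrettyAsset asset = issuer["IOU"];

    // VaultCreate: owner creates a public vault holding the IOU asset.
    auto [tx, keylet] = vault.create({.owner = owner, .asset = asset});
    tx[sfAssetsMaximum] = asset(100).number();  // optional deposit cap
    env(tx);
    env.close();

    // VaultDeposit: depositor trades assets for shares (an MPT).
    env(vault.deposit(
        {.depositor = depositor, .id = keylet.key, .amount = asset(50)}));

    // VaultWithdraw: shares are exchanged back into the underlying asset.
    env(vault.withdraw(
        {.depositor = depositor, .id = keylet.key, .amount = asset(40)}));

    // VaultClawback: the asset issuer claws back the remainder from the
    // holder (omitting Amount claws back everything, as in the test).
    env(vault.clawback(
        {.issuer = issuer, .id = keylet.key, .holder = depositor}));

    // VaultDelete: once the vault is empty, the owner can delete it.
    env(vault.del({.owner = owner, .id = keylet.key}));
    env.close();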
Co-authored-by: John Freeman --- include/xrpl/json/json_value.h | 11 + include/xrpl/protocol/AMMCore.h | 8 - include/xrpl/protocol/Asset.h | 16 +- include/xrpl/protocol/IOUAmount.h | 1 - include/xrpl/protocol/Indexes.h | 9 + include/xrpl/protocol/LedgerFormats.h | 3 + include/xrpl/protocol/MPTAmount.h | 3 - include/xrpl/protocol/MPTIssue.h | 7 +- include/xrpl/protocol/Protocol.h | 10 + include/xrpl/protocol/SField.h | 1 - include/xrpl/protocol/STAmount.h | 23 +- include/xrpl/protocol/STBase.h | 12 +- include/xrpl/protocol/STIssue.h | 4 + include/xrpl/protocol/STNumber.h | 20 + include/xrpl/protocol/STObject.h | 59 +- include/xrpl/protocol/STTx.h | 4 + include/xrpl/protocol/TER.h | 16 + include/xrpl/protocol/TxFlags.h | 6 + include/xrpl/protocol/detail/features.macro | 1 + .../xrpl/protocol/detail/ledger_entries.macro | 57 +- include/xrpl/protocol/detail/sfields.macro | 7 + .../xrpl/protocol/detail/transactions.macro | 43 + include/xrpl/protocol/jss.h | 5 +- src/libxrpl/json/json_value.cpp | 13 + src/libxrpl/protocol/AMMCore.cpp | 12 - src/libxrpl/protocol/Asset.cpp | 14 +- src/libxrpl/protocol/Indexes.cpp | 7 + src/libxrpl/protocol/Keylet.cpp | 2 +- src/libxrpl/protocol/MPTIssue.cpp | 6 - src/libxrpl/protocol/STAmount.cpp | 90 +- src/libxrpl/protocol/STNumber.cpp | 100 + src/libxrpl/protocol/STParsedJSON.cpp | 15 + src/libxrpl/protocol/STTx.cpp | 6 + src/libxrpl/protocol/STVar.cpp | 4 + src/libxrpl/protocol/TER.cpp | 4 + src/test/app/AMM_test.cpp | 73 +- src/test/app/Credentials_test.cpp | 28 +- src/test/app/MPToken_test.cpp | 43 +- src/test/app/Vault_test.cpp | 3085 +++++++++++++++++ src/test/basics/IOUAmount_test.cpp | 5 + src/test/jtx/Env.h | 1 + src/test/jtx/amount.h | 55 +- src/test/jtx/basic_prop.h | 2 + src/test/jtx/credentials.h | 10 + src/test/jtx/impl/Env.cpp | 1 - src/test/jtx/impl/vault.cpp | 104 + src/test/jtx/mpt.h | 5 +- src/test/jtx/vault.h | 109 + src/test/ledger/Invariants_test.cpp | 84 +- src/test/protocol/STNumber_test.cpp | 194 ++ src/test/rpc/Transaction_test.cpp | 3 +- src/xrpld/app/misc/CredentialHelpers.cpp | 122 +- src/xrpld/app/misc/CredentialHelpers.h | 32 +- src/xrpld/app/misc/NetworkOPs.cpp | 2 +- src/xrpld/app/tx/detail/AMMCreate.cpp | 72 +- src/xrpld/app/tx/detail/CashCheck.cpp | 2 +- src/xrpld/app/tx/detail/Clawback.cpp | 7 +- src/xrpld/app/tx/detail/CreateCheck.cpp | 9 +- src/xrpld/app/tx/detail/CreateOffer.cpp | 2 +- src/xrpld/app/tx/detail/Escrow.cpp | 10 +- src/xrpld/app/tx/detail/InvariantCheck.cpp | 61 +- src/xrpld/app/tx/detail/InvariantCheck.h | 2 + src/xrpld/app/tx/detail/MPTokenAuthorize.cpp | 21 +- src/xrpld/app/tx/detail/MPTokenAuthorize.h | 6 +- .../app/tx/detail/MPTokenIssuanceCreate.cpp | 47 +- .../app/tx/detail/MPTokenIssuanceCreate.h | 18 +- .../app/tx/detail/MPTokenIssuanceDestroy.cpp | 2 +- src/xrpld/app/tx/detail/PayChan.cpp | 10 +- src/xrpld/app/tx/detail/Payment.cpp | 12 +- src/xrpld/app/tx/detail/SetTrust.cpp | 44 +- src/xrpld/app/tx/detail/VaultClawback.cpp | 239 ++ src/xrpld/app/tx/detail/VaultClawback.h | 48 + src/xrpld/app/tx/detail/VaultCreate.cpp | 244 ++ src/xrpld/app/tx/detail/VaultCreate.h | 51 + src/xrpld/app/tx/detail/VaultDelete.cpp | 189 + src/xrpld/app/tx/detail/VaultDelete.h | 48 + src/xrpld/app/tx/detail/VaultDeposit.cpp | 283 ++ src/xrpld/app/tx/detail/VaultDeposit.h | 48 + src/xrpld/app/tx/detail/VaultSet.cpp | 197 ++ src/xrpld/app/tx/detail/VaultSet.h | 48 + src/xrpld/app/tx/detail/VaultWithdraw.cpp | 258 ++ src/xrpld/app/tx/detail/VaultWithdraw.h | 48 + src/xrpld/app/tx/detail/applySteps.cpp | 6 + 
src/xrpld/ledger/View.h | 280 +- src/xrpld/ledger/detail/View.cpp | 595 +++- src/xrpld/net/detail/RPCCall.cpp | 19 + src/xrpld/rpc/detail/Handler.cpp | 1 + src/xrpld/rpc/detail/RPCHelpers.cpp | 1 + src/xrpld/rpc/detail/RPCHelpers.h | 2 + src/xrpld/rpc/handlers/AccountObjects.cpp | 4 +- src/xrpld/rpc/handlers/Handlers.h | 2 + src/xrpld/rpc/handlers/LedgerEntry.cpp | 35 + src/xrpld/rpc/handlers/VaultInfo.cpp | 114 + 93 files changed, 7257 insertions(+), 385 deletions(-) create mode 100644 src/test/app/Vault_test.cpp create mode 100644 src/test/jtx/impl/vault.cpp create mode 100644 src/test/jtx/vault.h create mode 100644 src/xrpld/app/tx/detail/VaultClawback.cpp create mode 100644 src/xrpld/app/tx/detail/VaultClawback.h create mode 100644 src/xrpld/app/tx/detail/VaultCreate.cpp create mode 100644 src/xrpld/app/tx/detail/VaultCreate.h create mode 100644 src/xrpld/app/tx/detail/VaultDelete.cpp create mode 100644 src/xrpld/app/tx/detail/VaultDelete.h create mode 100644 src/xrpld/app/tx/detail/VaultDeposit.cpp create mode 100644 src/xrpld/app/tx/detail/VaultDeposit.h create mode 100644 src/xrpld/app/tx/detail/VaultSet.cpp create mode 100644 src/xrpld/app/tx/detail/VaultSet.h create mode 100644 src/xrpld/app/tx/detail/VaultWithdraw.cpp create mode 100644 src/xrpld/app/tx/detail/VaultWithdraw.h create mode 100644 src/xrpld/rpc/handlers/VaultInfo.cpp diff --git a/include/xrpl/json/json_value.h b/include/xrpl/json/json_value.h index 3431ab7744..2e815b79f2 100644 --- a/include/xrpl/json/json_value.h +++ b/include/xrpl/json/json_value.h @@ -20,11 +20,13 @@ #ifndef RIPPLE_JSON_JSON_VALUE_H_INCLUDED #define RIPPLE_JSON_JSON_VALUE_H_INCLUDED +#include #include #include #include #include +#include #include /** \brief JSON (JavaScript Object Notation). @@ -216,6 +218,7 @@ public: Value(UInt value); Value(double value); Value(char const* value); + Value(ripple::Number const& value); /** \brief Constructs a value from a static string. * Like other value string constructor but do not duplicate the string for @@ -365,6 +368,8 @@ public: */ Value& operator[](StaticString const& key); + Value const& + operator[](StaticString const& key) const; /// Return the member named key if it exist, defaultValue otherwise. Value @@ -436,6 +441,12 @@ private: int allocated_ : 1; // Notes: if declared as bool, bitfield is useless. }; +inline Value +to_json(ripple::Number const& number) +{ + return to_string(number); +} + bool operator==(Value const&, Value const&); diff --git a/include/xrpl/protocol/AMMCore.h b/include/xrpl/protocol/AMMCore.h index 32988af5fc..442f24d878 100644 --- a/include/xrpl/protocol/AMMCore.h +++ b/include/xrpl/protocol/AMMCore.h @@ -48,14 +48,6 @@ class STObject; class STAmount; class Rules; -/** Calculate AMM account ID. - */ -AccountID -ammAccountID( - std::uint16_t prefix, - uint256 const& parentHash, - uint256 const& ammID); - /** Calculate Liquidity Provider Token (LPT) Currency. 
*/ Currency diff --git a/include/xrpl/protocol/Asset.h b/include/xrpl/protocol/Asset.h index 0d12cd4058..4438106738 100644 --- a/include/xrpl/protocol/Asset.h +++ b/include/xrpl/protocol/Asset.h @@ -20,6 +20,7 @@ #ifndef RIPPLE_PROTOCOL_ASSET_H_INCLUDED #define RIPPLE_PROTOCOL_ASSET_H_INCLUDED +#include #include #include #include @@ -27,6 +28,7 @@ namespace ripple { class Asset; +class STAmount; template concept ValidIssueType = @@ -92,6 +94,9 @@ public: void setJson(Json::Value& jv) const; + STAmount + operator()(Number const&) const; + bool native() const { @@ -114,6 +119,14 @@ public: equalTokens(Asset const& lhs, Asset const& rhs); }; +inline Json::Value +to_json(Asset const& asset) +{ + Json::Value jv; + asset.setJson(jv); + return jv; +} + template constexpr bool Asset::holds() const @@ -219,9 +232,6 @@ validJSONAsset(Json::Value const& jv); Asset assetFromJson(Json::Value const& jv); -Json::Value -to_json(Asset const& asset); - } // namespace ripple #endif // RIPPLE_PROTOCOL_ASSET_H_INCLUDED diff --git a/include/xrpl/protocol/IOUAmount.h b/include/xrpl/protocol/IOUAmount.h index 6895ed08ae..a27069e37b 100644 --- a/include/xrpl/protocol/IOUAmount.h +++ b/include/xrpl/protocol/IOUAmount.h @@ -28,7 +28,6 @@ #include #include -#include namespace ripple { diff --git a/include/xrpl/protocol/Indexes.h b/include/xrpl/protocol/Indexes.h index 979a994c10..57c8727ae6 100644 --- a/include/xrpl/protocol/Indexes.h +++ b/include/xrpl/protocol/Indexes.h @@ -334,6 +334,15 @@ mptoken(uint256 const& mptokenKey) Keylet mptoken(uint256 const& issuanceKey, AccountID const& holder) noexcept; +Keylet +vault(AccountID const& owner, std::uint32_t seq) noexcept; + +inline Keylet +vault(uint256 const& vaultKey) +{ + return {ltVAULT, vaultKey}; +} + Keylet permissionedDomain(AccountID const& account, std::uint32_t seq) noexcept; diff --git a/include/xrpl/protocol/LedgerFormats.h b/include/xrpl/protocol/LedgerFormats.h index e2ac5bd071..3edd656213 100644 --- a/include/xrpl/protocol/LedgerFormats.h +++ b/include/xrpl/protocol/LedgerFormats.h @@ -191,6 +191,9 @@ enum LedgerSpecificFlags { // ltCREDENTIAL lsfAccepted = 0x00010000, + + // ltVAULT + lsfVaultPrivate = 0x00010000, }; //------------------------------------------------------------------------------ diff --git a/include/xrpl/protocol/MPTAmount.h b/include/xrpl/protocol/MPTAmount.h index 244d683915..419450eeb9 100644 --- a/include/xrpl/protocol/MPTAmount.h +++ b/include/xrpl/protocol/MPTAmount.h @@ -24,15 +24,12 @@ #include #include #include -#include #include #include #include -#include #include -#include namespace ripple { diff --git a/include/xrpl/protocol/MPTIssue.h b/include/xrpl/protocol/MPTIssue.h index 028051ab1a..d1c757337e 100644 --- a/include/xrpl/protocol/MPTIssue.h +++ b/include/xrpl/protocol/MPTIssue.h @@ -42,8 +42,11 @@ public: AccountID const& getIssuer() const; - MPTID const& - getMptID() const; + constexpr MPTID const& + getMptID() const + { + return mptID_; + } std::string getText() const; diff --git a/include/xrpl/protocol/Protocol.h b/include/xrpl/protocol/Protocol.h index 041b53d6cb..49bad8a076 100644 --- a/include/xrpl/protocol/Protocol.h +++ b/include/xrpl/protocol/Protocol.h @@ -116,6 +116,16 @@ std::size_t constexpr maxMPTokenMetadataLength = 1024; /** The maximum amount of MPTokenIssuance */ std::uint64_t constexpr maxMPTokenAmount = 0x7FFF'FFFF'FFFF'FFFFull; +/** The maximum length of Data payload */ +std::size_t constexpr maxDataPayloadLength = 256; + +/** Vault withdrawal policies */ +std::uint8_t constexpr 
vaultStrategyFirstComeFirstServe = 1; + +/** Maximum recursion depth for vault shares being put as an asset inside + * another vault; counted from 0 */ +std::uint8_t constexpr maxAssetCheckDepth = 5; + /** A ledger index. */ using LedgerIndex = std::uint32_t; diff --git a/include/xrpl/protocol/SField.h b/include/xrpl/protocol/SField.h index 04d4dc82fc..777cfa02ba 100644 --- a/include/xrpl/protocol/SField.h +++ b/include/xrpl/protocol/SField.h @@ -25,7 +25,6 @@ #include #include -#include namespace ripple { diff --git a/include/xrpl/protocol/STAmount.h b/include/xrpl/protocol/STAmount.h index 518edacf0b..c66d273254 100644 --- a/include/xrpl/protocol/STAmount.h +++ b/include/xrpl/protocol/STAmount.h @@ -153,6 +153,12 @@ public: template STAmount(A const& asset, int mantissa, int exponent = 0); + template + STAmount(A const& asset, Number const& number) + : STAmount(asset, number.mantissa(), number.exponent()) + { + } + // Legacy support for new-style amounts STAmount(IOUAmount const& amount, Issue const& issue); STAmount(XRPAmount const& amount); @@ -230,6 +236,9 @@ public: STAmount& operator=(XRPAmount const& amount); + STAmount& + operator=(Number const&); + //-------------------------------------------------------------------------- // // Modification @@ -268,7 +277,7 @@ public: std::string getText() const override; - Json::Value getJson(JsonOptions) const override; + Json::Value getJson(JsonOptions = JsonOptions::none) const override; void add(Serializer& s) const override; @@ -417,7 +426,7 @@ STAmount amountFromQuality(std::uint64_t rate); STAmount -amountFromString(Asset const& issue, std::string const& amount); +amountFromString(Asset const& asset, std::string const& amount); STAmount amountFromJson(SField const& name, Json::Value const& v); @@ -541,6 +550,16 @@ STAmount::operator=(XRPAmount const& amount) return *this; } +inline STAmount& +STAmount::operator=(Number const& number) +{ + mIsNegative = number.mantissa() < 0; + mValue = mIsNegative ? 
-number.mantissa() : number.mantissa(); + mOffset = number.exponent(); + canonicalize(); + return *this; +} + inline void STAmount::negate() { diff --git a/include/xrpl/protocol/STBase.h b/include/xrpl/protocol/STBase.h index 8d0aaabe48..eec9a97987 100644 --- a/include/xrpl/protocol/STBase.h +++ b/include/xrpl/protocol/STBase.h @@ -92,6 +92,16 @@ struct JsonOptions } }; +template + requires requires(T const& t) { + { t.getJson(JsonOptions::none) } -> std::convertible_to; + } +Json::Value +to_json(T const& t) +{ + return t.getJson(JsonOptions::none); +} + namespace detail { class STVar; } @@ -157,7 +167,7 @@ public: virtual std::string getText() const; - virtual Json::Value getJson(JsonOptions /*options*/) const; + virtual Json::Value getJson(JsonOptions = JsonOptions::none) const; virtual void add(Serializer& s) const; diff --git a/include/xrpl/protocol/STIssue.h b/include/xrpl/protocol/STIssue.h index c729854e1b..9fe61f32cd 100644 --- a/include/xrpl/protocol/STIssue.h +++ b/include/xrpl/protocol/STIssue.h @@ -37,6 +37,7 @@ public: using value_type = Asset; STIssue() = default; + STIssue(STIssue const& rhs) = default; explicit STIssue(SerialIter& sit, SField const& name); @@ -45,6 +46,9 @@ public: explicit STIssue(SField const& name); + STIssue& + operator=(STIssue const& rhs) = default; + template TIss const& get() const; diff --git a/include/xrpl/protocol/STNumber.h b/include/xrpl/protocol/STNumber.h index c0fce572c8..3c1f73e4e6 100644 --- a/include/xrpl/protocol/STNumber.h +++ b/include/xrpl/protocol/STNumber.h @@ -63,6 +63,13 @@ public: void setValue(Number const& v); + STNumber& + operator=(Number const& rhs) + { + setValue(rhs); + return *this; + } + bool isEquivalent(STBase const& t) const override; bool @@ -83,6 +90,19 @@ private: std::ostream& operator<<(std::ostream& out, STNumber const& rhs); +struct NumberParts +{ + std::uint64_t mantissa = 0; + int exponent = 0; + bool negative = false; +}; + +NumberParts +partsFromString(std::string const& number); + +STNumber +numberFromJson(SField const& field, Json::Value const& value); + } // namespace ripple #endif diff --git a/include/xrpl/protocol/STObject.h b/include/xrpl/protocol/STObject.h index 2efa828267..6cd083ef85 100644 --- a/include/xrpl/protocol/STObject.h +++ b/include/xrpl/protocol/STObject.h @@ -154,8 +154,7 @@ public: getText() const override; // TODO(tom): options should be an enum. 
- Json::Value - getJson(JsonOptions options) const override; + Json::Value getJson(JsonOptions = JsonOptions::none) const override; void addWithoutSigningFields(Serializer& s) const; @@ -484,9 +483,19 @@ private: template class STObject::Proxy { -protected: +public: using value_type = typename T::value_type; + value_type + value() const; + + value_type + operator*() const; + + T const* + operator->() const; + +protected: STObject* st_; SOEStyle style_; TypedField const* f_; @@ -495,9 +504,6 @@ protected: Proxy(STObject* st, TypedField const* f); - value_type - value() const; - T const* find() const; @@ -512,7 +518,7 @@ template concept IsArithmetic = std::is_arithmetic_v || std::is_same_v; template -class STObject::ValueProxy : private Proxy +class STObject::ValueProxy : public Proxy { private: using value_type = typename T::value_type; @@ -538,6 +544,13 @@ public: operator value_type() const; + template + friend bool + operator==(U const& lhs, STObject::ValueProxy const& rhs) + { + return rhs.value() == lhs; + } + private: friend class STObject; @@ -545,7 +558,7 @@ private: }; template -class STObject::OptionalProxy : private Proxy +class STObject::OptionalProxy : public Proxy { private: using value_type = typename T::value_type; @@ -565,15 +578,6 @@ public: explicit operator bool() const noexcept; - /** Return the contained value - - Throws: - - STObject::FieldErr if !engaged() - */ - value_type - operator*() const; - operator optional_type() const; /** Explicit conversion to std::optional */ @@ -717,6 +721,20 @@ STObject::Proxy::value() const -> value_type return value_type{}; } +template +auto +STObject::Proxy::operator*() const -> value_type +{ + return this->value(); +} + +template +T const* +STObject::Proxy::operator->() const +{ + return this->find(); +} + template inline T const* STObject::Proxy::find() const @@ -792,13 +810,6 @@ STObject::OptionalProxy::operator bool() const noexcept return engaged(); } -template -auto -STObject::OptionalProxy::operator*() const -> value_type -{ - return this->value(); -} - template STObject::OptionalProxy::operator typename STObject::OptionalProxy< T>::optional_type() const diff --git a/include/xrpl/protocol/STTx.h b/include/xrpl/protocol/STTx.h index 8de2c8cc31..b00495bf76 100644 --- a/include/xrpl/protocol/STTx.h +++ b/include/xrpl/protocol/STTx.h @@ -102,6 +102,10 @@ public: SeqProxy getSeqProxy() const; + /** Returns the first non-zero value of (Sequence, TicketSequence). */ + std::uint32_t + getSeqValue() const; + boost::container::flat_set getMentionedAccounts() const; diff --git a/include/xrpl/protocol/TER.h b/include/xrpl/protocol/TER.h index da3788cd6a..b87bc3f8a4 100644 --- a/include/xrpl/protocol/TER.h +++ b/include/xrpl/protocol/TER.h @@ -225,6 +225,8 @@ enum TERcodes : TERUnderlyingType { terQUEUED, // Transaction is being held in TxQ until fee drops terPRE_TICKET, // Ticket is not yet in ledger but might be on its way terNO_AMM, // AMM doesn't exist for the asset pair + terADDRESS_COLLISION, // Failed to allocate AccountID when trying to + // create a pseudo-account }; //------------------------------------------------------------------------------ @@ -265,6 +267,17 @@ enum TECcodes : TERUnderlyingType { // Otherwise, treated as terRETRY. // // DO NOT CHANGE THESE NUMBERS: They appear in ledger meta data. + // + // Note: + // tecNO_ENTRY is often used interchangeably with tecOBJECT_NOT_FOUND. 
+ // While there does not seem to be a clear rule which to use when, the + // following guidance will help to keep errors consistent with the + // majority of (but not all) transaction types: + // - tecNO_ENTRY : cannot find the primary ledger object on which the + // transaction is being attempted + // - tecOBJECT_NOT_FOUND : cannot find the additional object(s) needed to + // complete the transaction + tecCLAIM = 100, tecPATH_PARTIAL = 101, tecUNFUNDED_ADD = 102, // Unused legacy code @@ -344,6 +357,9 @@ enum TECcodes : TERUnderlyingType { tecARRAY_TOO_LARGE = 191, tecLOCKED = 192, tecBAD_CREDENTIALS = 193, + tecWRONG_ASSET = 194, + tecLIMIT_EXCEEDED = 195, + tecPSEUDO_ACCOUNT = 196, }; //------------------------------------------------------------------------------ diff --git a/include/xrpl/protocol/TxFlags.h b/include/xrpl/protocol/TxFlags.h index 7a600676f8..505000cfd6 100644 --- a/include/xrpl/protocol/TxFlags.h +++ b/include/xrpl/protocol/TxFlags.h @@ -233,6 +233,12 @@ constexpr std::uint32_t tfAMMClawbackMask = ~(tfUniversal | tfClawTwoAssets); // BridgeModify flags: constexpr std::uint32_t tfClearAccountCreateAmount = 0x00010000; constexpr std::uint32_t tfBridgeModifyMask = ~(tfUniversal | tfClearAccountCreateAmount); + +// VaultCreate flags: +constexpr std::uint32_t const tfVaultPrivate = 0x00010000; +static_assert(tfVaultPrivate == lsfVaultPrivate); +constexpr std::uint32_t const tfVaultShareNonTransferable = 0x00020000; +constexpr std::uint32_t const tfVaultCreateMask = ~(tfUniversal | tfVaultPrivate | tfVaultShareNonTransferable); // clang-format on } // namespace ripple diff --git a/include/xrpl/protocol/detail/features.macro b/include/xrpl/protocol/detail/features.macro index 31b5c25d91..3be0fd426c 100644 --- a/include/xrpl/protocol/detail/features.macro +++ b/include/xrpl/protocol/detail/features.macro @@ -32,6 +32,7 @@ // If you add an amendment here, then do not forget to increment `numFeatures` // in include/xrpl/protocol/Feature.h. +XRPL_FEATURE(SingleAssetVault, Supported::no, VoteBehavior::DefaultNo) XRPL_FEATURE(PermissionDelegation, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (PayChanCancelAfter, Supported::yes, VoteBehavior::DefaultNo) // Check flags in Credential transactions diff --git a/include/xrpl/protocol/detail/ledger_entries.macro b/include/xrpl/protocol/detail/ledger_entries.macro index 66573eaf4a..a902b32026 100644 --- a/include/xrpl/protocol/detail/ledger_entries.macro +++ b/include/xrpl/protocol/detail/ledger_entries.macro @@ -165,7 +165,8 @@ LEDGER_ENTRY(ltACCOUNT_ROOT, 0x0061, AccountRoot, account, ({ {sfMintedNFTokens, soeDEFAULT}, {sfBurnedNFTokens, soeDEFAULT}, {sfFirstNFTokenSequence, soeOPTIONAL}, - {sfAMMID, soeOPTIONAL}, + {sfAMMID, soeOPTIONAL}, // pseudo-account designator + {sfVaultID, soeOPTIONAL}, // pseudo-account designator })) /** A ledger object which contains a list of object identifiers. 
@@ -390,21 +391,6 @@ LEDGER_ENTRY(ltAMM, 0x0079, AMM, amm, ({ {sfPreviousTxnLgrSeq, soeOPTIONAL}, })) -/** A ledger object which tracks Oracle - \sa keylet::oracle - */ -LEDGER_ENTRY(ltORACLE, 0x0080, Oracle, oracle, ({ - {sfOwner, soeREQUIRED}, - {sfProvider, soeREQUIRED}, - {sfPriceDataSeries, soeREQUIRED}, - {sfAssetClass, soeREQUIRED}, - {sfLastUpdateTime, soeREQUIRED}, - {sfURI, soeOPTIONAL}, - {sfOwnerNode, soeREQUIRED}, - {sfPreviousTxnID, soeREQUIRED}, - {sfPreviousTxnLgrSeq, soeREQUIRED}, -})) - /** A ledger object which tracks MPTokenIssuance \sa keylet::mptIssuance */ @@ -419,6 +405,7 @@ LEDGER_ENTRY(ltMPTOKEN_ISSUANCE, 0x007e, MPTokenIssuance, mpt_issuance, ({ {sfMPTokenMetadata, soeOPTIONAL}, {sfPreviousTxnID, soeREQUIRED}, {sfPreviousTxnLgrSeq, soeREQUIRED}, + {sfDomainID, soeOPTIONAL}, })) /** A ledger object which tracks MPToken @@ -433,6 +420,21 @@ LEDGER_ENTRY(ltMPTOKEN, 0x007f, MPToken, mptoken, ({ {sfPreviousTxnLgrSeq, soeREQUIRED}, })) +/** A ledger object which tracks Oracle + \sa keylet::oracle + */ +LEDGER_ENTRY(ltORACLE, 0x0080, Oracle, oracle, ({ + {sfOwner, soeREQUIRED}, + {sfProvider, soeREQUIRED}, + {sfPriceDataSeries, soeREQUIRED}, + {sfAssetClass, soeREQUIRED}, + {sfLastUpdateTime, soeREQUIRED}, + {sfURI, soeOPTIONAL}, + {sfOwnerNode, soeREQUIRED}, + {sfPreviousTxnID, soeREQUIRED}, + {sfPreviousTxnLgrSeq, soeREQUIRED}, +})) + /** A ledger object which tracks Credential \sa keylet::credential */ @@ -472,6 +474,29 @@ LEDGER_ENTRY(ltDELEGATE, 0x0083, Delegate, delegate, ({ {sfPreviousTxnLgrSeq, soeREQUIRED}, })) +/** A ledger object representing a single asset vault. + + \sa keylet::mptoken + */ +LEDGER_ENTRY(ltVAULT, 0x0084, Vault, vault, ({ + {sfPreviousTxnID, soeREQUIRED}, + {sfPreviousTxnLgrSeq, soeREQUIRED}, + {sfSequence, soeREQUIRED}, + {sfOwnerNode, soeREQUIRED}, + {sfOwner, soeREQUIRED}, + {sfAccount, soeREQUIRED}, + {sfData, soeOPTIONAL}, + {sfAsset, soeREQUIRED}, + {sfAssetsTotal, soeREQUIRED}, + {sfAssetsAvailable, soeREQUIRED}, + {sfAssetsMaximum, soeDEFAULT}, + {sfLossUnrealized, soeREQUIRED}, + {sfShareMPTID, soeREQUIRED}, + {sfWithdrawalPolicy, soeREQUIRED}, + // no SharesTotal ever (use MPTIssuance.sfOutstandingAmount) + // no PermissionedDomainID ever (use MPTIssuance.sfDomainID) +})) + #undef EXPAND #undef LEDGER_ENTRY_DUPLICATE diff --git a/include/xrpl/protocol/detail/sfields.macro b/include/xrpl/protocol/detail/sfields.macro index e98709c8c3..63bc52de6a 100644 --- a/include/xrpl/protocol/detail/sfields.macro +++ b/include/xrpl/protocol/detail/sfields.macro @@ -42,6 +42,7 @@ TYPED_SFIELD(sfTickSize, UINT8, 16) TYPED_SFIELD(sfUNLModifyDisabling, UINT8, 17) TYPED_SFIELD(sfHookResult, UINT8, 18) TYPED_SFIELD(sfWasLockingChainSend, UINT8, 19) +TYPED_SFIELD(sfWithdrawalPolicy, UINT8, 20) // 16-bit integers (common) TYPED_SFIELD(sfLedgerEntryType, UINT16, 1, SField::sMD_Never) @@ -155,6 +156,7 @@ TYPED_SFIELD(sfTakerGetsIssuer, UINT160, 4) // 192-bit (common) TYPED_SFIELD(sfMPTokenIssuanceID, UINT192, 1) +TYPED_SFIELD(sfShareMPTID, UINT192, 2) // 256-bit (common) TYPED_SFIELD(sfLedgerHash, UINT256, 1) @@ -192,9 +194,14 @@ TYPED_SFIELD(sfHookHash, UINT256, 31) TYPED_SFIELD(sfHookNamespace, UINT256, 32) TYPED_SFIELD(sfHookSetTxnID, UINT256, 33) TYPED_SFIELD(sfDomainID, UINT256, 34) +TYPED_SFIELD(sfVaultID, UINT256, 35) // number (common) TYPED_SFIELD(sfNumber, NUMBER, 1) +TYPED_SFIELD(sfAssetsAvailable, NUMBER, 2) +TYPED_SFIELD(sfAssetsMaximum, NUMBER, 3) +TYPED_SFIELD(sfAssetsTotal, NUMBER, 4) +TYPED_SFIELD(sfLossUnrealized, NUMBER, 5) // currency 
amount (common) TYPED_SFIELD(sfAmount, AMOUNT, 1) diff --git a/include/xrpl/protocol/detail/transactions.macro b/include/xrpl/protocol/detail/transactions.macro index 54f97f942f..0f614df692 100644 --- a/include/xrpl/protocol/detail/transactions.macro +++ b/include/xrpl/protocol/detail/transactions.macro @@ -471,6 +471,49 @@ TRANSACTION(ttDELEGATE_SET, 64, DelegateSet, Delegation::notDelegatable, ({ {sfPermissions, soeREQUIRED}, })) +/** This transaction creates a single asset vault. */ +TRANSACTION(ttVAULT_CREATE, 65, VaultCreate, Delegation::delegatable, ({ + {sfAsset, soeREQUIRED, soeMPTSupported}, + {sfAssetsMaximum, soeOPTIONAL}, + {sfMPTokenMetadata, soeOPTIONAL}, + {sfDomainID, soeOPTIONAL}, // PermissionedDomainID + {sfWithdrawalPolicy, soeOPTIONAL}, + {sfData, soeOPTIONAL}, +})) + +/** This transaction updates a single asset vault. */ +TRANSACTION(ttVAULT_SET, 66, VaultSet, Delegation::delegatable, ({ + {sfVaultID, soeREQUIRED}, + {sfAssetsMaximum, soeOPTIONAL}, + {sfDomainID, soeOPTIONAL}, // PermissionedDomainID + {sfData, soeOPTIONAL}, +})) + +/** This transaction deletes a single asset vault. */ +TRANSACTION(ttVAULT_DELETE, 67, VaultDelete, Delegation::delegatable, ({ + {sfVaultID, soeREQUIRED}, +})) + +/** This transaction trades assets for shares with a vault. */ +TRANSACTION(ttVAULT_DEPOSIT, 68, VaultDeposit, Delegation::delegatable, ({ + {sfVaultID, soeREQUIRED}, + {sfAmount, soeREQUIRED, soeMPTSupported}, +})) + +/** This transaction trades shares for assets with a vault. */ +TRANSACTION(ttVAULT_WITHDRAW, 69, VaultWithdraw, Delegation::delegatable, ({ + {sfVaultID, soeREQUIRED}, + {sfAmount, soeREQUIRED, soeMPTSupported}, + {sfDestination, soeOPTIONAL}, +})) + +/** This transaction claws back tokens from a vault. */ +TRANSACTION(ttVAULT_CLAWBACK, 70, VaultClawback, Delegation::delegatable, ({ + {sfVaultID, soeREQUIRED}, + {sfHolder, soeREQUIRED}, + {sfAmount, soeOPTIONAL, soeMPTSupported}, +})) + /** This system-generated transaction type is used to update the status of the various amendments. For details, see: https://xrpl.org/amendments.html diff --git a/include/xrpl/protocol/jss.h b/include/xrpl/protocol/jss.h index bb2ffa7bb0..de3560d7f9 100644 --- a/include/xrpl/protocol/jss.h +++ b/include/xrpl/protocol/jss.h @@ -95,10 +95,10 @@ JSS(SigningPubKey); // field. JSS(Subject); // in: Credential transactions JSS(TakerGets); // field. JSS(TakerPays); // field. -JSS(TxnSignature); // field. JSS(TradingFee); // in/out: AMM trading fee JSS(TransactionType); // in: TransactionSign. JSS(TransferRate); // in: TransferRate. +JSS(TxnSignature); // field. JSS(URI); // field. 
JSS(VoteSlots); // out: AMM Vote JSS(aborted); // out: InboundLedger @@ -449,6 +449,7 @@ JSS(node_reads_hit); // out: GetCounts JSS(node_reads_total); // out: GetCounts JSS(node_reads_duration_us); // out: GetCounts JSS(node_size); // out: server_info +JSS(nodes); // out: VaultInfo JSS(nodestore); // out: GetCounts JSS(node_writes); // out: GetCounts JSS(node_written_bytes); // out: GetCounts @@ -559,6 +560,7 @@ JSS(server_status); // out: NetworkOPs JSS(server_version); // out: NetworkOPs JSS(settle_delay); // out: AccountChannels JSS(severity); // in: LogLevel +JSS(shares); // out: VaultInfo JSS(signature); // out: NetworkOPs, ChannelAuthorize JSS(signature_verified); // out: ChannelVerify JSS(signing_key); // out: NetworkOPs @@ -684,6 +686,7 @@ JSS(validations); // out: AmendmentTableImpl JSS(validator_list_threshold); // out: ValidatorList JSS(validator_sites); // out: ValidatorSites JSS(value); // out: STAmount +JSS(vault_id); // in: VaultInfo JSS(version); // out: RPCVersion JSS(vetoed); // out: AmendmentTableImpl JSS(volume_a); // out: BookChanges diff --git a/src/libxrpl/json/json_value.cpp b/src/libxrpl/json/json_value.cpp index 86a8ed5aee..a1e0a04875 100644 --- a/src/libxrpl/json/json_value.cpp +++ b/src/libxrpl/json/json_value.cpp @@ -237,6 +237,13 @@ Value::Value(char const* value) : type_(stringValue), allocated_(true) value_.string_ = valueAllocator()->duplicateStringValue(value); } +Value::Value(ripple::Number const& value) : type_(stringValue), allocated_(true) +{ + auto const tmp = to_string(value); + value_.string_ = + valueAllocator()->duplicateStringValue(tmp.c_str(), tmp.length()); +} + Value::Value(std::string const& value) : type_(stringValue), allocated_(true) { value_.string_ = valueAllocator()->duplicateStringValue( @@ -893,6 +900,12 @@ Value::operator[](StaticString const& key) return resolveReference(key, true); } +Value const& +Value::operator[](StaticString const& key) const +{ + return (*this)[key.c_str()]; +} + Value& Value::append(Value const& value) { diff --git a/src/libxrpl/protocol/AMMCore.cpp b/src/libxrpl/protocol/AMMCore.cpp index aa48827195..60660357ea 100644 --- a/src/libxrpl/protocol/AMMCore.cpp +++ b/src/libxrpl/protocol/AMMCore.cpp @@ -39,18 +39,6 @@ namespace ripple { -AccountID -ammAccountID( - std::uint16_t prefix, - uint256 const& parentHash, - uint256 const& ammID) -{ - ripesha_hasher rsh; - auto const hash = sha512Half(prefix, parentHash, ammID); - rsh(hash.data(), hash.size()); - return AccountID{static_cast(rsh)}; -} - Currency ammLPTCurrency(Currency const& cur1, Currency const& cur2) { diff --git a/src/libxrpl/protocol/Asset.cpp b/src/libxrpl/protocol/Asset.cpp index d4a2fccb4a..104d627d81 100644 --- a/src/libxrpl/protocol/Asset.cpp +++ b/src/libxrpl/protocol/Asset.cpp @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -51,6 +52,12 @@ Asset::setJson(Json::Value& jv) const std::visit([&](auto&& issue) { issue.setJson(jv); }, issue_); } +STAmount +Asset::operator()(Number const& number) const +{ + return STAmount{*this, number}; +} + std::string to_string(Asset const& asset) { @@ -78,11 +85,4 @@ assetFromJson(Json::Value const& v) return mptIssueFromJson(v); } -Json::Value -to_json(Asset const& asset) -{ - return std::visit( - [&](auto const& issue) { return to_json(issue); }, asset.value()); -} - } // namespace ripple diff --git a/src/libxrpl/protocol/Indexes.cpp b/src/libxrpl/protocol/Indexes.cpp index 8256c7a77c..2426092d13 100644 --- a/src/libxrpl/protocol/Indexes.cpp +++ b/src/libxrpl/protocol/Indexes.cpp @@ 
-95,6 +95,7 @@ enum class LedgerNameSpace : std::uint16_t { CREDENTIAL = 'D', PERMISSIONED_DOMAIN = 'm', DELEGATE = 'E', + VAULT = 'V', // No longer used or supported. Left here to reserve the space // to avoid accidental reuse. @@ -552,6 +553,12 @@ credential( indexHash(LedgerNameSpace::CREDENTIAL, subject, issuer, credType)}; } +Keylet +vault(AccountID const& owner, std::uint32_t seq) noexcept +{ + return vault(indexHash(LedgerNameSpace::VAULT, owner, seq)); +} + Keylet permissionedDomain(AccountID const& account, std::uint32_t seq) noexcept { diff --git a/src/libxrpl/protocol/Keylet.cpp b/src/libxrpl/protocol/Keylet.cpp index 8deb2d735a..a83186547c 100644 --- a/src/libxrpl/protocol/Keylet.cpp +++ b/src/libxrpl/protocol/Keylet.cpp @@ -37,7 +37,7 @@ Keylet::check(STLedgerEntry const& sle) const if (type == ltCHILD) return sle.getType() != ltDIR_NODE; - return sle.getType() == type; + return sle.getType() == type && sle.key() == key; } } // namespace ripple diff --git a/src/libxrpl/protocol/MPTIssue.cpp b/src/libxrpl/protocol/MPTIssue.cpp index c8decb2b3f..9238b4302d 100644 --- a/src/libxrpl/protocol/MPTIssue.cpp +++ b/src/libxrpl/protocol/MPTIssue.cpp @@ -48,12 +48,6 @@ MPTIssue::getIssuer() const return *account; } -MPTID const& -MPTIssue::getMptID() const -{ - return mptID_; -} - std::string MPTIssue::getText() const { diff --git a/src/libxrpl/protocol/STAmount.cpp b/src/libxrpl/protocol/STAmount.cpp index f02042bc2c..02de5d4c58 100644 --- a/src/libxrpl/protocol/STAmount.cpp +++ b/src/libxrpl/protocol/STAmount.cpp @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -309,6 +310,7 @@ STAmount::xrp() const "Cannot return non-native STAmount as XRPAmount"); auto drops = static_cast(mValue); + XRPL_ASSERT(mOffset == 0, "ripple::STAmount::xrp : amount is canonical"); if (mIsNegative) drops = -drops; @@ -338,6 +340,7 @@ STAmount::mpt() const Throw("Cannot return STAmount as MPTAmount"); auto value = static_cast(mValue); + XRPL_ASSERT(mOffset == 0, "ripple::STAmount::mpt : amount is canonical"); if (mIsNegative) value = -value; @@ -865,75 +868,16 @@ amountFromQuality(std::uint64_t rate) STAmount amountFromString(Asset const& asset, std::string const& amount) { - static boost::regex const reNumber( - "^" // the beginning of the string - "([-+]?)" // (optional) + or - character - "(0|[1-9][0-9]*)" // a number (no leading zeroes, unless 0) - "(\\.([0-9]+))?" // (optional) period followed by any number - "([eE]([+-]?)([0-9]+))?" // (optional) E, optional + or -, any number - "$", - boost::regex_constants::optimize); - - boost::smatch match; - - if (!boost::regex_match(amount, match, reNumber)) - Throw("Number '" + amount + "' is not valid"); - - // Match fields: - // 0 = whole input - // 1 = sign - // 2 = integer portion - // 3 = whole fraction (with '.') - // 4 = fraction (without '.') - // 5 = whole exponent (with 'e') - // 6 = exponent sign - // 7 = exponent number - - // CHECKME: Why 32? Shouldn't this be 16? 
- if ((match[2].length() + match[4].length()) > 32) - Throw("Number '" + amount + "' is overlong"); - - bool negative = (match[1].matched && (match[1] == "-")); - - // Can't specify XRP or MPT using fractional representation - if ((asset.native() || asset.holds()) && match[3].matched) + auto const parts = partsFromString(amount); + if ((asset.native() || asset.holds()) && parts.exponent < 0) Throw( "XRP and MPT must be specified as integral amount."); - - std::uint64_t mantissa; - int exponent; - - if (!match[4].matched) // integer only - { - mantissa = - beast::lexicalCastThrow(std::string(match[2])); - exponent = 0; - } - else - { - // integer and fraction - mantissa = beast::lexicalCastThrow(match[2] + match[4]); - exponent = -(match[4].length()); - } - - if (match[5].matched) - { - // we have an exponent - if (match[6].matched && (match[6] == "-")) - exponent -= beast::lexicalCastThrow(std::string(match[7])); - else - exponent += beast::lexicalCastThrow(std::string(match[7])); - } - - return {asset, mantissa, exponent, negative}; + return {asset, parts.mantissa, parts.exponent, parts.negative}; } STAmount amountFromJson(SField const& name, Json::Value const& v) { - STAmount::mantissa_type mantissa = 0; - STAmount::exponent_type exponent = 0; - bool negative = false; Asset asset; Json::Value value; @@ -1025,36 +969,38 @@ amountFromJson(SField const& name, Json::Value const& v) } } + NumberParts parts; + if (value.isInt()) { if (value.asInt() >= 0) { - mantissa = value.asInt(); + parts.mantissa = value.asInt(); } else { - mantissa = -value.asInt(); - negative = true; + parts.mantissa = -value.asInt(); + parts.negative = true; } } else if (value.isUInt()) { - mantissa = v.asUInt(); + parts.mantissa = v.asUInt(); } else if (value.isString()) { - auto const ret = amountFromString(asset, value.asString()); - - mantissa = ret.mantissa(); - exponent = ret.exponent(); - negative = ret.negative(); + parts = partsFromString(value.asString()); + // Can't specify XRP or MPT using fractional representation + if ((asset.native() || asset.holds()) && parts.exponent < 0) + Throw( + "XRP and MPT must be specified as integral amount."); } else { Throw("invalid amount type"); } - return {name, asset, mantissa, exponent, negative}; + return {name, asset, parts.mantissa, parts.exponent, parts.negative}; } bool diff --git a/src/libxrpl/protocol/STNumber.cpp b/src/libxrpl/protocol/STNumber.cpp index c0cdcccd6e..975fd5723b 100644 --- a/src/libxrpl/protocol/STNumber.cpp +++ b/src/libxrpl/protocol/STNumber.cpp @@ -18,12 +18,16 @@ //============================================================================== #include +#include #include #include #include #include #include +#include +#include + #include #include #include @@ -115,4 +119,100 @@ operator<<(std::ostream& out, STNumber const& rhs) return out << rhs.getText(); } +NumberParts +partsFromString(std::string const& number) +{ + static boost::regex const reNumber( + "^" // the beginning of the string + "([-+]?)" // (optional) + or - character + "(0|[1-9][0-9]*)" // a number (no leading zeroes, unless 0) + "(\\.([0-9]+))?" // (optional) period followed by any number + "([eE]([+-]?)([0-9]+))?" 
// (optional) E, optional + or -, any number + "$", + boost::regex_constants::optimize); + + boost::smatch match; + + if (!boost::regex_match(number, match, reNumber)) + Throw("'" + number + "' is not a number"); + + // Match fields: + // 0 = whole input + // 1 = sign + // 2 = integer portion + // 3 = whole fraction (with '.') + // 4 = fraction (without '.') + // 5 = whole exponent (with 'e') + // 6 = exponent sign + // 7 = exponent number + + bool negative = (match[1].matched && (match[1] == "-")); + + std::uint64_t mantissa; + int exponent; + + if (!match[4].matched) // integer only + { + mantissa = boost::lexical_cast(std::string(match[2])); + exponent = 0; + } + else + { + // integer and fraction + mantissa = boost::lexical_cast(match[2] + match[4]); + exponent = -(match[4].length()); + } + + if (match[5].matched) + { + // we have an exponent + if (match[6].matched && (match[6] == "-")) + exponent -= boost::lexical_cast(std::string(match[7])); + else + exponent += boost::lexical_cast(std::string(match[7])); + } + + return {mantissa, exponent, negative}; +} + +STNumber +numberFromJson(SField const& field, Json::Value const& value) +{ + NumberParts parts; + + if (value.isInt()) + { + if (value.asInt() >= 0) + { + parts.mantissa = value.asInt(); + } + else + { + parts.mantissa = -value.asInt(); + parts.negative = true; + } + } + else if (value.isUInt()) + { + parts.mantissa = value.asUInt(); + } + else if (value.isString()) + { + parts = partsFromString(value.asString()); + // Only strings can represent out-of-range values. + if (parts.mantissa > std::numeric_limits::max()) + Throw("too high"); + } + else + { + Throw("not a number"); + } + + std::int64_t mantissa = parts.mantissa; + if (parts.negative) + mantissa = -mantissa; + + return STNumber{field, Number{mantissa, parts.exponent}}; +} + } // namespace ripple diff --git a/src/libxrpl/protocol/STParsedJSON.cpp b/src/libxrpl/protocol/STParsedJSON.cpp index 1437ed922b..bc9aad0a13 100644 --- a/src/libxrpl/protocol/STParsedJSON.cpp +++ b/src/libxrpl/protocol/STParsedJSON.cpp @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -613,6 +614,20 @@ parseLeaf( break; + case STI_NUMBER: + try + { + ret = + detail::make_stvar(numberFromJson(field, value)); + } + catch (std::exception const&) + { + error = invalid_data(json_name, fieldName); + return ret; + } + + break; + case STI_VECTOR256: if (!value.isArrayOrNull()) { diff --git a/src/libxrpl/protocol/STTx.cpp b/src/libxrpl/protocol/STTx.cpp index a60f7325f0..7b6b4c1ee2 100644 --- a/src/libxrpl/protocol/STTx.cpp +++ b/src/libxrpl/protocol/STTx.cpp @@ -224,6 +224,12 @@ STTx::getSeqProxy() const return SeqProxy{SeqProxy::ticket, *ticketSeq}; } +std::uint32_t +STTx::getSeqValue() const +{ + return getSeqProxy().value(); +} + void STTx::sign(PublicKey const& publicKey, SecretKey const& secretKey) { diff --git a/src/libxrpl/protocol/STVar.cpp b/src/libxrpl/protocol/STVar.cpp index 3af0345c4e..24954c4add 100644 --- a/src/libxrpl/protocol/STVar.cpp +++ b/src/libxrpl/protocol/STVar.cpp @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -192,6 +193,9 @@ STVar::constructST(SerializedTypeID id, int depth, Args&&... 
args) case STI_AMOUNT: construct(std::forward(args)...); return; + case STI_NUMBER: + construct(std::forward(args)...); + return; case STI_UINT128: construct(std::forward(args)...); return; diff --git a/src/libxrpl/protocol/TER.cpp b/src/libxrpl/protocol/TER.cpp index 8a3a6af0de..943a0e601b 100644 --- a/src/libxrpl/protocol/TER.cpp +++ b/src/libxrpl/protocol/TER.cpp @@ -123,6 +123,9 @@ transResults() MAKE_ERROR(tecARRAY_TOO_LARGE, "Array is too large."), MAKE_ERROR(tecLOCKED, "Fund is locked."), MAKE_ERROR(tecBAD_CREDENTIALS, "Bad credentials."), + MAKE_ERROR(tecWRONG_ASSET, "Wrong asset given."), + MAKE_ERROR(tecLIMIT_EXCEEDED, "Limit exceeded."), + MAKE_ERROR(tecPSEUDO_ACCOUNT, "This operation is not allowed against a pseudo-account."), MAKE_ERROR(tefALREADY, "The exact transaction was already in this ledger."), MAKE_ERROR(tefBAD_ADD_AUTH, "Not authorized to add account."), @@ -228,6 +231,7 @@ transResults() MAKE_ERROR(terQUEUED, "Held until escalated fee drops."), MAKE_ERROR(terPRE_TICKET, "Ticket is not yet in ledger."), MAKE_ERROR(terNO_AMM, "AMM doesn't exist for the asset pair."), + MAKE_ERROR(terADDRESS_COLLISION, "Failed to allocate an unique account address."), MAKE_ERROR(tesSUCCESS, "The transaction was applied. Only final in a validated ledger."), }; diff --git a/src/test/app/AMM_test.cpp b/src/test/app/AMM_test.cpp index 87988315f4..e0b3dc1ec7 100644 --- a/src/test/app/AMM_test.cpp +++ b/src/test/app/AMM_test.cpp @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -31,6 +32,7 @@ #include #include #include +#include #include @@ -54,11 +56,27 @@ private: using namespace jtx; - // XRP to IOU - testAMM([&](AMM& ammAlice, Env&) { - BEAST_EXPECT(ammAlice.expectBalances( - XRP(10'000), USD(10'000), IOUAmount{10'000'000, 0})); - }); + // XRP to IOU, with featureSingleAssetVault + testAMM( + [&](AMM& ammAlice, Env&) { + BEAST_EXPECT(ammAlice.expectBalances( + XRP(10'000), USD(10'000), IOUAmount{10'000'000, 0})); + }, + {}, + 0, + {}, + {supported_amendments() | featureSingleAssetVault}); + + // XRP to IOU, without featureSingleAssetVault + testAMM( + [&](AMM& ammAlice, Env&) { + BEAST_EXPECT(ammAlice.expectBalances( + XRP(10'000), USD(10'000), IOUAmount{10'000'000, 0})); + }, + {}, + 0, + {}, + {supported_amendments() - featureSingleAssetVault}); // IOU to IOU testAMM( @@ -7137,6 +7155,50 @@ private: }); } + void + testFailedPseudoAccount() + { + using namespace test::jtx; + + auto const testCase = [&](std::string suffix, FeatureBitset features) { + testcase("Failed pseudo-account allocation " + suffix); + Env env{*this, features}; + env.fund(XRP(30'000), gw, alice); + env.close(); + env(trust(alice, gw["USD"](30'000), 0)); + env(pay(gw, alice, USD(10'000))); + env.close(); + + STAmount amount = XRP(10'000); + STAmount amount2 = USD(10'000); + auto const keylet = keylet::amm(amount.issue(), amount2.issue()); + for (int i = 0; i < 256; ++i) + { + AccountID const accountId = + ripple::pseudoAccountAddress(*env.current(), keylet.key); + + env(pay(env.master.id(), accountId, XRP(1000)), + seq(autofill), + fee(autofill), + sig(autofill)); + } + + AMM ammAlice( + env, + alice, + amount, + amount2, + features[featureSingleAssetVault] ? 
ter{terADDRESS_COLLISION} + : ter{tecDUPLICATE}); + }; + + testCase( + "tecDUPLICATE", supported_amendments() - featureSingleAssetVault); + testCase( + "terADDRESS_COLLISION", + supported_amendments() | featureSingleAssetVault); + } + void run() override { @@ -7192,6 +7254,7 @@ private: testAMMDepositWithFrozenAssets(all - fixAMMv1_1 - featureAMMClawback); testFixReserveCheckOnWithdrawal(all); testFixReserveCheckOnWithdrawal(all - fixAMMv1_2); + testFailedPseudoAccount(); } }; diff --git a/src/test/app/Credentials_test.cpp b/src/test/app/Credentials_test.cpp index 87946c13bb..fa6505e926 100644 --- a/src/test/app/Credentials_test.cpp +++ b/src/test/app/Credentials_test.cpp @@ -43,16 +43,6 @@ checkVL( return strHex(expected) == strHex(sle->getFieldVL(field)); } -static inline Keylet -credentialKeylet( - test::jtx::Account const& subject, - test::jtx::Account const& issuer, - std::string_view credType) -{ - return keylet::credential( - subject.id(), issuer.id(), Slice(credType.data(), credType.size())); -} - struct Credentials_test : public beast::unit_test::suite { void @@ -72,7 +62,7 @@ struct Credentials_test : public beast::unit_test::suite { testcase("Create for subject."); - auto const credKey = credentialKeylet(subject, issuer, credType); + auto const credKey = credentials::keylet(subject, issuer, credType); env.fund(XRP(5000), subject, issuer, other); env.close(); @@ -150,7 +140,7 @@ struct Credentials_test : public beast::unit_test::suite { testcase("Create for themself."); - auto const credKey = credentialKeylet(issuer, issuer, credType); + auto const credKey = credentials::keylet(issuer, issuer, credType); env(credentials::create(issuer, issuer, credType), credentials::uri(uri)); @@ -224,7 +214,7 @@ struct Credentials_test : public beast::unit_test::suite { testcase("Delete issuer before accept"); - auto const credKey = credentialKeylet(subject, issuer, credType); + auto const credKey = credentials::keylet(subject, issuer, credType); env(credentials::create(subject, issuer, credType)); env.close(); @@ -260,7 +250,7 @@ struct Credentials_test : public beast::unit_test::suite { testcase("Delete issuer after accept"); - auto const credKey = credentialKeylet(subject, issuer, credType); + auto const credKey = credentials::keylet(subject, issuer, credType); env(credentials::create(subject, issuer, credType)); env.close(); env(credentials::accept(subject, issuer, credType)); @@ -298,7 +288,7 @@ struct Credentials_test : public beast::unit_test::suite { testcase("Delete subject before accept"); - auto const credKey = credentialKeylet(subject, issuer, credType); + auto const credKey = credentials::keylet(subject, issuer, credType); env(credentials::create(subject, issuer, credType)); env.close(); @@ -334,7 +324,7 @@ struct Credentials_test : public beast::unit_test::suite { testcase("Delete subject after accept"); - auto const credKey = credentialKeylet(subject, issuer, credType); + auto const credKey = credentials::keylet(subject, issuer, credType); env(credentials::create(subject, issuer, credType)); env.close(); env(credentials::accept(subject, issuer, credType)); @@ -372,7 +362,7 @@ struct Credentials_test : public beast::unit_test::suite { testcase("Delete by other"); - auto const credKey = credentialKeylet(subject, issuer, credType); + auto const credKey = credentials::keylet(subject, issuer, credType); auto jv = credentials::create(subject, issuer, credType); uint32_t const t = env.current() ->info() @@ -417,7 +407,7 @@ struct Credentials_test : public beast::unit_test::suite 
env.close(); { auto const credKey = - credentialKeylet(subject, issuer, credType); + credentials::keylet(subject, issuer, credType); BEAST_EXPECT(!env.le(credKey)); BEAST_EXPECT(!ownerCount(env, subject)); BEAST_EXPECT(!ownerCount(env, issuer)); @@ -439,7 +429,7 @@ struct Credentials_test : public beast::unit_test::suite env.close(); { auto const credKey = - credentialKeylet(subject, issuer, credType); + credentials::keylet(subject, issuer, credType); BEAST_EXPECT(!env.le(credKey)); BEAST_EXPECT(!ownerCount(env, subject)); BEAST_EXPECT(!ownerCount(env, issuer)); diff --git a/src/test/app/MPToken_test.cpp b/src/test/app/MPToken_test.cpp index 0f29e22dd9..a6055d85f6 100644 --- a/src/test/app/MPToken_test.cpp +++ b/src/test/app/MPToken_test.cpp @@ -412,6 +412,9 @@ class MPToken_test : public beast::unit_test::suite // bob creates a mptoken mptAlice.authorize({.account = bob, .holderCount = 1}); + mptAlice.authorize( + {.account = bob, .holderCount = 1, .err = tecDUPLICATE}); + // bob deletes his mptoken mptAlice.authorize( {.account = bob, .holderCount = 0, .flags = tfMPTUnauthorize}); @@ -621,6 +624,25 @@ class MPToken_test : public beast::unit_test::suite // locks up bob's mptoken again mptAlice.set({.account = alice, .holder = bob, .flags = tfMPTLock}); + if (!features[featureSingleAssetVault]) + { + // Delete bobs' mptoken even though it is locked + mptAlice.authorize({.account = bob, .flags = tfMPTUnauthorize}); + + mptAlice.set( + {.account = alice, + .holder = bob, + .flags = tfMPTUnlock, + .err = tecOBJECT_NOT_FOUND}); + + return; + } + + // Cannot delete locked MPToken + mptAlice.authorize( + {.account = bob, + .flags = tfMPTUnauthorize, + .err = tecNO_PERMISSION}); // alice unlocks mptissuance mptAlice.set({.account = alice, .flags = tfMPTUnlock}); @@ -2283,20 +2305,27 @@ public: FeatureBitset const all{supported_amendments()}; // MPTokenIssuanceCreate - testCreateValidation(all); - testCreateEnabled(all); + testCreateValidation(all - featureSingleAssetVault); + testCreateValidation(all | featureSingleAssetVault); + testCreateEnabled(all - featureSingleAssetVault); + testCreateEnabled(all | featureSingleAssetVault); // MPTokenIssuanceDestroy - testDestroyValidation(all); - testDestroyEnabled(all); + testDestroyValidation(all - featureSingleAssetVault); + testDestroyValidation(all | featureSingleAssetVault); + testDestroyEnabled(all - featureSingleAssetVault); + testDestroyEnabled(all | featureSingleAssetVault); // MPTokenAuthorize - testAuthorizeValidation(all); - testAuthorizeEnabled(all); + testAuthorizeValidation(all - featureSingleAssetVault); + testAuthorizeValidation(all | featureSingleAssetVault); + testAuthorizeEnabled(all - featureSingleAssetVault); + testAuthorizeEnabled(all | featureSingleAssetVault); // MPTokenIssuanceSet testSetValidation(all); - testSetEnabled(all); + testSetEnabled(all - featureSingleAssetVault); + testSetEnabled(all | featureSingleAssetVault); // MPT clawback testClawbackValidation(all); diff --git a/src/test/app/Vault_test.cpp b/src/test/app/Vault_test.cpp new file mode 100644 index 0000000000..67cc3812df --- /dev/null +++ b/src/test/app/Vault_test.cpp @@ -0,0 +1,3085 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2024 Ripple Labs Inc. 
+ + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { + +using namespace test::jtx; + +class Vault_test : public beast::unit_test::suite +{ + static auto constexpr negativeAmount = + [](PrettyAsset const& asset) -> PrettyAmount { + return {STAmount{asset.raw(), 1ul, 0, true, STAmount::unchecked{}}, ""}; + }; + + void + testSequences() + { + using namespace test::jtx; + + auto const testSequence = [this]( + std::string const& prefix, + Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + Account const& charlie, + Vault& vault, + PrettyAsset const& asset) { + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + tx[sfData] = "AFEED00E"; + tx[sfAssetsMaximum] = asset(100).number(); + env(tx); + env.close(); + BEAST_EXPECT(env.le(keylet)); + + auto const share = [&env, keylet = keylet, this]() -> PrettyAsset { + auto const vault = env.le(keylet); + BEAST_EXPECT(vault != nullptr); + return MPTIssue(vault->at(sfShareMPTID)); + }(); + + // Several 3rd party accounts which cannot receive funds + Account alice{"alice"}; + Account dave{"dave"}; + Account erin{"erin"}; // not authorized by issuer + env.fund(XRP(1000), alice, dave, erin); + env(fset(alice, asfDepositAuth)); + env(fset(dave, asfRequireDest)); + env.close(); + + { + testcase(prefix + " fail to deposit more than assets held"); + auto tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(10000)}); + env(tx, ter(tecINSUFFICIENT_FUNDS)); + } + + { + testcase(prefix + " deposit non-zero amount"); + auto tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(50)}); + env(tx); + } + + { + testcase(prefix + " deposit non-zero amount again"); + auto tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(50)}); + env(tx); + } + + { + testcase(prefix + " fail to delete non-empty vault"); + auto tx = vault.del({.owner = owner, .id = keylet.key}); + env(tx, ter(tecHAS_OBLIGATIONS)); + } + + { + testcase(prefix + " fail to update because wrong owner"); + auto tx = vault.set({.owner = issuer, .id = keylet.key}); + tx[sfAssetsMaximum] = asset(50).number(); + env(tx, ter(tecNO_PERMISSION)); + } + + { + testcase( + prefix + " fail to set maximum lower than current amount"); + auto tx = vault.set({.owner = owner, .id = keylet.key}); + tx[sfAssetsMaximum] = asset(50).number(); + env(tx, ter(tecLIMIT_EXCEEDED)); + } + + { + testcase(prefix + " set 
maximum higher than current amount"); + auto tx = vault.set({.owner = owner, .id = keylet.key}); + tx[sfAssetsMaximum] = asset(150).number(); + env(tx); + } + + { + testcase(prefix + " set data"); + auto tx = vault.set({.owner = owner, .id = keylet.key}); + tx[sfData] = "0"; + env(tx); + } + + { + testcase(prefix + " fail to set domain on public vault"); + auto tx = vault.set({.owner = owner, .id = keylet.key}); + tx[sfDomainID] = to_string(base_uint<256>(42ul)); + env(tx, ter{tecNO_PERMISSION}); + } + + { + testcase(prefix + " fail to deposit more than maximum"); + auto tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(100)}); + env(tx, ter(tecLIMIT_EXCEEDED)); + } + + { + testcase(prefix + " reset maximum to zero i.e. not enforced"); + auto tx = vault.set({.owner = owner, .id = keylet.key}); + tx[sfAssetsMaximum] = asset(0).number(); + env(tx); + } + + { + testcase(prefix + " fail to withdraw more than assets held"); + auto tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(1000)}); + env(tx, ter(tecINSUFFICIENT_FUNDS)); + } + + { + testcase(prefix + " deposit some more"); + auto tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(100)}); + env(tx); + } + + { + testcase(prefix + " clawback some"); + auto code = + asset.raw().native() ? ter(temMALFORMED) : ter(tesSUCCESS); + auto tx = vault.clawback( + {.issuer = issuer, + .id = keylet.key, + .holder = depositor, + .amount = asset(10)}); + env(tx, code); + } + + { + testcase(prefix + " clawback all"); + auto code = asset.raw().native() ? ter(tecNO_PERMISSION) + : ter(tesSUCCESS); + auto tx = vault.clawback( + {.issuer = issuer, .id = keylet.key, .holder = depositor}); + env(tx, code); + } + + if (!asset.raw().native()) + { + testcase(prefix + " deposit again"); + auto tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(200)}); + env(tx); + } + + { + testcase( + prefix + " fail to withdraw to 3rd party lsfDepositAuth"); + auto tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(100)}); + tx[sfDestination] = alice.human(); + env(tx, ter{tecNO_PERMISSION}); + } + + if (!asset.raw().native()) + { + testcase( + prefix + " fail to withdraw to 3rd party no authorization"); + auto tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(100)}); + tx[sfDestination] = erin.human(); + env(tx, + ter{asset.raw().holds() ? 
tecNO_LINE : tecNO_AUTH}); + } + + if (!asset.raw().native() && asset.raw().holds()) + { + testcase(prefix + " temporary authorization for 3rd party"); + env(trust(erin, asset(1000))); + env(trust(issuer, asset(0), erin, tfSetfAuth)); + env(pay(issuer, erin, asset(10))); + + // Erin deposits all in vault, then sends shares to depositor + auto tx = vault.deposit( + {.depositor = erin, .id = keylet.key, .amount = asset(10)}); + env(tx); + env(pay(erin, depositor, share(10))); + + testcase(prefix + " withdraw to authorized 3rd party"); + // Depositor withdraws shares, destined to Erin + tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(10)}); + tx[sfDestination] = erin.human(); + env(tx); + // Erin returns assets to issuer + env(pay(erin, issuer, asset(10))); + + testcase(prefix + " fail to pay to unauthorized 3rd party"); + env(trust(erin, asset(0))); + // Erin has MPToken but is no longer authorized to hold assets + env(pay(depositor, erin, share(1)), ter{tecNO_LINE}); + } + + { + testcase( + prefix + + " fail to withdraw to 3rd party lsfRequireDestTag"); + auto tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(100)}); + tx[sfDestination] = dave.human(); + env(tx, ter{tecDST_TAG_NEEDED}); + } + + { + testcase(prefix + " withdraw to authorized 3rd party"); + auto tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(100)}); + tx[sfDestination] = charlie.human(); + env(tx); + } + + { + testcase(prefix + " withdraw to issuer"); + auto tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(50)}); + tx[sfDestination] = issuer.human(); + env(tx); + } + + { + testcase(prefix + " withdraw remaining assets"); + auto tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(50)}); + env(tx); + } + + { + testcase(prefix + " fail to delete because wrong owner"); + auto tx = vault.del({.owner = issuer, .id = keylet.key}); + env(tx, ter(tecNO_PERMISSION)); + } + + { + testcase(prefix + " delete empty vault"); + auto tx = vault.del({.owner = owner, .id = keylet.key}); + env(tx); + BEAST_EXPECT(!env.le(keylet)); + } + }; + + auto testCases = [this, &testSequence]( + std::string prefix, + std::function setup) { + Env env{*this, supported_amendments() | featureSingleAssetVault}; + Account issuer{"issuer"}; + Account owner{"owner"}; + Account depositor{"depositor"}; + Account charlie{"charlie"}; // authorized 3rd party + Vault vault{env}; + env.fund(XRP(1000), issuer, owner, depositor, charlie); + env.close(); + env(fset(issuer, asfAllowTrustLineClawback)); + env(fset(issuer, asfRequireAuth)); + env.close(); + env.require(flags(issuer, asfAllowTrustLineClawback)); + env.require(flags(issuer, asfRequireAuth)); + + PrettyAsset asset = setup(env, issuer, owner, depositor, charlie); + testSequence( + prefix, env, issuer, owner, depositor, charlie, vault, asset); + }; + + testCases( + "XRP", + [](Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + Account const& charlie) -> PrettyAsset { + return {xrpIssue(), 1'000'000}; + }); + + testCases( + "IOU", + [](Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + Account const& charlie) -> Asset { + PrettyAsset asset = issuer["IOU"]; + env(trust(owner, asset(1000))); + env(trust(depositor, asset(1000))); + env(trust(charlie, asset(1000))); + env(trust(issuer, asset(0), owner, tfSetfAuth)); + env(trust(issuer, asset(0), depositor, 
tfSetfAuth)); + env(trust(issuer, asset(0), charlie, tfSetfAuth)); + env(pay(issuer, depositor, asset(1000))); + env.close(); + return asset; + }); + + testCases( + "MPT", + [](Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + Account const& charlie) -> Asset { + MPTTester mptt{env, issuer, mptInitNoFund}; + mptt.create( + {.flags = + tfMPTCanClawback | tfMPTCanTransfer | tfMPTCanLock}); + PrettyAsset asset = mptt.issuanceID(); + mptt.authorize({.account = depositor}); + mptt.authorize({.account = charlie}); + env(pay(issuer, depositor, asset(1000))); + env.close(); + return asset; + }); + } + + void + testPreflight() + { + using namespace test::jtx; + + struct CaseArgs + { + FeatureBitset features = + supported_amendments() | featureSingleAssetVault; + }; + + auto testCase = [&, this]( + std::function test, + CaseArgs args = {}) { + Env env{*this, args.features}; + Account issuer{"issuer"}; + Account owner{"owner"}; + Vault vault{env}; + env.fund(XRP(1000), issuer, owner); + env.close(); + + env(fset(issuer, asfAllowTrustLineClawback)); + env(fset(issuer, asfRequireAuth)); + env.close(); + + PrettyAsset asset = issuer["IOU"]; + env(trust(owner, asset(1000))); + env(trust(issuer, asset(0), owner, tfSetfAuth)); + env(pay(issuer, owner, asset(1000))); + env.close(); + + test(env, issuer, owner, asset, vault); + }; + + testCase( + [&](Env& env, + Account const& issuer, + Account const& owner, + Asset const& asset, + Vault& vault) { + testcase("disabled single asset vault"); + + auto [tx, keylet] = + vault.create({.owner = owner, .asset = asset}); + env(tx, ter{temDISABLED}); + + { + auto tx = vault.set({.owner = owner, .id = keylet.key}); + env(tx, ter{temDISABLED}); + } + + { + auto tx = vault.deposit( + {.depositor = owner, + .id = keylet.key, + .amount = asset(10)}); + env(tx, ter{temDISABLED}); + } + + { + auto tx = vault.withdraw( + {.depositor = owner, + .id = keylet.key, + .amount = asset(10)}); + env(tx, ter{temDISABLED}); + } + + { + auto tx = vault.clawback( + {.issuer = issuer, + .id = keylet.key, + .holder = owner, + .amount = asset(10)}); + env(tx, ter{temDISABLED}); + } + + { + auto tx = vault.del({.owner = owner, .id = keylet.key}); + env(tx, ter{temDISABLED}); + } + }, + {.features = supported_amendments() - featureSingleAssetVault}); + + testCase([&](Env& env, + Account const& issuer, + Account const& owner, + Asset const& asset, + Vault& vault) { + testcase("invalid flags"); + + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + tx[sfFlags] = tfClearDeepFreeze; + env(tx, ter{temINVALID_FLAG}); + + { + auto tx = vault.set({.owner = owner, .id = keylet.key}); + tx[sfFlags] = tfClearDeepFreeze; + env(tx, ter{temINVALID_FLAG}); + } + + { + auto tx = vault.deposit( + {.depositor = owner, + .id = keylet.key, + .amount = asset(10)}); + tx[sfFlags] = tfClearDeepFreeze; + env(tx, ter{temINVALID_FLAG}); + } + + { + auto tx = vault.withdraw( + {.depositor = owner, + .id = keylet.key, + .amount = asset(10)}); + tx[sfFlags] = tfClearDeepFreeze; + env(tx, ter{temINVALID_FLAG}); + } + + { + auto tx = vault.clawback( + {.issuer = issuer, + .id = keylet.key, + .holder = owner, + .amount = asset(10)}); + tx[sfFlags] = tfClearDeepFreeze; + env(tx, ter{temINVALID_FLAG}); + } + + { + auto tx = vault.del({.owner = owner, .id = keylet.key}); + tx[sfFlags] = tfClearDeepFreeze; + env(tx, ter{temINVALID_FLAG}); + } + }); + + testCase([&](Env& env, + Account const& issuer, + Account const& owner, + Asset const& asset, + Vault& vault) { + 
testcase("invalid fee"); + + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + tx[jss::Fee] = "-1"; + env(tx, ter{temBAD_FEE}); + + { + auto tx = vault.set({.owner = owner, .id = keylet.key}); + tx[jss::Fee] = "-1"; + env(tx, ter{temBAD_FEE}); + } + + { + auto tx = vault.deposit( + {.depositor = owner, + .id = keylet.key, + .amount = asset(10)}); + tx[jss::Fee] = "-1"; + env(tx, ter{temBAD_FEE}); + } + + { + auto tx = vault.withdraw( + {.depositor = owner, + .id = keylet.key, + .amount = asset(10)}); + tx[jss::Fee] = "-1"; + env(tx, ter{temBAD_FEE}); + } + + { + auto tx = vault.clawback( + {.issuer = issuer, + .id = keylet.key, + .holder = owner, + .amount = asset(10)}); + tx[jss::Fee] = "-1"; + env(tx, ter{temBAD_FEE}); + } + + { + auto tx = vault.del({.owner = owner, .id = keylet.key}); + tx[jss::Fee] = "-1"; + env(tx, ter{temBAD_FEE}); + } + }); + + testCase( + [&](Env& env, + Account const&, + Account const& owner, + Asset const&, + Vault& vault) { + testcase("disabled permissioned domain"); + + auto [tx, keylet] = + vault.create({.owner = owner, .asset = xrpIssue()}); + tx[sfDomainID] = to_string(base_uint<256>(42ul)); + env(tx, ter{temDISABLED}); + + { + auto tx = vault.set({.owner = owner, .id = keylet.key}); + tx[sfDomainID] = to_string(base_uint<256>(42ul)); + env(tx, ter{temDISABLED}); + } + + { + auto tx = vault.set({.owner = owner, .id = keylet.key}); + tx[sfDomainID] = "0"; + env(tx, ter{temDISABLED}); + } + }, + {.features = (supported_amendments() | featureSingleAssetVault) - + featurePermissionedDomains}); + + testCase([&](Env& env, + Account const& issuer, + Account const& owner, + Asset const& asset, + Vault& vault) { + testcase("use zero vault"); + + auto [tx, keylet] = + vault.create({.owner = owner, .asset = xrpIssue()}); + + { + auto tx = vault.set({ + .owner = owner, + .id = beast::zero, + }); + env(tx, ter{temMALFORMED}); + } + + { + auto tx = vault.deposit( + {.depositor = owner, + .id = beast::zero, + .amount = asset(10)}); + env(tx, ter(temMALFORMED)); + } + + { + auto tx = vault.withdraw( + {.depositor = owner, + .id = beast::zero, + .amount = asset(10)}); + env(tx, ter{temMALFORMED}); + } + + { + auto tx = vault.clawback( + {.issuer = issuer, + .id = beast::zero, + .holder = owner, + .amount = asset(10)}); + env(tx, ter{temMALFORMED}); + } + + { + auto tx = vault.del({ + .owner = owner, + .id = beast::zero, + }); + env(tx, ter{temMALFORMED}); + } + }); + + testCase([&](Env& env, + Account const& issuer, + Account const& owner, + Asset const& asset, + Vault& vault) { + testcase("clawback from self"); + + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + + { + auto tx = vault.clawback( + {.issuer = issuer, + .id = keylet.key, + .holder = issuer, + .amount = asset(10)}); + env(tx, ter{temMALFORMED}); + } + }); + + testCase([&](Env& env, + Account const&, + Account const& owner, + Asset const& asset, + Vault& vault) { + testcase("withdraw to bad destination"); + + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + + { + auto tx = vault.withdraw( + {.depositor = owner, + .id = keylet.key, + .amount = asset(10)}); + tx[jss::Destination] = "0"; + env(tx, ter{temMALFORMED}); + } + }); + + testCase([&](Env& env, + Account const&, + Account const& owner, + Asset const& asset, + Vault& vault) { + testcase("create or set invalid data"); + + auto [tx1, keylet] = vault.create({.owner = owner, .asset = asset}); + + { + auto tx = tx1; + tx[sfData] = ""; + env(tx, ter(temMALFORMED)); + } + + { + auto tx = tx1; + // A 
hexadecimal string of 257 bytes. + tx[sfData] = std::string(514, 'A'); + env(tx, ter(temMALFORMED)); + } + + { + auto tx = vault.set({.owner = owner, .id = keylet.key}); + tx[sfData] = ""; + env(tx, ter{temMALFORMED}); + } + + { + auto tx = vault.set({.owner = owner, .id = keylet.key}); + // A hexadecimal string of 257 bytes. + tx[sfData] = std::string(514, 'A'); + env(tx, ter{temMALFORMED}); + } + }); + + testCase([&](Env& env, + Account const&, + Account const& owner, + Asset const& asset, + Vault& vault) { + testcase("set nothing updated"); + + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + + { + auto tx = vault.set({.owner = owner, .id = keylet.key}); + env(tx, ter{temMALFORMED}); + } + }); + + testCase([&](Env& env, + Account const&, + Account const& owner, + Asset const& asset, + Vault& vault) { + testcase("create with invalid metadata"); + + auto [tx1, keylet] = vault.create({.owner = owner, .asset = asset}); + + { + auto tx = tx1; + tx[sfMPTokenMetadata] = ""; + env(tx, ter(temMALFORMED)); + } + + { + auto tx = tx1; + // This metadata is for the share token. + // A hexadecimal string of 1025 bytes. + tx[sfMPTokenMetadata] = std::string(2050, 'B'); + env(tx, ter(temMALFORMED)); + } + }); + + testCase([&](Env& env, + Account const&, + Account const& owner, + Asset const& asset, + Vault& vault) { + testcase("set negative maximum"); + + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + + { + auto tx = vault.set({.owner = owner, .id = keylet.key}); + tx[sfAssetsMaximum] = negativeAmount(asset).number(); + env(tx, ter{temMALFORMED}); + } + }); + + testCase([&](Env& env, + Account const&, + Account const& owner, + Asset const& asset, + Vault& vault) { + testcase("invalid deposit amount"); + + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + + { + auto tx = vault.deposit( + {.depositor = owner, + .id = keylet.key, + .amount = negativeAmount(asset)}); + env(tx, ter(temBAD_AMOUNT)); + } + + { + auto tx = vault.deposit( + {.depositor = owner, .id = keylet.key, .amount = asset(0)}); + env(tx, ter(temBAD_AMOUNT)); + } + }); + + testCase([&](Env& env, + Account const&, + Account const& owner, + Asset const& asset, + Vault& vault) { + testcase("invalid set immutable flag"); + + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + + { + auto tx = vault.set({.owner = owner, .id = keylet.key}); + tx[sfFlags] = tfVaultPrivate; + env(tx, ter(temINVALID_FLAG)); + } + }); + + testCase([&](Env& env, + Account const&, + Account const& owner, + Asset const& asset, + Vault& vault) { + testcase("invalid withdraw amount"); + + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + + { + auto tx = vault.withdraw( + {.depositor = owner, + .id = keylet.key, + .amount = negativeAmount(asset)}); + env(tx, ter(temBAD_AMOUNT)); + } + + { + auto tx = vault.withdraw( + {.depositor = owner, .id = keylet.key, .amount = asset(0)}); + env(tx, ter(temBAD_AMOUNT)); + } + }); + + testCase([&](Env& env, + Account const& issuer, + Account const& owner, + Asset const& asset, + Vault& vault) { + testcase("invalid clawback"); + + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + + { + auto tx = vault.clawback( + {.issuer = owner, + .id = keylet.key, + .holder = issuer, + .amount = asset(50)}); + env(tx, ter(temMALFORMED)); + } + + { + auto tx = vault.clawback( + {.issuer = issuer, + .id = keylet.key, + .holder = owner, + .amount = negativeAmount(asset)}); + env(tx, ter(temBAD_AMOUNT)); + } + }); + + 
testCase([&](Env& env, + Account const&, + Account const& owner, + Asset const& asset, + Vault& vault) { + testcase("invalid create"); + + auto [tx1, keylet] = vault.create({.owner = owner, .asset = asset}); + + { + auto tx = tx1; + tx[sfWithdrawalPolicy] = 0; + env(tx, ter(temMALFORMED)); + } + + { + auto tx = tx1; + tx[sfDomainID] = to_string(base_uint<256>(42ul)); + env(tx, ter{temMALFORMED}); + } + + { + auto tx = tx1; + tx[sfAssetsMaximum] = negativeAmount(asset).number(); + env(tx, ter{temMALFORMED}); + } + + { + auto tx = tx1; + tx[sfFlags] = tfVaultPrivate; + tx[sfDomainID] = "0"; + env(tx, ter{temMALFORMED}); + } + }); + } + + // Test for non-asset specific behaviors. + void + testCreateFailXRP() + { + using namespace test::jtx; + + auto testCase = [this](std::function test) { + Env env{*this, supported_amendments() | featureSingleAssetVault}; + Account issuer{"issuer"}; + Account owner{"owner"}; + Account depositor{"depositor"}; + env.fund(XRP(1000), issuer, owner, depositor); + env.close(); + Vault vault{env}; + Asset asset = xrpIssue(); + + test(env, issuer, owner, depositor, asset, vault); + }; + + testCase([this]( + Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + PrettyAsset const& asset, + Vault& vault) { + testcase("nothing to set"); + auto tx = vault.set({.owner = owner, .id = keylet::skip().key}); + tx[sfAssetsMaximum] = asset(0).number(); + env(tx, ter(tecNO_ENTRY)); + }); + + testCase([this]( + Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + PrettyAsset const& asset, + Vault& vault) { + testcase("nothing to deposit to"); + auto tx = vault.deposit( + {.depositor = depositor, + .id = keylet::skip().key, + .amount = asset(10)}); + env(tx, ter(tecNO_ENTRY)); + }); + + testCase([this]( + Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + PrettyAsset const& asset, + Vault& vault) { + testcase("nothing to withdraw from"); + auto tx = vault.withdraw( + {.depositor = depositor, + .id = keylet::skip().key, + .amount = asset(10)}); + env(tx, ter(tecNO_ENTRY)); + }); + + testCase([this]( + Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + Asset const& asset, + Vault& vault) { + testcase("nothing to delete"); + auto tx = vault.del({.owner = owner, .id = keylet::skip().key}); + env(tx, ter(tecNO_ENTRY)); + }); + + testCase([this]( + Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + Asset const& asset, + Vault& vault) { + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + testcase("transaction is good"); + env(tx); + }); + + testCase([this]( + Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + Asset const& asset, + Vault& vault) { + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + tx[sfWithdrawalPolicy] = 1; + testcase("explicitly select withdrawal policy"); + env(tx); + }); + + testCase([this]( + Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + Asset const& asset, + Vault& vault) { + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + testcase("insufficient fee"); + env(tx, fee(env.current()->fees().base), ter(telINSUF_FEE_P)); + }); + + testCase([this]( + Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + Asset const& asset, + Vault& vault) { + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + 
testcase("insufficient reserve"); + // It is possible to construct a complicated mathematical + // expression for this amount, but it is sadly not easy. + env(pay(owner, issuer, XRP(775))); + env.close(); + env(tx, ter(tecINSUFFICIENT_RESERVE)); + }); + + testCase([this]( + Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + Asset const& asset, + Vault& vault) { + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + tx[sfFlags] = tfVaultPrivate; + tx[sfDomainID] = to_string(base_uint<256>(42ul)); + testcase("non-existing domain"); + env(tx, ter{tecOBJECT_NOT_FOUND}); + }); + } + + void + testCreateFailIOU() + { + using namespace test::jtx; + { + { + testcase("IOU fail create frozen"); + Env env{ + *this, supported_amendments() | featureSingleAssetVault}; + Account issuer{"issuer"}; + Account owner{"owner"}; + env.fund(XRP(1000), issuer, owner); + env.close(); + env(fset(issuer, asfGlobalFreeze)); + env.close(); + + Vault vault{env}; + Asset asset = issuer["IOU"]; + auto [tx, keylet] = + vault.create({.owner = owner, .asset = asset}); + + env(tx, ter(tecFROZEN)); + env.close(); + } + + { + testcase("IOU fail create no ripling"); + Env env{ + *this, supported_amendments() | featureSingleAssetVault}; + Account issuer{"issuer"}; + Account owner{"owner"}; + env.fund(XRP(1000), issuer, owner); + env.close(); + env(fclear(issuer, asfDefaultRipple)); + env.close(); + + Vault vault{env}; + Asset asset = issuer["IOU"]; + auto [tx, keylet] = + vault.create({.owner = owner, .asset = asset}); + env(tx, ter(terNO_RIPPLE)); + env.close(); + } + + { + testcase("IOU no issuer"); + Env env{ + *this, supported_amendments() | featureSingleAssetVault}; + Account issuer{"issuer"}; + Account owner{"owner"}; + env.fund(XRP(1000), owner); + env.close(); + + Vault vault{env}; + Asset asset = issuer["IOU"]; + { + auto [tx, keylet] = + vault.create({.owner = owner, .asset = asset}); + env(tx, ter(terNO_ACCOUNT)); + env.close(); + } + } + } + + { + testcase("IOU fail create vault for AMM LPToken"); + Env env{*this, supported_amendments() | featureSingleAssetVault}; + Account const gw("gateway"); + Account const alice("alice"); + Account const carol("carol"); + IOU const USD = gw["USD"]; + + auto const [asset1, asset2] = + std::pair(XRP(10000), USD(10000)); + auto tofund = [&](STAmount const& a) -> STAmount { + if (a.native()) + { + auto const defXRP = XRP(30000); + if (a <= defXRP) + return defXRP; + return a + XRP(1000); + } + auto const defIOU = STAmount{a.issue(), 30000}; + if (a <= defIOU) + return defIOU; + return a + STAmount{a.issue(), 1000}; + }; + auto const toFund1 = tofund(asset1); + auto const toFund2 = tofund(asset2); + BEAST_EXPECT(asset1 <= toFund1 && asset2 <= toFund2); + + if (!asset1.native() && !asset2.native()) + fund(env, gw, {alice, carol}, {toFund1, toFund2}, Fund::All); + else if (asset1.native()) + fund(env, gw, {alice, carol}, toFund1, {toFund2}, Fund::All); + else if (asset2.native()) + fund(env, gw, {alice, carol}, toFund2, {toFund1}, Fund::All); + + AMM ammAlice( + env, alice, asset1, asset2, CreateArg{.log = false, .tfee = 0}); + + Account const owner{"owner"}; + env.fund(XRP(1000000), owner); + + Vault vault{env}; + auto [tx, k] = + vault.create({.owner = owner, .asset = ammAlice.lptIssue()}); + env(tx, ter{tecWRONG_ASSET}); + env.close(); + } + } + + void + testCreateFailMPT() + { + using namespace test::jtx; + Env env{*this, supported_amendments() | featureSingleAssetVault}; + Account issuer{"issuer"}; + Account owner{"owner"}; + 
Account depositor{"depositor"}; + env.fund(XRP(1000), issuer, owner, depositor); + env.close(); + Vault vault{env}; + + MPTTester mptt{env, issuer, mptInitNoFund}; + + // Locked because that is the default flag. + mptt.create(); + Asset asset = mptt.issuanceID(); + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + env(tx, ter(tecNO_AUTH)); + } + + void + testNonTransferableShares() + { + using namespace test::jtx; + Env env{*this, supported_amendments() | featureSingleAssetVault}; + Account issuer{"issuer"}; + Account owner{"owner"}; + Account depositor{"depositor"}; + env.fund(XRP(1000), issuer, owner, depositor); + env.close(); + + Vault vault{env}; + PrettyAsset asset = issuer["IOU"]; + env.trust(asset(1000), owner); + env(pay(issuer, owner, asset(100))); + env.trust(asset(1000), depositor); + env(pay(issuer, depositor, asset(100))); + env.close(); + + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + tx[sfFlags] = tfVaultShareNonTransferable; + env(tx); + env.close(); + + { + testcase("nontransferable deposits"); + auto tx1 = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(40)}); + env(tx1); + + auto tx2 = vault.deposit( + {.depositor = owner, .id = keylet.key, .amount = asset(60)}); + env(tx2); + env.close(); + } + + auto const vaultAccount = // + [&env, key = keylet.key, this]() -> AccountID { + auto jvVault = env.rpc("vault_info", strHex(key)); + + BEAST_EXPECT( + jvVault[jss::result][jss::vault][sfAssetsTotal] == "100"); + BEAST_EXPECT( + jvVault[jss::result][jss::vault][jss::shares] + [sfOutstandingAmount] == "100"); + + // Vault pseudo-account + return parseBase58( + jvVault[jss::result][jss::vault][jss::Account] + .asString()) + .value(); + }(); + + auto const MptID = makeMptID(1, vaultAccount); + Asset shares = MptID; + + { + testcase("nontransferable shares cannot be moved"); + env(pay(owner, depositor, shares(10)), ter{tecNO_AUTH}); + env(pay(depositor, owner, shares(10)), ter{tecNO_AUTH}); + } + + { + testcase("nontransferable shares can be used to withdraw"); + auto tx1 = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(20)}); + env(tx1); + + auto tx2 = vault.withdraw( + {.depositor = owner, .id = keylet.key, .amount = asset(30)}); + env(tx2); + env.close(); + } + + { + testcase("nontransferable shares balance check"); + auto jvVault = env.rpc("vault_info", strHex(keylet.key)); + BEAST_EXPECT( + jvVault[jss::result][jss::vault][sfAssetsTotal] == "50"); + BEAST_EXPECT( + jvVault[jss::result][jss::vault][jss::shares] + [sfOutstandingAmount] == "50"); + } + + { + testcase("nontransferable shares withdraw rest"); + auto tx1 = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(20)}); + env(tx1); + + auto tx2 = vault.withdraw( + {.depositor = owner, .id = keylet.key, .amount = asset(30)}); + env(tx2); + env.close(); + } + + { + testcase("nontransferable shares delete empty vault"); + auto tx = vault.del({.owner = owner, .id = keylet.key}); + env(tx); + BEAST_EXPECT(!env.le(keylet)); + } + } + + void + testWithMPT() + { + using namespace test::jtx; + + struct CaseArgs + { + bool enableClawback = true; + }; + + auto testCase = [this]( + std::function test, + CaseArgs args = {}) { + Env env{*this, supported_amendments() | featureSingleAssetVault}; + Account issuer{"issuer"}; + Account owner{"owner"}; + Account depositor{"depositor"}; + env.fund(XRP(1000), issuer, owner, depositor); + env.close(); + Vault vault{env}; + + MPTTester mptt{env, issuer, 
mptInitNoFund}; + mptt.create( + {.flags = tfMPTCanTransfer | tfMPTCanLock | + (args.enableClawback ? lsfMPTCanClawback + : LedgerSpecificFlags(0)) | + tfMPTRequireAuth}); + PrettyAsset asset = mptt.issuanceID(); + mptt.authorize({.account = owner}); + mptt.authorize({.account = issuer, .holder = owner}); + mptt.authorize({.account = depositor}); + mptt.authorize({.account = issuer, .holder = depositor}); + env(pay(issuer, depositor, asset(1000))); + env.close(); + + test(env, issuer, owner, depositor, asset, vault, mptt); + }; + + testCase([this]( + Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + PrettyAsset const& asset, + Vault& vault, + MPTTester& mptt) { + testcase("MPT nothing to clawback from"); + auto tx = vault.clawback( + {.issuer = issuer, + .id = keylet::skip().key, + .holder = depositor, + .amount = asset(10)}); + env(tx, ter(tecNO_ENTRY)); + }); + + testCase([this]( + Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + Asset const& asset, + Vault& vault, + MPTTester& mptt) { + testcase("MPT global lock blocks create"); + mptt.set({.account = issuer, .flags = tfMPTLock}); + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + env(tx, ter(tecLOCKED)); + }); + + testCase([this]( + Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + Asset const& asset, + Vault& vault, + MPTTester& mptt) { + testcase("MPT global lock blocks deposit"); + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + env(tx); + env.close(); + + mptt.set({.account = issuer, .flags = tfMPTLock}); + env.close(); + + tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(100)}); + env(tx, ter{tecLOCKED}); + env.close(); + + // Can delete empty vault, even if global lock + tx = vault.del({.owner = owner, .id = keylet.key}); + env(tx); + }); + + testCase([this]( + Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + Asset const& asset, + Vault& vault, + MPTTester& mptt) { + testcase("MPT global lock blocks withdrawal"); + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + env(tx); + env.close(); + tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(100)}); + env(tx); + env.close(); + + // Check that the OutstandingAmount field of MPTIssuance + // accounts for the issued shares. 
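+             // Nothing has been withdrawn yet, so the 100 deposited units
+             // should be matched by exactly 100 outstanding shares.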
+ auto v = env.le(keylet); + BEAST_EXPECT(v); + MPTID share = (*v)[sfShareMPTID]; + auto issuance = env.le(keylet::mptIssuance(share)); + BEAST_EXPECT(issuance); + Number outstandingShares = issuance->at(sfOutstandingAmount); + BEAST_EXPECT(outstandingShares == 100); + + mptt.set({.account = issuer, .flags = tfMPTLock}); + env.close(); + + tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(100)}); + env(tx, ter(tecLOCKED)); + + tx[sfDestination] = issuer.human(); + env(tx, ter(tecLOCKED)); + + // Clawback is still permitted, even with global lock + tx = vault.clawback( + {.issuer = issuer, + .id = keylet.key, + .holder = depositor, + .amount = asset(0)}); + env(tx); + env.close(); + + // Can delete empty vault, even if global lock + tx = vault.del({.owner = owner, .id = keylet.key}); + env(tx); + }); + + testCase([this]( + Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + PrettyAsset const& asset, + Vault& vault, + MPTTester& mptt) { + testcase("MPT only issuer can clawback"); + + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + env(tx); + env.close(); + + tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(100)}); + env(tx); + env.close(); + + { + auto tx = vault.clawback( + {.issuer = owner, .id = keylet.key, .holder = depositor}); + env(tx, ter(tecNO_PERMISSION)); + } + }); + + testCase([this]( + Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + PrettyAsset const& asset, + Vault& vault, + MPTTester& mptt) { + testcase("MPT issuance deleted"); + + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + env(tx); + env.close(); + + tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(1000)}); + env(tx); + env.close(); + + { + auto tx = vault.clawback( + {.issuer = issuer, + .id = keylet.key, + .holder = depositor, + .amount = asset(0)}); + env(tx); + } + + mptt.destroy({.issuer = issuer, .id = mptt.issuanceID()}); + env.close(); + + { + auto [tx, keylet] = + vault.create({.owner = depositor, .asset = asset}); + env(tx, ter{tecOBJECT_NOT_FOUND}); + } + + { + auto tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(10)}); + env(tx, ter{tecOBJECT_NOT_FOUND}); + } + + { + auto tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(10)}); + env(tx, ter{tecOBJECT_NOT_FOUND}); + } + + { + auto tx = vault.clawback( + {.issuer = issuer, + .id = keylet.key, + .holder = depositor, + .amount = asset(0)}); + env(tx, ter{tecOBJECT_NOT_FOUND}); + } + + env(vault.del({.owner = owner, .id = keylet.key})); + }); + + testCase( + [this]( + Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + PrettyAsset const& asset, + Vault& vault, + MPTTester& mptt) { + testcase("MPT clawback disabled"); + + auto [tx, keylet] = + vault.create({.owner = owner, .asset = asset}); + env(tx); + env.close(); + + tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(1000)}); + env(tx); + env.close(); + + { + auto tx = vault.clawback( + {.issuer = issuer, + .id = keylet.key, + .holder = depositor, + .amount = asset(0)}); + env(tx, ter{tecNO_PERMISSION}); + } + }, + {.enableClawback = false}); + + testCase([this]( + Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + Asset const& asset, + Vault& vault, + MPTTester& mptt) { + testcase("MPT un-authorization"); 
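+             // Scenario: the issuer un-authorizes the depositor after a
+             // deposit. Direct withdrawal and further deposits should fail
+             // with tecNO_AUTH, while withdrawal to a still-authorized
+             // destination and issuer clawback should keep working.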
+ auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + env(tx); + env.close(); + tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(1000)}); + env(tx); + env.close(); + + mptt.authorize( + {.account = issuer, + .holder = depositor, + .flags = tfMPTUnauthorize}); + env.close(); + + { + auto tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(100)}); + env(tx, ter(tecNO_AUTH)); + + // Withdrawal to other (authorized) accounts works + tx[sfDestination] = issuer.human(); + env(tx); + tx[sfDestination] = owner.human(); + env(tx); + env.close(); + } + + { + // Cannot deposit some more + auto tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(100)}); + env(tx, ter(tecNO_AUTH)); + } + + // Clawback works + tx = vault.clawback( + {.issuer = issuer, + .id = keylet.key, + .holder = depositor, + .amount = asset(800)}); + env(tx); + + env(vault.del({.owner = owner, .id = keylet.key})); + }); + + testCase([this]( + Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + Asset const& asset, + Vault& vault, + MPTTester& mptt) { + testcase("MPT lock of vault pseudo-account"); + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + env(tx); + env.close(); + + auto const vaultAccount = + [&env, keylet = keylet, this]() -> AccountID { + auto const vault = env.le(keylet); + BEAST_EXPECT(vault != nullptr); + return vault->at(sfAccount); + }(); + + tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(100)}); + env(tx); + env.close(); + + tx = [&]() { + Json::Value jv; + jv[jss::Account] = issuer.human(); + jv[sfMPTokenIssuanceID] = + to_string(asset.get().getMptID()); + jv[jss::Holder] = toBase58(vaultAccount); + jv[jss::TransactionType] = jss::MPTokenIssuanceSet; + jv[jss::Flags] = tfMPTLock; + return jv; + }(); + env(tx); + env.close(); + + tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(100)}); + env(tx, ter(tecLOCKED)); + + tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(100)}); + env(tx, ter(tecLOCKED)); + + // Clawback works, even when locked + tx = vault.clawback( + {.issuer = issuer, + .id = keylet.key, + .holder = depositor, + .amount = asset(100)}); + env(tx); + + // Can delete an empty vault even when asset is locked. 
+ tx = vault.del({.owner = owner, .id = keylet.key}); + env(tx); + }); + + { + testcase("MPT shares to a vault"); + + Env env{*this, supported_amendments() | featureSingleAssetVault}; + Account owner{"owner"}; + Account issuer{"issuer"}; + env.fund(XRP(1000000), owner, issuer); + env.close(); + Vault vault{env}; + + MPTTester mptt{env, issuer, mptInitNoFund}; + mptt.create( + {.flags = tfMPTCanTransfer | tfMPTCanLock | lsfMPTCanClawback | + tfMPTRequireAuth}); + mptt.authorize({.account = owner}); + mptt.authorize({.account = issuer, .holder = owner}); + PrettyAsset asset = mptt.issuanceID(); + env(pay(issuer, owner, asset(100))); + auto [tx1, k1] = vault.create({.owner = owner, .asset = asset}); + env(tx1); + env.close(); + + auto const shares = [&env, keylet = k1, this]() -> Asset { + auto const vault = env.le(keylet); + BEAST_EXPECT(vault != nullptr); + return MPTIssue(vault->at(sfShareMPTID)); + }(); + + auto [tx2, k2] = vault.create({.owner = owner, .asset = shares}); + env(tx2, ter{tecWRONG_ASSET}); + env.close(); + } + } + + void + testWithIOU() + { + auto testCase = + [&, this]( + std::function vaultAccount, + Vault& vault, + PrettyAsset const& asset, + std::function issuanceId, + std::function vaultBalance)> + test) { + Env env{ + *this, supported_amendments() | featureSingleAssetVault}; + Account const owner{"owner"}; + Account const issuer{"issuer"}; + Account const charlie{"charlie"}; + Vault vault{env}; + env.fund(XRP(1000), issuer, owner, charlie); + env(fset(issuer, asfAllowTrustLineClawback)); + env.close(); + + PrettyAsset const asset = issuer["IOU"]; + env.trust(asset(1000), owner); + env(pay(issuer, owner, asset(200))); + env(rate(issuer, 1.25)); + env.close(); + + auto const [tx, keylet] = + vault.create({.owner = owner, .asset = asset}); + env(tx); + env.close(); + + auto const vaultAccount = + [&env](ripple::Keylet keylet) -> AccountID { + return env.le(keylet)->at(sfAccount); + }; + auto const issuanceId = [&env](ripple::Keylet keylet) -> MPTID { + return env.le(keylet)->at(sfShareMPTID); + }; + auto const vaultBalance = // + [&env, &vaultAccount, issue = asset.raw().get()]( + ripple::Keylet keylet) -> PrettyAmount { + auto const account = vaultAccount(keylet); + auto const sle = env.le(keylet::line(account, issue)); + if (sle == nullptr) + return { + STAmount(issue, 0), + env.lookup(issue.account).name()}; + auto amount = sle->getFieldAmount(sfBalance); + amount.setIssuer(issue.account); + if (account > issue.account) + amount.negate(); + return {amount, env.lookup(issue.account).name()}; + }; + + test( + env, + owner, + issuer, + charlie, + vaultAccount, + vault, + asset, + issuanceId, + vaultBalance); + }; + + testCase([&, this]( + Env& env, + Account const& owner, + Account const& issuer, + Account const&, + auto vaultAccount, + Vault& vault, + PrettyAsset const& asset, + auto&&...) 
{ + testcase("IOU cannot use different asset"); + PrettyAsset const foo = issuer["FOO"]; + + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + env(tx); + env.close(); + + { + // Cannot create new trustline to a vault + auto tx = [&, account = vaultAccount(keylet)]() { + Json::Value jv; + jv[jss::Account] = issuer.human(); + { + auto& ja = jv[jss::LimitAmount] = + foo(0).value().getJson(JsonOptions::none); + ja[jss::issuer] = toBase58(account); + } + jv[jss::TransactionType] = jss::TrustSet; + jv[jss::Flags] = tfSetFreeze; + return jv; + }(); + env(tx, ter{tecNO_PERMISSION}); + env.close(); + } + + { + auto tx = vault.deposit( + {.depositor = issuer, .id = keylet.key, .amount = foo(20)}); + env(tx, ter{tecWRONG_ASSET}); + env.close(); + } + + { + auto tx = vault.withdraw( + {.depositor = issuer, .id = keylet.key, .amount = foo(20)}); + env(tx, ter{tecWRONG_ASSET}); + env.close(); + } + + env(vault.del({.owner = owner, .id = keylet.key})); + env.close(); + }); + + testCase([&, this]( + Env& env, + Account const& owner, + Account const& issuer, + Account const& charlie, + auto vaultAccount, + Vault& vault, + PrettyAsset const& asset, + auto issuanceId, + auto) { + testcase("IOU frozen trust line to vault account"); + + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + env(tx); + env.close(); + + env(vault.deposit( + {.depositor = owner, .id = keylet.key, .amount = asset(100)})); + env.close(); + + Asset const share = Asset(issuanceId(keylet)); + + // Freeze the trustline to the vault + auto trustSet = [&, account = vaultAccount(keylet)]() { + Json::Value jv; + jv[jss::Account] = issuer.human(); + { + auto& ja = jv[jss::LimitAmount] = + asset(0).value().getJson(JsonOptions::none); + ja[jss::issuer] = toBase58(account); + } + jv[jss::TransactionType] = jss::TrustSet; + jv[jss::Flags] = tfSetFreeze; + return jv; + }(); + env(trustSet); + env.close(); + + { + // Note, the "frozen" state of the trust line to vault account + // is reported as "locked" state of the vault shares, because + // this state is attached to shares by means of the transitive + // isFrozen. 
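+                 // Until the freeze is cleared below, deposits and
+                 // withdrawals (including to a third party) are expected to
+                 // fail with tecLOCKED; only issuer clawback stays available.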
+ auto tx = vault.deposit( + {.depositor = owner, + .id = keylet.key, + .amount = asset(80)}); + env(tx, ter{tecLOCKED}); + } + + { + auto tx = vault.withdraw( + {.depositor = owner, + .id = keylet.key, + .amount = asset(100)}); + env(tx, ter{tecLOCKED}); + + // also when trying to withdraw to a 3rd party + tx[sfDestination] = charlie.human(); + env(tx, ter{tecLOCKED}); + env.close(); + } + + { + // Clawback works, even when locked + auto tx = vault.clawback( + {.issuer = issuer, + .id = keylet.key, + .holder = owner, + .amount = asset(50)}); + env(tx); + env.close(); + } + + // Clear the frozen state + trustSet[jss::Flags] = tfClearFreeze; + env(trustSet); + env.close(); + + env(vault.withdraw( + {.depositor = owner, .id = keylet.key, .amount = share(50)})); + + env(vault.del({.owner = owner, .id = keylet.key})); + env.close(); + }); + + testCase([&, this]( + Env& env, + Account const& owner, + Account const& issuer, + Account const& charlie, + auto, + Vault& vault, + PrettyAsset const& asset, + auto issuanceId, + auto vaultBalance) { + testcase("IOU transfer fees not applied"); + + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + env(tx); + env.close(); + + env(vault.deposit( + {.depositor = owner, .id = keylet.key, .amount = asset(100)})); + env.close(); + + auto const issue = asset.raw().get(); + Asset const share = Asset(issuanceId(keylet)); + + // transfer fees ignored on deposit + BEAST_EXPECT(env.balance(owner, issue) == asset(100)); + BEAST_EXPECT(vaultBalance(keylet) == asset(100)); + + { + auto tx = vault.clawback( + {.issuer = issuer, + .id = keylet.key, + .holder = owner, + .amount = asset(50)}); + env(tx); + env.close(); + } + + // transfer fees ignored on clawback + BEAST_EXPECT(env.balance(owner, issue) == asset(100)); + BEAST_EXPECT(vaultBalance(keylet) == asset(50)); + + env(vault.withdraw( + {.depositor = owner, .id = keylet.key, .amount = share(20)})); + + // transfer fees ignored on withdraw + BEAST_EXPECT(env.balance(owner, issue) == asset(120)); + BEAST_EXPECT(vaultBalance(keylet) == asset(30)); + + { + auto tx = vault.withdraw( + {.depositor = owner, + .id = keylet.key, + .amount = share(30)}); + tx[sfDestination] = charlie.human(); + env(tx); + } + + // transfer fees ignored on withdraw to 3rd party + BEAST_EXPECT(env.balance(owner, issue) == asset(120)); + BEAST_EXPECT(env.balance(charlie, issue) == asset(30)); + BEAST_EXPECT(vaultBalance(keylet) == asset(0)); + + env(vault.del({.owner = owner, .id = keylet.key})); + env.close(); + }); + + testCase([&, this]( + Env& env, + Account const& owner, + Account const& issuer, + Account const& charlie, + auto, + Vault& vault, + PrettyAsset const& asset, + auto&&...) 
{ + testcase("IOU frozen trust line to depositor"); + + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + env(tx); + env.close(); + + env(vault.deposit( + {.depositor = owner, .id = keylet.key, .amount = asset(100)})); + env.close(); + + // Withdraw to 3rd party works + auto const withdrawToCharlie = [&](ripple::Keylet keylet) { + auto tx = vault.withdraw( + {.depositor = owner, + .id = keylet.key, + .amount = asset(10)}); + tx[sfDestination] = charlie.human(); + return tx; + }(keylet); + env(withdrawToCharlie); + + // Freeze the owner + env(trust(issuer, asset(0), owner, tfSetFreeze)); + env.close(); + + // Cannot withdraw + auto const withdraw = vault.withdraw( + {.depositor = owner, .id = keylet.key, .amount = asset(10)}); + env(withdraw, ter{tecFROZEN}); + + // Cannot withdraw to 3rd party + env(withdrawToCharlie, ter{tecLOCKED}); + env.close(); + + { + // Cannot deposit some more + auto tx = vault.deposit( + {.depositor = owner, + .id = keylet.key, + .amount = asset(10)}); + env(tx, ter{tecFROZEN}); + } + + { + // Clawback still works + auto tx = vault.clawback( + {.issuer = issuer, + .id = keylet.key, + .holder = owner, + .amount = asset(0)}); + env(tx); + env.close(); + } + + env(vault.del({.owner = owner, .id = keylet.key})); + env.close(); + }); + + testCase([&, this]( + Env& env, + Account const& owner, + Account const& issuer, + Account const& charlie, + auto, + Vault& vault, + PrettyAsset const& asset, + auto&&...) { + testcase("IOU frozen trust line to 3rd party"); + + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + env(tx); + env.close(); + + env(vault.deposit( + {.depositor = owner, .id = keylet.key, .amount = asset(100)})); + env.close(); + + // Withdraw to 3rd party works + auto const withdrawToCharlie = [&](ripple::Keylet keylet) { + auto tx = vault.withdraw( + {.depositor = owner, + .id = keylet.key, + .amount = asset(10)}); + tx[sfDestination] = charlie.human(); + return tx; + }(keylet); + env(withdrawToCharlie); + + // Freeze the 3rd party + env(trust(issuer, asset(0), charlie, tfSetFreeze)); + env.close(); + + // Can withdraw + auto const withdraw = vault.withdraw( + {.depositor = owner, .id = keylet.key, .amount = asset(10)}); + env(withdraw); + env.close(); + + // Cannot withdraw to 3rd party + env(withdrawToCharlie, ter{tecFROZEN}); + env.close(); + + env(vault.clawback( + {.issuer = issuer, + .id = keylet.key, + .holder = owner, + .amount = asset(0)})); + env.close(); + + env(vault.del({.owner = owner, .id = keylet.key})); + env.close(); + }); + + testCase([&, this]( + Env& env, + Account const& owner, + Account const& issuer, + Account const& charlie, + auto, + Vault& vault, + PrettyAsset const& asset, + auto&&...) 
{ + testcase("IOU global freeze"); + + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + env(tx); + env.close(); + + env(vault.deposit( + {.depositor = owner, .id = keylet.key, .amount = asset(100)})); + env.close(); + + env(fset(issuer, asfGlobalFreeze)); + env.close(); + + { + // Cannot withdraw + auto tx = vault.withdraw( + {.depositor = owner, + .id = keylet.key, + .amount = asset(10)}); + env(tx, ter{tecFROZEN}); + + // Cannot withdraw to 3rd party + tx[sfDestination] = charlie.human(); + env(tx, ter{tecFROZEN}); + env.close(); + + // Cannot deposit some more + tx = vault.deposit( + {.depositor = owner, + .id = keylet.key, + .amount = asset(10)}); + + env(tx, ter{tecFROZEN}); + } + + // Clawback is permitted + env(vault.clawback( + {.issuer = issuer, + .id = keylet.key, + .holder = owner, + .amount = asset(0)})); + env.close(); + + env(vault.del({.owner = owner, .id = keylet.key})); + env.close(); + }); + } + + void + testWithDomainCheck() + { + testcase("private vault"); + + Env env{*this, supported_amendments() | featureSingleAssetVault}; + Account issuer{"issuer"}; + Account owner{"owner"}; + Account depositor{"depositor"}; + Account charlie{"charlie"}; + Account pdOwner{"pdOwner"}; + Account credIssuer1{"credIssuer1"}; + Account credIssuer2{"credIssuer2"}; + std::string const credType = "credential"; + Vault vault{env}; + env.fund( + XRP(1000), + issuer, + owner, + depositor, + charlie, + pdOwner, + credIssuer1, + credIssuer2); + env.close(); + env(fset(issuer, asfAllowTrustLineClawback)); + env.close(); + env.require(flags(issuer, asfAllowTrustLineClawback)); + + PrettyAsset asset = issuer["IOU"]; + env.trust(asset(1000), owner); + env(pay(issuer, owner, asset(500))); + env.trust(asset(1000), depositor); + env(pay(issuer, depositor, asset(500))); + env.close(); + + auto [tx, keylet] = vault.create( + {.owner = owner, .asset = asset, .flags = tfVaultPrivate}); + env(tx); + env.close(); + BEAST_EXPECT(env.le(keylet)); + + { + testcase("private vault owner can deposit"); + auto tx = vault.deposit( + {.depositor = owner, .id = keylet.key, .amount = asset(50)}); + env(tx); + } + + { + testcase("private vault depositor not authorized yet"); + auto tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(50)}); + env(tx, ter{tecNO_AUTH}); + } + + { + testcase("private vault cannot set non-existing domain"); + auto tx = vault.set({.owner = owner, .id = keylet.key}); + tx[sfDomainID] = to_string(base_uint<256>(42ul)); + env(tx, ter{tecOBJECT_NOT_FOUND}); + } + + { + testcase("private vault set domainId"); + + { + pdomain::Credentials const credentials1{ + {.issuer = credIssuer1, .credType = credType}}; + + env(pdomain::setTx(pdOwner, credentials1)); + auto const domainId1 = [&]() { + auto tx = env.tx()->getJson(JsonOptions::none); + return pdomain::getNewDomain(env.meta()); + }(); + + auto tx = vault.set({.owner = owner, .id = keylet.key}); + tx[sfDomainID] = to_string(domainId1); + env(tx); + env.close(); + + // Update domain second time, should be harmless + env(tx); + env.close(); + } + + { + pdomain::Credentials const credentials{ + {.issuer = credIssuer1, .credType = credType}, + {.issuer = credIssuer2, .credType = credType}}; + + env(pdomain::setTx(pdOwner, credentials)); + auto const domainId = [&]() { + auto tx = env.tx()->getJson(JsonOptions::none); + return pdomain::getNewDomain(env.meta()); + }(); + + auto tx = vault.set({.owner = owner, .id = keylet.key}); + tx[sfDomainID] = to_string(domainId); + env(tx); + env.close(); + } + } 
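+         // Attaching a DomainID does not grant access by itself: the
+         // depositor also needs an accepted credential from one of the
+         // domain's credential issuers, as exercised below.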
+ + { + testcase("private vault depositor still not authorized"); + auto tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(50)}); + env(tx, ter{tecNO_AUTH}); + env.close(); + } + + auto const credKeylet = + credentials::keylet(depositor, credIssuer1, credType); + { + testcase("private vault depositor now authorized"); + env(credentials::create(depositor, credIssuer1, credType)); + env(credentials::accept(depositor, credIssuer1, credType)); + env(credentials::create(charlie, credIssuer1, credType)); + env(credentials::accept(charlie, credIssuer1, credType)); + env.close(); + auto credSle = env.le(credKeylet); + BEAST_EXPECT(credSle != nullptr); + + auto tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(50)}); + env(tx); + env.close(); + + tx = vault.deposit( + {.depositor = charlie, .id = keylet.key, .amount = asset(50)}); + env(tx, ter{tecINSUFFICIENT_FUNDS}); + env.close(); + } + + { + testcase("private vault depositor lost authorization"); + env(credentials::deleteCred( + credIssuer1, depositor, credIssuer1, credType)); + env.close(); + auto credSle = env.le(credKeylet); + BEAST_EXPECT(credSle == nullptr); + + auto tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(50)}); + env(tx, ter{tecNO_AUTH}); + env.close(); + } + + { + testcase("private vault depositor new authorization"); + env(credentials::create(depositor, credIssuer2, credType)); + env(credentials::accept(depositor, credIssuer2, credType)); + env.close(); + + auto tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(50)}); + env(tx); + env.close(); + } + + { + testcase("private vault reset domainId"); + auto tx = vault.set({.owner = owner, .id = keylet.key}); + tx[sfDomainID] = "0"; + env(tx); + env.close(); + + tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(50)}); + env(tx, ter{tecNO_AUTH}); + env.close(); + + tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(50)}); + env(tx); + + tx = vault.clawback( + {.issuer = issuer, + .id = keylet.key, + .holder = depositor, + .amount = asset(0)}); + env(tx); + + tx = vault.clawback( + {.issuer = issuer, + .id = keylet.key, + .holder = owner, + .amount = asset(0)}); + env(tx); + + tx = vault.del({ + .owner = owner, + .id = keylet.key, + }); + env(tx); + } + } + + void + testWithDomainCheckXRP() + { + testcase("private XRP vault"); + + Env env{*this, supported_amendments() | featureSingleAssetVault}; + Account owner{"owner"}; + Account depositor{"depositor"}; + Account alice{"charlie"}; + std::string const credType = "credential"; + Vault vault{env}; + env.fund(XRP(100000), owner, depositor, alice); + env.close(); + + PrettyAsset asset = xrpIssue(); + auto [tx, keylet] = vault.create( + {.owner = owner, .asset = asset, .flags = tfVaultPrivate}); + env(tx); + env.close(); + + auto const [vaultAccount, issuanceId] = + [&env, keylet = keylet, this]() -> std::tuple { + auto const vault = env.le(keylet); + BEAST_EXPECT(vault != nullptr); + return {vault->at(sfAccount), vault->at(sfShareMPTID)}; + }(); + BEAST_EXPECT(env.le(keylet::account(vaultAccount))); + BEAST_EXPECT(env.le(keylet::mptIssuance(issuanceId))); + PrettyAsset shares{issuanceId}; + + { + testcase("private XRP vault owner can deposit"); + auto tx = vault.deposit( + {.depositor = owner, .id = keylet.key, .amount = asset(50)}); + env(tx); + } + + { + testcase("private XRP vault cannot pay shares to depositor yet"); + 
env(pay(owner, depositor, shares(1)), ter{tecNO_AUTH}); + } + + { + testcase("private XRP vault depositor not authorized yet"); + auto tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(50)}); + env(tx, ter{tecNO_AUTH}); + } + + { + testcase("private XRP vault set DomainID"); + pdomain::Credentials const credentials{ + {.issuer = owner, .credType = credType}}; + + env(pdomain::setTx(owner, credentials)); + auto const domainId = [&]() { + auto tx = env.tx()->getJson(JsonOptions::none); + return pdomain::getNewDomain(env.meta()); + }(); + + auto tx = vault.set({.owner = owner, .id = keylet.key}); + tx[sfDomainID] = to_string(domainId); + env(tx); + env.close(); + } + + auto const credKeylet = credentials::keylet(depositor, owner, credType); + { + testcase("private XRP vault depositor now authorized"); + env(credentials::create(depositor, owner, credType)); + env(credentials::accept(depositor, owner, credType)); + env.close(); + + BEAST_EXPECT(env.le(credKeylet)); + auto tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(50)}); + env(tx); + env.close(); + } + + { + testcase("private XRP vault can pay shares to depositor"); + env(pay(owner, depositor, shares(1))); + } + + { + testcase("private XRP vault cannot pay shares to 3rd party"); + Json::Value jv; + jv[sfAccount] = alice.human(); + jv[sfTransactionType] = jss::MPTokenAuthorize; + jv[sfMPTokenIssuanceID] = to_string(issuanceId); + env(jv); + env.close(); + + env(pay(owner, alice, shares(1)), ter{tecNO_AUTH}); + } + } + + void + testFailedPseudoAccount() + { + using namespace test::jtx; + + testcase("failed pseudo-account allocation"); + Env env{*this, supported_amendments() | featureSingleAssetVault}; + Account const owner{"owner"}; + Vault vault{env}; + env.fund(XRP(1000), owner); + + auto const keylet = keylet::vault(owner.id(), env.seq(owner)); + for (int i = 0; i < 256; ++i) + { + AccountID const accountId = + ripple::pseudoAccountAddress(*env.current(), keylet.key); + + env(pay(env.master.id(), accountId, XRP(1000)), + seq(autofill), + fee(autofill), + sig(autofill)); + } + + auto [tx, keylet1] = + vault.create({.owner = owner, .asset = xrpIssue()}); + BEAST_EXPECT(keylet.key == keylet1.key); + env(tx, ter{terADDRESS_COLLISION}); + } + + void + testRPC() + { + testcase("RPC"); + Env env{*this, supported_amendments() | featureSingleAssetVault}; + Account const owner{"owner"}; + Account const issuer{"issuer"}; + Vault vault{env}; + env.fund(XRP(1000), issuer, owner); + env.close(); + + PrettyAsset asset = issuer["IOU"]; + env.trust(asset(1000), owner); + env(pay(issuer, owner, asset(200))); + env.close(); + + auto const sequence = env.seq(owner); + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + env(tx); + env.close(); + + // Set some fields + { + auto tx1 = vault.deposit( + {.depositor = owner, .id = keylet.key, .amount = asset(50)}); + env(tx1); + + auto tx2 = vault.set({.owner = owner, .id = keylet.key}); + tx2[sfAssetsMaximum] = asset(1000).number(); + env(tx2); + env.close(); + } + + auto const sleVault = [&env, keylet = keylet, this]() { + auto const vault = env.le(keylet); + BEAST_EXPECT(vault != nullptr); + return vault; + }(); + + auto const check = [&, keylet = keylet, sle = sleVault, this]( + Json::Value const& vault, + Json::Value const& issuance = Json::nullValue) { + BEAST_EXPECT(vault.isObject()); + + constexpr auto checkString = + [](auto& node, SField const& field, std::string v) -> bool { + return node.isMember(field.fieldName) 
&& + node[field.fieldName].isString() && + node[field.fieldName] == v; + }; + constexpr auto checkObject = + [](auto& node, SField const& field, Json::Value v) -> bool { + return node.isMember(field.fieldName) && + node[field.fieldName].isObject() && + node[field.fieldName] == v; + }; + constexpr auto checkInt = + [](auto& node, SField const& field, int v) -> bool { + return node.isMember(field.fieldName) && + ((node[field.fieldName].isInt() && + node[field.fieldName] == Json::Int(v)) || + (node[field.fieldName].isUInt() && + node[field.fieldName] == Json::UInt(v))); + }; + + BEAST_EXPECT(vault["LedgerEntryType"].asString() == "Vault"); + BEAST_EXPECT(vault[jss::index].asString() == strHex(keylet.key)); + BEAST_EXPECT(checkInt(vault, sfFlags, 0)); + // Ignore all other standard fields, this test doesn't care + + BEAST_EXPECT( + checkString(vault, sfAccount, toBase58(sle->at(sfAccount)))); + BEAST_EXPECT( + checkObject(vault, sfAsset, to_json(sle->at(sfAsset)))); + BEAST_EXPECT(checkString(vault, sfAssetsAvailable, "50")); + BEAST_EXPECT(checkString(vault, sfAssetsMaximum, "1000")); + BEAST_EXPECT(checkString(vault, sfAssetsTotal, "50")); + BEAST_EXPECT(checkString(vault, sfLossUnrealized, "0")); + + auto const strShareID = strHex(sle->at(sfShareMPTID)); + BEAST_EXPECT(checkString(vault, sfShareMPTID, strShareID)); + BEAST_EXPECT(checkString(vault, sfOwner, toBase58(owner.id()))); + BEAST_EXPECT(checkInt(vault, sfSequence, sequence)); + BEAST_EXPECT(checkInt( + vault, sfWithdrawalPolicy, vaultStrategyFirstComeFirstServe)); + + if (issuance.isObject()) + { + BEAST_EXPECT( + issuance["LedgerEntryType"].asString() == + "MPTokenIssuance"); + BEAST_EXPECT( + issuance[jss::mpt_issuance_id].asString() == strShareID); + BEAST_EXPECT(checkInt(issuance, sfSequence, 1)); + BEAST_EXPECT(checkInt( + issuance, + sfFlags, + int(lsfMPTCanEscrow | lsfMPTCanTrade | lsfMPTCanTransfer))); + BEAST_EXPECT(checkString(issuance, sfOutstandingAmount, "50")); + } + }; + + { + testcase("RPC ledger_entry selected by key"); + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::vault] = strHex(keylet.key); + auto jvVault = env.rpc("json", "ledger_entry", to_string(jvParams)); + + BEAST_EXPECT(!jvVault[jss::result].isMember(jss::error)); + BEAST_EXPECT(jvVault[jss::result].isMember(jss::node)); + check(jvVault[jss::result][jss::node]); + } + + { + testcase("RPC ledger_entry selected by owner and seq"); + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::vault][jss::owner] = owner.human(); + jvParams[jss::vault][jss::seq] = sequence; + auto jvVault = env.rpc("json", "ledger_entry", to_string(jvParams)); + + BEAST_EXPECT(!jvVault[jss::result].isMember(jss::error)); + BEAST_EXPECT(jvVault[jss::result].isMember(jss::node)); + check(jvVault[jss::result][jss::node]); + } + + { + testcase("RPC ledger_entry cannot find vault by key"); + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::vault] = to_string(uint256(42)); + auto jvVault = env.rpc("json", "ledger_entry", to_string(jvParams)); + BEAST_EXPECT( + jvVault[jss::result][jss::error].asString() == "entryNotFound"); + } + + { + testcase("RPC ledger_entry cannot find vault by owner and seq"); + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::vault][jss::owner] = issuer.human(); + jvParams[jss::vault][jss::seq] = 1'000'000; + auto jvVault = env.rpc("json", "ledger_entry", to_string(jvParams)); + BEAST_EXPECT( + 
jvVault[jss::result][jss::error].asString() == "entryNotFound"); + } + + { + testcase("RPC ledger_entry malformed key"); + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::vault] = 42; + auto jvVault = env.rpc("json", "ledger_entry", to_string(jvParams)); + BEAST_EXPECT( + jvVault[jss::result][jss::error].asString() == + "malformedRequest"); + } + + { + testcase("RPC ledger_entry malformed owner"); + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::vault][jss::owner] = 42; + jvParams[jss::vault][jss::seq] = sequence; + auto jvVault = env.rpc("json", "ledger_entry", to_string(jvParams)); + BEAST_EXPECT( + jvVault[jss::result][jss::error].asString() == + "malformedOwner"); + } + + { + testcase("RPC ledger_entry malformed seq"); + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::vault][jss::owner] = issuer.human(); + jvParams[jss::vault][jss::seq] = "foo"; + auto jvVault = env.rpc("json", "ledger_entry", to_string(jvParams)); + BEAST_EXPECT( + jvVault[jss::result][jss::error].asString() == + "malformedRequest"); + } + + { + testcase("RPC ledger_entry zero seq"); + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::vault][jss::owner] = issuer.human(); + jvParams[jss::vault][jss::seq] = 0; + auto jvVault = env.rpc("json", "ledger_entry", to_string(jvParams)); + BEAST_EXPECT( + jvVault[jss::result][jss::error].asString() == + "malformedRequest"); + } + + { + testcase("RPC ledger_entry negative seq"); + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::vault][jss::owner] = issuer.human(); + jvParams[jss::vault][jss::seq] = -1; + auto jvVault = env.rpc("json", "ledger_entry", to_string(jvParams)); + BEAST_EXPECT( + jvVault[jss::result][jss::error].asString() == + "malformedRequest"); + } + + { + testcase("RPC ledger_entry oversized seq"); + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::vault][jss::owner] = issuer.human(); + jvParams[jss::vault][jss::seq] = 1e20; + auto jvVault = env.rpc("json", "ledger_entry", to_string(jvParams)); + BEAST_EXPECT( + jvVault[jss::result][jss::error].asString() == + "malformedRequest"); + } + + { + testcase("RPC ledger_entry bool seq"); + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::vault][jss::owner] = issuer.human(); + jvParams[jss::vault][jss::seq] = true; + auto jvVault = env.rpc("json", "ledger_entry", to_string(jvParams)); + BEAST_EXPECT( + jvVault[jss::result][jss::error].asString() == + "malformedRequest"); + } + + { + testcase("RPC account_objects"); + + Json::Value jvParams; + jvParams[jss::account] = owner.human(); + jvParams[jss::type] = jss::vault; + auto jv = env.rpc( + "json", "account_objects", to_string(jvParams))[jss::result]; + + BEAST_EXPECT(jv[jss::account_objects].size() == 1); + check(jv[jss::account_objects][0u]); + } + + { + testcase("RPC ledger_data"); + + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::binary] = false; + jvParams[jss::type] = jss::vault; + Json::Value jv = + env.rpc("json", "ledger_data", to_string(jvParams)); + BEAST_EXPECT(jv[jss::result][jss::state].size() == 1); + check(jv[jss::result][jss::state][0u]); + } + + { + testcase("RPC vault_info command line"); + Json::Value jv = + env.rpc("vault_info", strHex(keylet.key), "validated"); + + BEAST_EXPECT(!jv[jss::result].isMember(jss::error)); + 
BEAST_EXPECT(jv[jss::result].isMember(jss::vault)); + check( + jv[jss::result][jss::vault], + jv[jss::result][jss::vault][jss::shares]); + } + + { + testcase("RPC vault_info json"); + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::vault_id] = strHex(keylet.key); + auto jv = env.rpc("json", "vault_info", to_string(jvParams)); + + BEAST_EXPECT(!jv[jss::result].isMember(jss::error)); + BEAST_EXPECT(jv[jss::result].isMember(jss::vault)); + check( + jv[jss::result][jss::vault], + jv[jss::result][jss::vault][jss::shares]); + } + + { + testcase("RPC vault_info invalid vault_id"); + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::vault_id] = "foobar"; + auto jv = env.rpc("json", "vault_info", to_string(jvParams)); + BEAST_EXPECT( + jv[jss::result][jss::error].asString() == "malformedRequest"); + } + + { + testcase("RPC vault_info json invalid index"); + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::vault_id] = 0; + auto jv = env.rpc("json", "vault_info", to_string(jvParams)); + BEAST_EXPECT( + jv[jss::result][jss::error].asString() == "malformedRequest"); + } + + { + testcase("RPC vault_info json by owner and sequence"); + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::owner] = owner.human(); + jvParams[jss::seq] = sequence; + auto jv = env.rpc("json", "vault_info", to_string(jvParams)); + + BEAST_EXPECT(!jv[jss::result].isMember(jss::error)); + BEAST_EXPECT(jv[jss::result].isMember(jss::vault)); + check( + jv[jss::result][jss::vault], + jv[jss::result][jss::vault][jss::shares]); + } + + { + testcase("RPC vault_info json malformed sequence"); + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::owner] = owner.human(); + jvParams[jss::seq] = "foobar"; + auto jv = env.rpc("json", "vault_info", to_string(jvParams)); + BEAST_EXPECT( + jv[jss::result][jss::error].asString() == "malformedRequest"); + } + + { + testcase("RPC vault_info json invalid sequence"); + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::owner] = owner.human(); + jvParams[jss::seq] = 0; + auto jv = env.rpc("json", "vault_info", to_string(jvParams)); + BEAST_EXPECT( + jv[jss::result][jss::error].asString() == "malformedRequest"); + } + + { + testcase("RPC vault_info json negative sequence"); + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::owner] = owner.human(); + jvParams[jss::seq] = -1; + auto jv = env.rpc("json", "vault_info", to_string(jvParams)); + BEAST_EXPECT( + jv[jss::result][jss::error].asString() == "malformedRequest"); + } + + { + testcase("RPC vault_info json oversized sequence"); + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::owner] = owner.human(); + jvParams[jss::seq] = 1e20; + auto jv = env.rpc("json", "vault_info", to_string(jvParams)); + BEAST_EXPECT( + jv[jss::result][jss::error].asString() == "malformedRequest"); + } + + { + testcase("RPC vault_info json bool sequence"); + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::owner] = owner.human(); + jvParams[jss::seq] = true; + auto jv = env.rpc("json", "vault_info", to_string(jvParams)); + BEAST_EXPECT( + jv[jss::result][jss::error].asString() == "malformedRequest"); + } + + { + testcase("RPC vault_info json malformed owner"); + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::owner] = 
"foobar"; + jvParams[jss::seq] = sequence; + auto jv = env.rpc("json", "vault_info", to_string(jvParams)); + BEAST_EXPECT( + jv[jss::result][jss::error].asString() == "malformedRequest"); + } + + { + testcase("RPC vault_info json invalid combination only owner"); + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::owner] = owner.human(); + auto jv = env.rpc("json", "vault_info", to_string(jvParams)); + BEAST_EXPECT( + jv[jss::result][jss::error].asString() == "malformedRequest"); + } + + { + testcase("RPC vault_info json invalid combination only seq"); + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::seq] = sequence; + auto jv = env.rpc("json", "vault_info", to_string(jvParams)); + BEAST_EXPECT( + jv[jss::result][jss::error].asString() == "malformedRequest"); + } + + { + testcase("RPC vault_info json invalid combination seq vault_id"); + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::vault_id] = strHex(keylet.key); + jvParams[jss::seq] = sequence; + auto jv = env.rpc("json", "vault_info", to_string(jvParams)); + BEAST_EXPECT( + jv[jss::result][jss::error].asString() == "malformedRequest"); + } + + { + testcase("RPC vault_info json invalid combination owner vault_id"); + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::vault_id] = strHex(keylet.key); + jvParams[jss::owner] = owner.human(); + auto jv = env.rpc("json", "vault_info", to_string(jvParams)); + BEAST_EXPECT( + jv[jss::result][jss::error].asString() == "malformedRequest"); + } + + { + testcase( + "RPC vault_info json invalid combination owner seq " + "vault_id"); + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + jvParams[jss::vault_id] = strHex(keylet.key); + jvParams[jss::seq] = sequence; + jvParams[jss::owner] = owner.human(); + auto jv = env.rpc("json", "vault_info", to_string(jvParams)); + BEAST_EXPECT( + jv[jss::result][jss::error].asString() == "malformedRequest"); + } + + { + testcase("RPC vault_info json no input"); + Json::Value jvParams; + jvParams[jss::ledger_index] = jss::validated; + auto jv = env.rpc("json", "vault_info", to_string(jvParams)); + BEAST_EXPECT( + jv[jss::result][jss::error].asString() == "malformedRequest"); + } + + { + testcase("RPC vault_info command line invalid index"); + Json::Value jv = env.rpc("vault_info", "foobar", "validated"); + BEAST_EXPECT(jv[jss::error].asString() == "invalidParams"); + } + + { + testcase("RPC vault_info command line invalid index"); + Json::Value jv = env.rpc("vault_info", "0", "validated"); + BEAST_EXPECT( + jv[jss::result][jss::error].asString() == "malformedRequest"); + } + + { + testcase("RPC vault_info command line invalid index"); + Json::Value jv = + env.rpc("vault_info", strHex(uint256(42)), "validated"); + BEAST_EXPECT( + jv[jss::result][jss::error].asString() == "entryNotFound"); + } + + { + testcase("RPC vault_info command line invalid ledger"); + Json::Value jv = env.rpc("vault_info", strHex(keylet.key), "0"); + BEAST_EXPECT( + jv[jss::result][jss::error].asString() == "lgrNotFound"); + } + } + +public: + void + run() override + { + testSequences(); + testPreflight(); + testCreateFailXRP(); + testCreateFailIOU(); + testCreateFailMPT(); + testWithMPT(); + testWithIOU(); + testWithDomainCheck(); + testWithDomainCheckXRP(); + testNonTransferableShares(); + testFailedPseudoAccount(); + testRPC(); + } +}; + +BEAST_DEFINE_TESTSUITE_PRIO(Vault, tx, ripple, 1); + +} // namespace ripple diff --git 
a/src/test/basics/IOUAmount_test.cpp b/src/test/basics/IOUAmount_test.cpp index 306953d5ab..6ba1cfd6f1 100644 --- a/src/test/basics/IOUAmount_test.cpp +++ b/src/test/basics/IOUAmount_test.cpp @@ -44,6 +44,11 @@ public: IOUAmount const zz(beast::zero); BEAST_EXPECT(z == zz); + + // https://github.com/XRPLF/rippled/issues/5170 + IOUAmount const zzz{}; + BEAST_EXPECT(zzz == beast::zero); + // BEAST_EXPECT(zzz == zz); } void diff --git a/src/test/jtx/Env.h b/src/test/jtx/Env.h index ef26ebf2ee..de6b83362d 100644 --- a/src/test/jtx/Env.h +++ b/src/test/jtx/Env.h @@ -28,6 +28,7 @@ #include #include #include +#include #include #include diff --git a/src/test/jtx/amount.h b/src/test/jtx/amount.h index 589347b12c..344a2ab73c 100644 --- a/src/test/jtx/amount.h +++ b/src/test/jtx/amount.h @@ -21,7 +21,6 @@ #define RIPPLE_TEST_JTX_AMOUNT_H_INCLUDED #include -#include #include #include @@ -128,12 +127,23 @@ public: return amount_; } + Number + number() const + { + return amount_; + } + operator STAmount const&() const { return amount_; } operator AnyAmount() const; + + operator Json::Value() const + { + return to_json(value()); + } }; inline bool @@ -151,6 +161,49 @@ operator!=(PrettyAmount const& lhs, PrettyAmount const& rhs) std::ostream& operator<<(std::ostream& os, PrettyAmount const& amount); +struct PrettyAsset +{ +private: + Asset asset_; + unsigned int scale_; + +public: + template + requires std::convertible_to + PrettyAsset(A const& asset, unsigned int scale = 1) + : PrettyAsset{Asset{asset}, scale} + { + } + + PrettyAsset(Asset const& asset, unsigned int scale = 1) + : asset_(asset), scale_(scale) + { + } + + Asset const& + raw() const + { + return asset_; + } + + operator Asset const&() const + { + return asset_; + } + + operator Json::Value() const + { + return to_json(asset_); + } + + template + PrettyAmount + operator()(T v) const + { + STAmount amount{asset_, v * scale_}; + return {amount, ""}; + } +}; //------------------------------------------------------------------------------ // Specifies an order book diff --git a/src/test/jtx/basic_prop.h b/src/test/jtx/basic_prop.h index 742b8744ef..a8daafba41 100644 --- a/src/test/jtx/basic_prop.h +++ b/src/test/jtx/basic_prop.h @@ -20,6 +20,8 @@ #ifndef RIPPLE_TEST_JTX_BASIC_PROP_H_INCLUDED #define RIPPLE_TEST_JTX_BASIC_PROP_H_INCLUDED +#include + namespace ripple { namespace test { namespace jtx { diff --git a/src/test/jtx/credentials.h b/src/test/jtx/credentials.h index 9161b7241d..1a72e2360d 100644 --- a/src/test/jtx/credentials.h +++ b/src/test/jtx/credentials.h @@ -30,6 +30,16 @@ namespace jtx { namespace credentials { +inline Keylet +keylet( + test::jtx::Account const& subject, + test::jtx::Account const& issuer, + std::string_view credType) +{ + return keylet::credential( + subject.id(), issuer.id(), Slice(credType.data(), credType.size())); +} + // Sets the optional URI. 
class uri { diff --git a/src/test/jtx/impl/Env.cpp b/src/test/jtx/impl/Env.cpp index ecb2d62f43..ac00d3eed1 100644 --- a/src/test/jtx/impl/Env.cpp +++ b/src/test/jtx/impl/Env.cpp @@ -603,6 +603,5 @@ Env::disableFeature(uint256 const feature) } } // namespace jtx - } // namespace test } // namespace ripple diff --git a/src/test/jtx/impl/vault.cpp b/src/test/jtx/impl/vault.cpp new file mode 100644 index 0000000000..663c42c6ee --- /dev/null +++ b/src/test/jtx/impl/vault.cpp @@ -0,0 +1,104 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2024 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include + +#include +#include +#include + +#include + +namespace ripple { +namespace test { +namespace jtx { + +std::tuple +Vault::create(CreateArgs const& args) +{ + auto keylet = keylet::vault(args.owner.id(), env.seq(args.owner)); + Json::Value jv; + jv[jss::TransactionType] = jss::VaultCreate; + jv[jss::Account] = args.owner.human(); + jv[jss::Asset] = to_json(args.asset); + jv[jss::Fee] = STAmount(env.current()->fees().increment).getJson(); + if (args.flags) + jv[jss::Flags] = *args.flags; + return {jv, keylet}; +} + +Json::Value +Vault::set(SetArgs const& args) +{ + Json::Value jv; + jv[jss::TransactionType] = jss::VaultSet; + jv[jss::Account] = args.owner.human(); + jv[sfVaultID] = to_string(args.id); + return jv; +} + +Json::Value +Vault::del(DeleteArgs const& args) +{ + Json::Value jv; + jv[jss::TransactionType] = jss::VaultDelete; + jv[jss::Account] = args.owner.human(); + jv[sfVaultID] = to_string(args.id); + return jv; +} + +Json::Value +Vault::deposit(DepositArgs const& args) +{ + Json::Value jv; + jv[jss::TransactionType] = jss::VaultDeposit; + jv[jss::Account] = args.depositor.human(); + jv[sfVaultID] = to_string(args.id); + jv[jss::Amount] = to_json(args.amount); + return jv; +} + +Json::Value +Vault::withdraw(WithdrawArgs const& args) +{ + Json::Value jv; + jv[jss::TransactionType] = jss::VaultWithdraw; + jv[jss::Account] = args.depositor.human(); + jv[sfVaultID] = to_string(args.id); + jv[jss::Amount] = to_json(args.amount); + return jv; +} + +Json::Value +Vault::clawback(ClawbackArgs const& args) +{ + Json::Value jv; + jv[jss::TransactionType] = jss::VaultClawback; + jv[jss::Account] = args.issuer.human(); + jv[sfVaultID] = to_string(args.id); + jv[jss::Holder] = args.holder.human(); + if (args.amount) + jv[jss::Amount] = to_json(*args.amount); + return jv; +} + +} // namespace jtx +} // namespace test +} // namespace ripple diff --git a/src/test/jtx/mpt.h b/src/test/jtx/mpt.h index 950ab0d409..52ade92323 100644 --- a/src/test/jtx/mpt.h +++ b/src/test/jtx/mpt.h @@ -88,11 +88,12 @@ public: struct 
MPTInit { std::vector holders = {}; - PrettyAmount const& xrp = XRP(10'000); - PrettyAmount const& xrpHolders = XRP(10'000); + PrettyAmount const xrp = XRP(10'000); + PrettyAmount const xrpHolders = XRP(10'000); bool fund = true; bool close = true; }; +static MPTInit const mptInitNoFund{.fund = false}; struct MPTCreate { diff --git a/src/test/jtx/vault.h b/src/test/jtx/vault.h new file mode 100644 index 0000000000..74c482bf17 --- /dev/null +++ b/src/test/jtx/vault.h @@ -0,0 +1,109 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2024 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_TEST_JTX_VAULT_H_INCLUDED +#define RIPPLE_TEST_JTX_VAULT_H_INCLUDED + +#include +#include + +#include +#include +#include +#include + +#include +#include + +namespace ripple { +namespace test { +namespace jtx { + +class Env; + +struct Vault +{ + Env& env; + + struct CreateArgs + { + Account owner; + Asset asset; + std::optional flags{}; + }; + + /** Return a VaultCreate transaction and the Vault's expected keylet. 
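+        The keylet is derived from the owner account and its current sequence number, as computed in Vault::create (vault.cpp).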
*/ + std::tuple + create(CreateArgs const& args); + + struct SetArgs + { + Account owner; + uint256 id; + }; + + Json::Value + set(SetArgs const& args); + + struct DeleteArgs + { + Account owner; + uint256 id; + }; + + Json::Value + del(DeleteArgs const& args); + + struct DepositArgs + { + Account depositor; + uint256 id; + STAmount amount; + }; + + Json::Value + deposit(DepositArgs const& args); + + struct WithdrawArgs + { + Account depositor; + uint256 id; + STAmount amount; + }; + + Json::Value + withdraw(WithdrawArgs const& args); + + struct ClawbackArgs + { + Account issuer; + uint256 id; + Account holder; + std::optional amount{}; + }; + + Json::Value + clawback(ClawbackArgs const& args); +}; + +} // namespace jtx +} // namespace test +} // namespace ripple + +#endif diff --git a/src/test/ledger/Invariants_test.cpp b/src/test/ledger/Invariants_test.cpp index 18b037cbbe..7ceb76504d 100644 --- a/src/test/ledger/Invariants_test.cpp +++ b/src/test/ledger/Invariants_test.cpp @@ -78,8 +78,8 @@ class Invariants_test : public beast::unit_test::suite Preclose const& preclose = {}) { using namespace test::jtx; - FeatureBitset amendments = - supported_amendments() | featureInvariantsV1_1; + FeatureBitset amendments = supported_amendments() | + featureInvariantsV1_1 | featureSingleAssetVault; Env env{*this, amendments}; Account const A1{"A1"}; @@ -116,12 +116,14 @@ class Invariants_test : public beast::unit_test::suite sink.messages().str().starts_with("Invariant failed:") || sink.messages().str().starts_with( "Transaction caused an exception")); - // uncomment if you want to log the invariant failure message - // log << " --> " << sink.messages().str() << std::endl; for (auto const& m : expect_logs) { - BEAST_EXPECT( - sink.messages().str().find(m) != std::string::npos); + if (sink.messages().str().find(m) == std::string::npos) + { + // uncomment if you want to log the invariant failure + // message log << " --> " << m << std::endl; + fail(); + } } } } @@ -784,7 +786,7 @@ class Invariants_test : public beast::unit_test::suite testcase << "valid new account root"; doInvariantCheck( - {{"account root created by a non-Payment"}}, + {{"account root created illegally"}}, [](Account const&, Account const&, ApplyContext& ac) { // Insert a new account root created by a non-payment into // the view. 
@@ -827,6 +829,74 @@ class Invariants_test : public beast::unit_test::suite }, XRPAmount{}, STTx{ttPAYMENT, [](STObject& tx) {}}); + + doInvariantCheck( + {{"pseudo-account created by a wrong transaction type"}}, + [](Account const&, Account const&, ApplyContext& ac) { + Account const A3{"A3"}; + Keylet const acctKeylet = keylet::account(A3); + auto const sleNew = std::make_shared(acctKeylet); + sleNew->setFieldU32(sfSequence, 0); + sleNew->setFieldH256(sfAMMID, uint256(1)); + sleNew->setFieldU32( + sfFlags, + lsfDisableMaster | lsfDefaultRipple | lsfDefaultRipple); + ac.view().insert(sleNew); + return true; + }, + XRPAmount{}, + STTx{ttPAYMENT, [](STObject& tx) {}}); + + doInvariantCheck( + {{"account created with wrong starting sequence number"}}, + [](Account const&, Account const&, ApplyContext& ac) { + Account const A3{"A3"}; + Keylet const acctKeylet = keylet::account(A3); + auto const sleNew = std::make_shared(acctKeylet); + sleNew->setFieldU32(sfSequence, ac.view().seq()); + sleNew->setFieldH256(sfAMMID, uint256(1)); + sleNew->setFieldU32( + sfFlags, + lsfDisableMaster | lsfDefaultRipple | lsfDepositAuth); + ac.view().insert(sleNew); + return true; + }, + XRPAmount{}, + STTx{ttAMM_CREATE, [](STObject& tx) {}}); + + doInvariantCheck( + {{"pseudo-account created with wrong flags"}}, + [](Account const&, Account const&, ApplyContext& ac) { + Account const A3{"A3"}; + Keylet const acctKeylet = keylet::account(A3); + auto const sleNew = std::make_shared(acctKeylet); + sleNew->setFieldU32(sfSequence, 0); + sleNew->setFieldH256(sfAMMID, uint256(1)); + sleNew->setFieldU32( + sfFlags, lsfDisableMaster | lsfDefaultRipple); + ac.view().insert(sleNew); + return true; + }, + XRPAmount{}, + STTx{ttVAULT_CREATE, [](STObject& tx) {}}); + + doInvariantCheck( + {{"pseudo-account created with wrong flags"}}, + [](Account const&, Account const&, ApplyContext& ac) { + Account const A3{"A3"}; + Keylet const acctKeylet = keylet::account(A3); + auto const sleNew = std::make_shared(acctKeylet); + sleNew->setFieldU32(sfSequence, 0); + sleNew->setFieldH256(sfAMMID, uint256(1)); + sleNew->setFieldU32( + sfFlags, + lsfDisableMaster | lsfDefaultRipple | lsfDepositAuth | + lsfRequireDestTag); + ac.view().insert(sleNew); + return true; + }, + XRPAmount{}, + STTx{ttAMM_CREATE, [](STObject& tx) {}}); } void diff --git a/src/test/protocol/STNumber_test.cpp b/src/test/protocol/STNumber_test.cpp index ed255e32f1..6f2c57ecb0 100644 --- a/src/test/protocol/STNumber_test.cpp +++ b/src/test/protocol/STNumber_test.cpp @@ -18,12 +18,15 @@ //============================================================================== #include +#include +#include #include #include #include #include #include +#include namespace ripple { @@ -78,6 +81,197 @@ struct STNumber_test : public beast::unit_test::suite STAmount const totalAmount{totalValue, strikePrice.issue()}; BEAST_EXPECT(totalAmount == Number{10'000}); } + + { + BEAST_EXPECT( + numberFromJson(sfNumber, Json::Value(42)) == + STNumber(sfNumber, 42)); + BEAST_EXPECT( + numberFromJson(sfNumber, Json::Value(-42)) == + STNumber(sfNumber, -42)); + + BEAST_EXPECT( + numberFromJson(sfNumber, Json::UInt(42)) == + STNumber(sfNumber, 42)); + + BEAST_EXPECT( + numberFromJson(sfNumber, "-123") == STNumber(sfNumber, -123)); + + BEAST_EXPECT( + numberFromJson(sfNumber, "123") == STNumber(sfNumber, 123)); + BEAST_EXPECT( + numberFromJson(sfNumber, "-123") == STNumber(sfNumber, -123)); + + BEAST_EXPECT( + numberFromJson(sfNumber, "3.14") == + STNumber(sfNumber, Number(314, -2))); + BEAST_EXPECT( + 
numberFromJson(sfNumber, "-3.14") == + STNumber(sfNumber, -Number(314, -2))); + BEAST_EXPECT( + numberFromJson(sfNumber, "3.14e2") == STNumber(sfNumber, 314)); + BEAST_EXPECT( + numberFromJson(sfNumber, "-3.14e2") == + STNumber(sfNumber, -314)); + + BEAST_EXPECT( + numberFromJson(sfNumber, "1000e-2") == STNumber(sfNumber, 10)); + BEAST_EXPECT( + numberFromJson(sfNumber, "-1000e-2") == + STNumber(sfNumber, -10)); + + BEAST_EXPECT( + numberFromJson(sfNumber, "0") == STNumber(sfNumber, 0)); + BEAST_EXPECT( + numberFromJson(sfNumber, "0.0") == STNumber(sfNumber, 0)); + BEAST_EXPECT( + numberFromJson(sfNumber, "0.000") == STNumber(sfNumber, 0)); + BEAST_EXPECT( + numberFromJson(sfNumber, "-0") == STNumber(sfNumber, 0)); + BEAST_EXPECT( + numberFromJson(sfNumber, "-0.0") == STNumber(sfNumber, 0)); + BEAST_EXPECT( + numberFromJson(sfNumber, "-0.000") == STNumber(sfNumber, 0)); + BEAST_EXPECT( + numberFromJson(sfNumber, "0e6") == STNumber(sfNumber, 0)); + BEAST_EXPECT( + numberFromJson(sfNumber, "0.0e6") == STNumber(sfNumber, 0)); + BEAST_EXPECT( + numberFromJson(sfNumber, "0.000e6") == STNumber(sfNumber, 0)); + BEAST_EXPECT( + numberFromJson(sfNumber, "-0e6") == STNumber(sfNumber, 0)); + BEAST_EXPECT( + numberFromJson(sfNumber, "-0.0e6") == STNumber(sfNumber, 0)); + BEAST_EXPECT( + numberFromJson(sfNumber, "-0.000e6") == STNumber(sfNumber, 0)); + + // Obvious non-numbers tested here + try + { + auto _ = numberFromJson(sfNumber, ""); + BEAST_EXPECT(false); + } + catch (std::runtime_error const& e) + { + std::string const expected = "'' is not a number"; + BEAST_EXPECT(e.what() == expected); + } + + try + { + auto _ = numberFromJson(sfNumber, "e"); + BEAST_EXPECT(false); + } + catch (std::runtime_error const& e) + { + std::string const expected = "'e' is not a number"; + BEAST_EXPECT(e.what() == expected); + } + + try + { + auto _ = numberFromJson(sfNumber, "1e"); + BEAST_EXPECT(false); + } + catch (std::runtime_error const& e) + { + std::string const expected = "'1e' is not a number"; + BEAST_EXPECT(e.what() == expected); + } + + try + { + auto _ = numberFromJson(sfNumber, "e2"); + BEAST_EXPECT(false); + } + catch (std::runtime_error const& e) + { + std::string const expected = "'e2' is not a number"; + BEAST_EXPECT(e.what() == expected); + } + + try + { + auto _ = numberFromJson(sfNumber, Json::Value()); + BEAST_EXPECT(false); + } + catch (std::runtime_error const& e) + { + std::string const expected = "not a number"; + BEAST_EXPECT(e.what() == expected); + } + + try + { + auto _ = numberFromJson( + sfNumber, + "1234567890123456789012345678901234567890123456789012345678" + "9012345678901234567890123456789012345678901234567890123456" + "78901234567890123456789012345678901234567890"); + BEAST_EXPECT(false); + } + catch (std::bad_cast const& e) + { + BEAST_EXPECT(true); + } + + // We do not handle leading zeros + try + { + auto _ = numberFromJson(sfNumber, "001"); + BEAST_EXPECT(false); + } + catch (std::runtime_error const& e) + { + std::string const expected = "'001' is not a number"; + BEAST_EXPECT(e.what() == expected); + } + + try + { + auto _ = numberFromJson(sfNumber, "000.0"); + BEAST_EXPECT(false); + } + catch (std::runtime_error const& e) + { + std::string const expected = "'000.0' is not a number"; + BEAST_EXPECT(e.what() == expected); + } + + // We do not handle dangling dot + try + { + auto _ = numberFromJson(sfNumber, ".1"); + BEAST_EXPECT(false); + } + catch (std::runtime_error const& e) + { + std::string const expected = "'.1' is not a number"; + BEAST_EXPECT(e.what() == expected); + 
} + + try + { + auto _ = numberFromJson(sfNumber, "1."); + BEAST_EXPECT(false); + } + catch (std::runtime_error const& e) + { + std::string const expected = "'1.' is not a number"; + BEAST_EXPECT(e.what() == expected); + } + + try + { + auto _ = numberFromJson(sfNumber, "1.e3"); + BEAST_EXPECT(false); + } + catch (std::runtime_error const& e) + { + std::string const expected = "'1.e3' is not a number"; + BEAST_EXPECT(e.what() == expected); + } + } } }; diff --git a/src/test/rpc/Transaction_test.cpp b/src/test/rpc/Transaction_test.cpp index 577f731200..724a3a0517 100644 --- a/src/test/rpc/Transaction_test.cpp +++ b/src/test/rpc/Transaction_test.cpp @@ -355,8 +355,7 @@ class Transaction_test : public beast::unit_test::suite } auto const tx = env.jt(noop(alice), seq(env.seq(alice))).stx; - auto const ctid = - *RPC::encodeCTID(endLegSeq, tx->getSeqProxy().value(), netID); + auto const ctid = *RPC::encodeCTID(endLegSeq, tx->getSeqValue(), netID); for (int deltaEndSeq = 0; deltaEndSeq < 2; ++deltaEndSeq) { auto const result = env.rpc( diff --git a/src/xrpld/app/misc/CredentialHelpers.cpp b/src/xrpld/app/misc/CredentialHelpers.cpp index dcc5975b34..03ad1f9c80 100644 --- a/src/xrpld/app/misc/CredentialHelpers.cpp +++ b/src/xrpld/app/misc/CredentialHelpers.cpp @@ -20,6 +20,7 @@ #include #include +#include #include #include @@ -39,12 +40,11 @@ checkExpired( } bool -removeExpired(ApplyView& view, STTx const& tx, beast::Journal const j) +removeExpired(ApplyView& view, STVector256 const& arr, beast::Journal const j) { auto const closeTime = view.info().parentCloseTime; bool foundExpired = false; - STVector256 const& arr(tx.getFieldV256(sfCredentialIDs)); for (auto const& h : arr) { // Credentials already checked in preclaim. Look only for expired here. @@ -78,19 +78,19 @@ deleteSLE( AccountID const& account, SField const& node, bool isOwner) -> TER { auto const sleAccount = view.peek(keylet::account(account)); if (!sleAccount) - { + { // LCOV_EXCL_START JLOG(j.fatal()) << "Internal error: can't retrieve Owner account."; return tecINTERNAL; - } + } // LCOV_EXCL_STOP // Remove object from owner directory std::uint64_t const page = sleCredential->getFieldU64(node); if (!view.dirRemove( keylet::ownerDir(account), page, sleCredential->key(), false)) - { + { // LCOV_EXCL_START JLOG(j.fatal()) << "Unable to delete Credential from owner."; return tefBAD_LEDGER; - } + } // LCOV_EXCL_STOP if (isOwner) adjustOwnerCount(view, sleAccount, -1, j); @@ -186,30 +186,69 @@ valid(PreclaimContext const& ctx, AccountID const& src) } TER -authorized(ApplyContext const& ctx, AccountID const& dst) +validDomain(ReadView const& view, uint256 domainID, AccountID const& subject) +{ + // Note, permissioned domain objects can be deleted at any time + auto const slePD = view.read(keylet::permissionedDomain(domainID)); + if (!slePD) + return tecOBJECT_NOT_FOUND; + + auto const closeTime = view.info().parentCloseTime; + bool foundExpired = false; + for (auto const& h : slePD->getFieldArray(sfAcceptedCredentials)) + { + auto const issuer = h.getAccountID(sfIssuer); + auto const type = h.getFieldVL(sfCredentialType); + auto const keyletCredential = + keylet::credential(subject, issuer, makeSlice(type)); + auto const sleCredential = view.read(keyletCredential); + + // We cannot delete expired credentials, that would require ApplyView& + // However we can check if credentials are expired. 
Expected transaction + // flow is to use `validDomain` in preclaim, converting tecEXPIRED to + // tesSUCCESS, then proceed to call `verifyValidDomain` in doApply. This + // allows expired credentials to be deleted by any transaction. + if (sleCredential) + { + if (checkExpired(sleCredential, closeTime)) + { + foundExpired = true; + continue; + } + else if (sleCredential->getFlags() & lsfAccepted) + return tesSUCCESS; + else + continue; + } + } + + return foundExpired ? tecEXPIRED : tecNO_AUTH; +} + +TER +authorizedDepositPreauth( + ApplyView const& view, + STVector256 const& credIDs, + AccountID const& dst) { - auto const& credIDs(ctx.tx.getFieldV256(sfCredentialIDs)); std::set> sorted; std::vector> lifeExtender; lifeExtender.reserve(credIDs.size()); for (auto const& h : credIDs) { - auto sleCred = ctx.view().read(keylet::credential(h)); - if (!sleCred) // already checked in preclaim - return tefINTERNAL; + auto sleCred = view.read(keylet::credential(h)); + if (!sleCred) // already checked in preclaim + return tefINTERNAL; // LCOV_EXCL_LINE auto [it, ins] = sorted.emplace((*sleCred)[sfIssuer], (*sleCred)[sfCredentialType]); if (!ins) - return tefINTERNAL; + return tefINTERNAL; // LCOV_EXCL_LINE lifeExtender.push_back(std::move(sleCred)); } - if (!ctx.view().exists(keylet::depositPreauth(dst, sorted))) - { - JLOG(ctx.journal.trace()) << "DepositPreauth doesn't exist"; + if (!view.exists(keylet::depositPreauth(dst, sorted))) return tecNO_PERMISSION; - } return tesSUCCESS; } @@ -273,6 +312,46 @@ checkArray(STArray const& credentials, unsigned maxSize, beast::Journal j) } // namespace credentials +TER +verifyValidDomain( + ApplyView& view, + AccountID const& account, + uint256 domainID, + beast::Journal j) +{ + auto const slePD = view.read(keylet::permissionedDomain(domainID)); + if (!slePD) + return tecOBJECT_NOT_FOUND; + + // Collect all matching credentials on a side, so we can remove expired ones + // We may finish the loop with this collection empty, it's fine. + STVector256 credentials; + for (auto const& h : slePD->getFieldArray(sfAcceptedCredentials)) + { + auto const issuer = h.getAccountID(sfIssuer); + auto const type = h.getFieldVL(sfCredentialType); + auto const keyletCredential = + keylet::credential(account, issuer, makeSlice(type)); + if (view.exists(keyletCredential)) + credentials.push_back(keyletCredential.key); + } + + // Result intentionally ignored. + [[maybe_unused]] bool _ = credentials::removeExpired(view, credentials, j); + + for (auto const& h : credentials) + { + auto sleCredential = view.read(keylet::credential(h)); + if (!sleCredential) + continue; // expired, i.e. deleted in credentials::removeExpired + + if (sleCredential->getFlags() & lsfAccepted) + return tesSUCCESS; + } + + return tecNO_PERMISSION; +} + TER verifyDepositPreauth( ApplyContext& ctx, @@ -289,7 +368,8 @@ verifyDepositPreauth( bool const credentialsPresent = ctx.tx.isFieldPresent(sfCredentialIDs); if (credentialsPresent && - credentials::removeExpired(ctx.view(), ctx.tx, ctx.journal)) + credentials::removeExpired( + ctx.view(), ctx.tx.getFieldV256(sfCredentialIDs), ctx.journal)) return tecEXPIRED; if (sleDst && (sleDst->getFlags() & lsfDepositAuth)) @@ -297,8 +377,12 @@ verifyDepositPreauth( if (src != dst) { if (!ctx.view().exists(keylet::depositPreauth(dst, src))) - return !credentialsPresent ? tecNO_PERMISSION - : credentials::authorized(ctx, dst); + return !credentialsPresent + ? 
tecNO_PERMISSION + : credentials::authorizedDepositPreauth( + ctx.view(), + ctx.tx.getFieldV256(sfCredentialIDs), + dst); } } diff --git a/src/xrpld/app/misc/CredentialHelpers.h b/src/xrpld/app/misc/CredentialHelpers.h index 8b52acf54e..162ddd6515 100644 --- a/src/xrpld/app/misc/CredentialHelpers.h +++ b/src/xrpld/app/misc/CredentialHelpers.h @@ -35,9 +35,9 @@ checkExpired( std::shared_ptr const& sleCredential, NetClock::time_point const& closed); -// Return true if at least 1 expired credentials was found(and deleted) +// Return true if any expired credential was found in arr (and deleted) bool -removeExpired(ApplyView& view, STTx const& tx, beast::Journal const j); +removeExpired(ApplyView& view, STVector256 const& arr, beast::Journal const j); // Actually remove a credentials object from the ledger TER @@ -50,14 +50,25 @@ deleteSLE( NotTEC checkFields(PreflightContext const& ctx); -// Accessing the ledger to check if provided credentials are valid +// Accessing the ledger to check if provided credentials are valid. Do not use +// in doApply (only in preclaim) since it does not remove expired credentials. +// If you call it in preclaim, you also must call verifyDepositPreauth in doApply TER valid(PreclaimContext const& ctx, AccountID const& src); -// This function is only called when we about to return tecNO_PERMISSION because -// all the checks for the DepositPreauth authorization failed. +// Check if subject has any credential matching the given domain. If you call it +// in preclaim and it returns tecEXPIRED, you should call verifyValidDomain in +// doApply. This will ensure that expired credentials are deleted. TER -authorized(ApplyContext const& ctx, AccountID const& dst); +validDomain(ReadView const& view, uint256 domainID, AccountID const& subject); + +// This function is only called when we are about to return tecNO_PERMISSION +// because all the checks for the DepositPreauth authorization failed.
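+// It builds the sorted set of (issuer, credential type) pairs from the given credential IDs and returns tecNO_PERMISSION unless a matching DepositPreauth entry exists for the destination.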
+TER +authorizedDepositPreauth( + ApplyView const& view, + STVector256 const& credIDs, + AccountID const& dst); // Sort credentials array, return empty set if there are duplicates std::set> checkArray(STArray const& credentials, unsigned maxSize, beast::Journal j); } // namespace credentials +// Check expired credentials and for credentials matching DomainID of the ledger +// object +TER +verifyValidDomain( + ApplyView& view, + AccountID const& account, + uint256 domainID, + beast::Journal j); + // Check expired credentials and for existing DepositPreauth ledger object TER verifyDepositPreauth( diff --git a/src/xrpld/app/misc/NetworkOPs.cpp b/src/xrpld/app/misc/NetworkOPs.cpp index 6f29f79384..d87dea3c52 100644 --- a/src/xrpld/app/misc/NetworkOPs.cpp +++ b/src/xrpld/app/misc/NetworkOPs.cpp @@ -3711,7 +3711,7 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo) { auto stx = tx->getSTransaction(); if (stx->getAccountID(sfAccount) == accountId && - stx->getSeqProxy().value() == 1) + stx->getSeqValue() == 1) return true; } diff --git a/src/xrpld/app/tx/detail/AMMCreate.cpp b/src/xrpld/app/tx/detail/AMMCreate.cpp index deafa6f27a..95cb5bf2e6 100644 --- a/src/xrpld/app/tx/detail/AMMCreate.cpp +++ b/src/xrpld/app/tx/detail/AMMCreate.cpp @@ -183,6 +183,14 @@ AMMCreate::preclaim(PreclaimContext const& ctx) return tecAMM_INVALID_TOKENS; } + if (ctx.view.rules().enabled(featureSingleAssetVault)) + { + if (auto const accountId = pseudoAccountAddress( + ctx.view, keylet::amm(amount.issue(), amount2.issue()).key); + accountId == beast::zero) + return terADDRESS_COLLISION; + } + // If featureAMMClawback is enabled, allow AMMCreate without checking // if the issuer has clawback enabled if (ctx.view.rules().enabled(featureAMMClawback)) @@ -219,64 +227,37 @@ applyCreate( auto const ammKeylet = keylet::amm(amount.issue(), amount2.issue()); // Mitigate same account exists possibility - auto const ammAccount = [&]() -> Expected { - std::uint16_t constexpr maxAccountAttempts = 256; - for (auto p = 0; p < maxAccountAttempts; ++p) - { - auto const ammAccount = - ammAccountID(p, sb.info().parentHash, ammKeylet.key); - if (!sb.read(keylet::account(ammAccount))) - return ammAccount; - } - return Unexpected(tecDUPLICATE); - }(); - + auto const maybeAccount = createPseudoAccount(sb, ammKeylet.key, sfAMMID); // AMM account already exists (should not happen) - if (!ammAccount) + if (!maybeAccount) { - JLOG(j_.error()) << "AMM Instance: AMM already exists."; - return {ammAccount.error(), false}; + JLOG(j_.error()) << "AMM Instance: failed to create pseudo account."; + return {maybeAccount.error(), false}; } + auto& account = *maybeAccount; + auto const accountId = (*account)[sfAccount]; // LP Token already exists. (should not happen) auto const lptIss = ammLPTIssue( - amount.issue().currency, amount2.issue().currency, *ammAccount); - if (sb.read(keylet::line(*ammAccount, lptIss))) + amount.issue().currency, amount2.issue().currency, accountId); + if (sb.read(keylet::line(accountId, lptIss))) { JLOG(j_.error()) << "AMM Instance: LP Token already exists."; return {tecDUPLICATE, false}; } - // Create AMM Root Account. - auto sleAMMRoot = std::make_shared(keylet::account(*ammAccount)); - sleAMMRoot->setAccountID(sfAccount, *ammAccount); - sleAMMRoot->setFieldAmount(sfBalance, STAmount{}); - std::uint32_t const seqno{ - ctx_.view().rules().enabled(featureDeletableAccounts) - ?
ctx_.view().seq() - : 1}; - sleAMMRoot->setFieldU32(sfSequence, seqno); - // Ignore reserves requirement, disable the master key, allow default - // rippling (AMM LPToken can be used in payments and offer crossing but - // not as a token in another AMM), and enable deposit authorization to - // prevent payments into AMM. // Note, that the trustlines created by AMM have 0 credit limit. // This prevents shifting the balance between accounts via AMM, // or sending unsolicited LPTokens. This is a desired behavior. // A user can only receive LPTokens through affirmative action - // either an AMMDeposit, TrustSet, crossing an offer, etc. - sleAMMRoot->setFieldU32( - sfFlags, lsfDisableMaster | lsfDefaultRipple | lsfDepositAuth); - // Link the root account and AMM object - sleAMMRoot->setFieldH256(sfAMMID, ammKeylet.key); - sb.insert(sleAMMRoot); // Calculate initial LPT balance. auto const lpTokens = ammLPTokens(amount, amount2, lptIss); // Create ltAMM auto ammSle = std::make_shared(ammKeylet); - ammSle->setAccountID(sfAccount, *ammAccount); + ammSle->setAccountID(sfAccount, accountId); ammSle->setFieldAmount(sfLPTokenBalance, lpTokens); auto const& [issue1, issue2] = std::minmax(amount.issue(), amount2.issue()); ammSle->setFieldIssue(sfAsset, STIssue{sfAsset, issue1}); @@ -286,22 +267,15 @@ applyCreate( ctx_.view(), ammSle, account_, lptIss, ctx_.tx[sfTradingFee]); // Add owner directory to link the root account and AMM object. - if (auto const page = sb.dirInsert( - keylet::ownerDir(*ammAccount), - ammSle->key(), - describeOwnerDir(*ammAccount))) - { - ammSle->setFieldU64(sfOwnerNode, *page); - } - else + if (auto ter = dirLink(sb, accountId, ammSle); ter) { JLOG(j_.debug()) << "AMM Instance: failed to insert owner dir"; - return {tecDIR_FULL, false}; + return {ter, false}; } sb.insert(ammSle); // Send LPT to LP. - auto res = accountSend(sb, *ammAccount, account_, lpTokens, ctx_.journal); + auto res = accountSend(sb, accountId, account_, lpTokens, ctx_.journal); if (res != tesSUCCESS) { JLOG(j_.debug()) << "AMM Instance: failed to send LPT " << lpTokens; @@ -312,7 +286,7 @@ applyCreate( if (auto const res = accountSend( sb, account_, - *ammAccount, + accountId, amount, ctx_.journal, WaiveTransferFee::Yes)) @@ -321,7 +295,7 @@ applyCreate( if (!isXRP(amount)) { if (SLE::pointer sleRippleState = - sb.peek(keylet::line(*ammAccount, amount.issue())); + sb.peek(keylet::line(accountId, amount.issue())); !sleRippleState) return tecINTERNAL; else @@ -350,7 +324,7 @@ applyCreate( return {res, false}; } - JLOG(j_.debug()) << "AMM Instance: success " << *ammAccount << " " + JLOG(j_.debug()) << "AMM Instance: success " << accountId << " " << ammKeylet.key << " " << lpTokens << " " << amount << " " << amount2; auto addOrderBook = diff --git a/src/xrpld/app/tx/detail/CashCheck.cpp b/src/xrpld/app/tx/detail/CashCheck.cpp index 468adbd209..cccda83a68 100644 --- a/src/xrpld/app/tx/detail/CashCheck.cpp +++ b/src/xrpld/app/tx/detail/CashCheck.cpp @@ -212,7 +212,7 @@ CashCheck::preclaim(PreclaimContext const& ctx) if (!sleTrustLine) { // We can only create a trust line if the issuer does not - // have requireAuth set. + // have lsfRequireAuth set. 
return tecNO_AUTH; } diff --git a/src/xrpld/app/tx/detail/Clawback.cpp b/src/xrpld/app/tx/detail/Clawback.cpp index e58faf2286..41ab1256fb 100644 --- a/src/xrpld/app/tx/detail/Clawback.cpp +++ b/src/xrpld/app/tx/detail/Clawback.cpp @@ -207,7 +207,12 @@ Clawback::preclaim(PreclaimContext const& ctx) if (!sleIssuer || !sleHolder) return terNO_ACCOUNT; - if (sleHolder->isFieldPresent(sfAMMID)) + // Note the order of checks - when SAV is active, this check here will make + // the one which follows `sleHolder->isFieldPresent(sfAMMID)` redundant. + if (ctx.view.rules().enabled(featureSingleAssetVault) && + isPseudoAccount(sleHolder)) + return tecPSEUDO_ACCOUNT; + else if (sleHolder->isFieldPresent(sfAMMID)) return tecAMM_ACCOUNT; return std::visit( diff --git a/src/xrpld/app/tx/detail/CreateCheck.cpp b/src/xrpld/app/tx/detail/CreateCheck.cpp index 19ef28b843..9baceef944 100644 --- a/src/xrpld/app/tx/detail/CreateCheck.cpp +++ b/src/xrpld/app/tx/detail/CreateCheck.cpp @@ -97,8 +97,11 @@ CreateCheck::preclaim(PreclaimContext const& ctx) (flags & lsfDisallowIncomingCheck)) return tecNO_PERMISSION; - // AMM can not cash the check - if (sleDst->isFieldPresent(sfAMMID)) + // Pseudo-accounts cannot cash checks. Note, this is not amendment-gated + // because all writes to pseudo-account discriminator fields **are** + // amendment gated, hence the behaviour of this check will always match the + // currently active amendments. + if (isPseudoAccount(sleDst)) return tecNO_PERMISSION; if ((flags & lsfRequireDestTag) && !ctx.tx.isFieldPresent(sfDestinationTag)) @@ -184,7 +187,7 @@ CreateCheck::doApply() // Note that we use the value from the sequence or ticket as the // Check sequence. For more explanation see comments in SeqProxy.h. - std::uint32_t const seq = ctx_.tx.getSeqProxy().value(); + std::uint32_t const seq = ctx_.tx.getSeqValue(); Keylet const checkKeylet = keylet::check(account_, seq); auto sleCheck = std::make_shared(checkKeylet); diff --git a/src/xrpld/app/tx/detail/CreateOffer.cpp b/src/xrpld/app/tx/detail/CreateOffer.cpp index 92ba54f077..d9bd57ec3c 100644 --- a/src/xrpld/app/tx/detail/CreateOffer.cpp +++ b/src/xrpld/app/tx/detail/CreateOffer.cpp @@ -969,7 +969,7 @@ CreateOffer::applyGuts(Sandbox& sb, Sandbox& sbCancel) // Note that we we use the value from the sequence or ticket as the // offer sequence. For more explanation see comments in SeqProxy.h. - auto const offerSequence = ctx_.tx.getSeqProxy().value(); + auto const offerSequence = ctx_.tx.getSeqValue(); // This is the original rate of the offer, and is the rate at which // it will be placed, even if crossing offers change the amounts that diff --git a/src/xrpld/app/tx/detail/Escrow.cpp b/src/xrpld/app/tx/detail/Escrow.cpp index bc9ad0a11f..0b58957fcf 100644 --- a/src/xrpld/app/tx/detail/Escrow.cpp +++ b/src/xrpld/app/tx/detail/Escrow.cpp @@ -148,7 +148,12 @@ EscrowCreate::preclaim(PreclaimContext const& ctx) auto const sled = ctx.view.read(keylet::account(ctx.tx[sfDestination])); if (!sled) return tecNO_DST; - if (sled->isFieldPresent(sfAMMID)) + + // Pseudo-accounts cannot receive escrow. Note, this is not amendment-gated + // because all writes to pseudo-account discriminator fields **are** + // amendment gated, hence the behaviour of this check will always match the + // currently active amendments. + if (isPseudoAccount(sled)) return tecNO_PERMISSION; return tesSUCCESS; @@ -228,8 +233,7 @@ EscrowCreate::doApply() // Create escrow in ledger. Note that we we use the value from the // sequence or ticket. 
For more explanation see comments in SeqProxy.h. - Keylet const escrowKeylet = - keylet::escrow(account, ctx_.tx.getSeqProxy().value()); + Keylet const escrowKeylet = keylet::escrow(account, ctx_.tx.getSeqValue()); auto const slep = std::make_shared(escrowKeylet); (*slep)[sfAmount] = ctx_.tx[sfAmount]; (*slep)[sfAccount] = account; diff --git a/src/xrpld/app/tx/detail/InvariantCheck.cpp b/src/xrpld/app/tx/detail/InvariantCheck.cpp index 2441cb040a..aa1464ec2a 100644 --- a/src/xrpld/app/tx/detail/InvariantCheck.cpp +++ b/src/xrpld/app/tx/detail/InvariantCheck.cpp @@ -329,7 +329,8 @@ AccountRootsNotDeleted::finalize( // A successful AccountDelete or AMMDelete MUST delete exactly // one account root. if ((tx.getTxnType() == ttACCOUNT_DELETE || - tx.getTxnType() == ttAMM_DELETE) && + tx.getTxnType() == ttAMM_DELETE || + tx.getTxnType() == ttVAULT_DELETE) && result == tesSUCCESS) { if (accountsDeleted_ == 1) @@ -490,6 +491,7 @@ LedgerEntryTypesMatch::visitEntry( case ltMPTOKEN: case ltCREDENTIAL: case ltPERMISSIONED_DOMAIN: + case ltVAULT: break; default: invalidTypeAdded_ = true; @@ -884,6 +886,8 @@ ValidNewAccountRoot::visitEntry( { accountsCreated_++; accountSeq_ = (*after)[sfSequence]; + pseudoAccount_ = isPseudoAccount(after); + flags_ = after->getFlags(); } } @@ -907,12 +911,28 @@ ValidNewAccountRoot::finalize( // From this point on we know exactly one account was created. if ((tx.getTxnType() == ttPAYMENT || tx.getTxnType() == ttAMM_CREATE || + tx.getTxnType() == ttVAULT_CREATE || tx.getTxnType() == ttXCHAIN_ADD_CLAIM_ATTESTATION || tx.getTxnType() == ttXCHAIN_ADD_ACCOUNT_CREATE_ATTESTATION) && result == tesSUCCESS) { - std::uint32_t const startingSeq{ - view.rules().enabled(featureDeletableAccounts) ? view.seq() : 1}; + bool const pseudoAccount = + (pseudoAccount_ && view.rules().enabled(featureSingleAssetVault)); + + if (pseudoAccount && tx.getTxnType() != ttAMM_CREATE && + tx.getTxnType() != ttVAULT_CREATE) + { + JLOG(j.fatal()) << "Invariant failed: pseudo-account created by a " + "wrong transaction type"; + return false; + } + + std::uint32_t const startingSeq = // + pseudoAccount // + ? 0 // + : view.rules().enabled(featureDeletableAccounts) // + ? 
view.seq() // + : 1; if (accountSeq_ != startingSeq) { @@ -920,12 +940,24 @@ ValidNewAccountRoot::finalize( "wrong starting sequence number"; return false; } + + if (pseudoAccount) + { + std::uint32_t const expected = + (lsfDisableMaster | lsfDefaultRipple | lsfDepositAuth); + if (flags_ != expected) + { + JLOG(j.fatal()) + << "Invariant failed: pseudo-account created with " + "wrong flags"; + return false; + } + } + return true; } - JLOG(j.fatal()) << "Invariant failed: account root created " - "by a non-Payment, by an unsuccessful transaction, " - "or by AMM"; + JLOG(j.fatal()) << "Invariant failed: account root created illegally"; return false; } @@ -1313,28 +1345,30 @@ ValidMPTIssuance::finalize( { if (result == tesSUCCESS) { - if (tx.getTxnType() == ttMPTOKEN_ISSUANCE_CREATE) + if (tx.getTxnType() == ttMPTOKEN_ISSUANCE_CREATE || + tx.getTxnType() == ttVAULT_CREATE) { if (mptIssuancesCreated_ == 0) { - JLOG(j.fatal()) << "Invariant failed: MPT issuance creation " + JLOG(j.fatal()) << "Invariant failed: transaction " "succeeded without creating a MPT issuance"; } else if (mptIssuancesDeleted_ != 0) { - JLOG(j.fatal()) << "Invariant failed: MPT issuance creation " + JLOG(j.fatal()) << "Invariant failed: transaction " "succeeded while removing MPT issuances"; } else if (mptIssuancesCreated_ > 1) { - JLOG(j.fatal()) << "Invariant failed: MPT issuance creation " + JLOG(j.fatal()) << "Invariant failed: transaction " "succeeded but created multiple issuances"; } return mptIssuancesCreated_ == 1 && mptIssuancesDeleted_ == 0; } - if (tx.getTxnType() == ttMPTOKEN_ISSUANCE_DESTROY) + if (tx.getTxnType() == ttMPTOKEN_ISSUANCE_DESTROY || + tx.getTxnType() == ttVAULT_DELETE) { if (mptIssuancesDeleted_ == 0) { @@ -1355,7 +1389,8 @@ ValidMPTIssuance::finalize( return mptIssuancesCreated_ == 0 && mptIssuancesDeleted_ == 1; } - if (tx.getTxnType() == ttMPTOKEN_AUTHORIZE) + if (tx.getTxnType() == ttMPTOKEN_AUTHORIZE || + tx.getTxnType() == ttVAULT_DEPOSIT) { bool const submittedByIssuer = tx.isFieldPresent(sfHolder); @@ -1381,7 +1416,7 @@ ValidMPTIssuance::finalize( return false; } else if ( - !submittedByIssuer && + !submittedByIssuer && (tx.getTxnType() != ttVAULT_DEPOSIT) && (mptokensCreated_ + mptokensDeleted_ != 1)) { // if the holder submitted this tx, then a mptoken must be diff --git a/src/xrpld/app/tx/detail/InvariantCheck.h b/src/xrpld/app/tx/detail/InvariantCheck.h index cb06b0fb05..6819780114 100644 --- a/src/xrpld/app/tx/detail/InvariantCheck.h +++ b/src/xrpld/app/tx/detail/InvariantCheck.h @@ -438,6 +438,8 @@ class ValidNewAccountRoot { std::uint32_t accountsCreated_ = 0; std::uint32_t accountSeq_ = 0; + bool pseudoAccount_ = false; + std::uint32_t flags_ = 0; public: void diff --git a/src/xrpld/app/tx/detail/MPTokenAuthorize.cpp b/src/xrpld/app/tx/detail/MPTokenAuthorize.cpp index cfc098ab0f..748c05869f 100644 --- a/src/xrpld/app/tx/detail/MPTokenAuthorize.cpp +++ b/src/xrpld/app/tx/detail/MPTokenAuthorize.cpp @@ -83,6 +83,10 @@ MPTokenAuthorize::preclaim(PreclaimContext const& ctx) return tecHAS_OBLIGATIONS; } + if (ctx.view.rules().enabled(featureSingleAssetVault) && + sleMpt->isFlag(lsfMPTLocked)) + return tecNO_PERMISSION; + return tesSUCCESS; } @@ -160,14 +164,14 @@ MPTokenAuthorize::authorize( keylet::mptoken(args.mptIssuanceID, args.account); auto const sleMpt = view.peek(mptokenKey); if (!sleMpt || (*sleMpt)[sfMPTAmount] != 0) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE if (!view.dirRemove( keylet::ownerDir(args.account), (*sleMpt)[sfOwnerNode], sleMpt->key(), 
false)) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE adjustOwnerCount(view, sleAcct, -1, journal); @@ -194,20 +198,13 @@ MPTokenAuthorize::authorize( auto const mptokenKey = keylet::mptoken(args.mptIssuanceID, args.account); - - auto const ownerNode = view.dirInsert( - keylet::ownerDir(args.account), - mptokenKey, - describeOwnerDir(args.account)); - - if (!ownerNode) - return tecDIR_FULL; - auto mptoken = std::make_shared(mptokenKey); + if (auto ter = dirLink(view, args.account, mptoken)) + return ter; // LCOV_EXCL_LINE + (*mptoken)[sfAccount] = args.account; (*mptoken)[sfMPTokenIssuanceID] = args.mptIssuanceID; (*mptoken)[sfFlags] = 0; - (*mptoken)[sfOwnerNode] = *ownerNode; view.insert(mptoken); // Update owner count. diff --git a/src/xrpld/app/tx/detail/MPTokenAuthorize.h b/src/xrpld/app/tx/detail/MPTokenAuthorize.h index 79dc1734b5..e2b135a22a 100644 --- a/src/xrpld/app/tx/detail/MPTokenAuthorize.h +++ b/src/xrpld/app/tx/detail/MPTokenAuthorize.h @@ -27,10 +27,10 @@ namespace ripple { struct MPTAuthorizeArgs { XRPAmount const& priorBalance; - uint192 const& mptIssuanceID; + MPTID const& mptIssuanceID; AccountID const& account; - std::uint32_t flags; - std::optional holderID; + std::uint32_t flags{}; + std::optional holderID{}; }; class MPTokenAuthorize : public Transactor diff --git a/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.cpp b/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.cpp index 1bd3fcadd7..1b96b27f24 100644 --- a/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.cpp +++ b/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.cpp @@ -67,7 +67,7 @@ MPTokenIssuanceCreate::preflight(PreflightContext const& ctx) return preflight2(ctx); } -TER +Expected MPTokenIssuanceCreate::create( ApplyView& view, beast::Journal journal, @@ -75,14 +75,15 @@ MPTokenIssuanceCreate::create( { auto const acct = view.peek(keylet::account(args.account)); if (!acct) - return tecINTERNAL; + return Unexpected(tecINTERNAL); // LCOV_EXCL_LINE - if (args.priorBalance < - view.fees().accountReserve((*acct)[sfOwnerCount] + 1)) - return tecINSUFFICIENT_RESERVE; + if (args.priorBalance && + *(args.priorBalance) < + view.fees().accountReserve((*acct)[sfOwnerCount] + 1)) + return Unexpected(tecINSUFFICIENT_RESERVE); - auto const mptIssuanceKeylet = - keylet::mptIssuance(args.sequence, args.account); + auto const mptId = makeMptID(args.sequence, args.account); + auto const mptIssuanceKeylet = keylet::mptIssuance(mptId); // create the MPTokenIssuance { @@ -92,7 +93,7 @@ MPTokenIssuanceCreate::create( describeOwnerDir(args.account)); if (!ownerNode) - return tecDIR_FULL; + return Unexpected(tecDIR_FULL); // LCOV_EXCL_LINE auto mptIssuance = std::make_shared(mptIssuanceKeylet); (*mptIssuance)[sfFlags] = args.flags & ~tfUniversal; @@ -113,30 +114,36 @@ MPTokenIssuanceCreate::create( if (args.metadata) (*mptIssuance)[sfMPTokenMetadata] = *args.metadata; + if (args.domainId) + (*mptIssuance)[sfDomainID] = *args.domainId; + view.insert(mptIssuance); } // Update owner count. 
adjustOwnerCount(view, acct, 1, journal); - return tesSUCCESS; + return mptId; } TER MPTokenIssuanceCreate::doApply() { auto const& tx = ctx_.tx; - return create( - ctx_.view(), - ctx_.journal, - {.priorBalance = mPriorBalance, - .account = account_, - .sequence = tx.getSeqProxy().value(), - .flags = tx.getFlags(), - .maxAmount = tx[~sfMaximumAmount], - .assetScale = tx[~sfAssetScale], - .transferFee = tx[~sfTransferFee], - .metadata = tx[~sfMPTokenMetadata]}); + auto const result = create( + view(), + j_, + { + .priorBalance = mPriorBalance, + .account = account_, + .sequence = tx.getSeqValue(), + .flags = tx.getFlags(), + .maxAmount = tx[~sfMaximumAmount], + .assetScale = tx[~sfAssetScale], + .transferFee = tx[~sfTransferFee], + .metadata = tx[~sfMPTokenMetadata], + }); + return result ? tesSUCCESS : result.error(); } } // namespace ripple diff --git a/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.h b/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.h index 1346c3e31d..ea01908dff 100644 --- a/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.h +++ b/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.h @@ -22,18 +22,22 @@ #include +#include +#include + namespace ripple { struct MPTCreateArgs { - XRPAmount const& priorBalance; + std::optional priorBalance; AccountID const& account; std::uint32_t sequence; - std::uint32_t flags; - std::optional maxAmount; - std::optional assetScale; - std::optional transferFee; - std::optional const& metadata; + std::uint32_t flags = 0; + std::optional maxAmount{}; + std::optional assetScale{}; + std::optional transferFee{}; + std::optional const& metadata{}; + std::optional domainId{}; }; class MPTokenIssuanceCreate : public Transactor @@ -51,7 +55,7 @@ public: TER doApply() override; - static TER + static Expected create(ApplyView& view, beast::Journal journal, MPTCreateArgs const& args); }; diff --git a/src/xrpld/app/tx/detail/MPTokenIssuanceDestroy.cpp b/src/xrpld/app/tx/detail/MPTokenIssuanceDestroy.cpp index ed5d3c4f96..d06ea3473e 100644 --- a/src/xrpld/app/tx/detail/MPTokenIssuanceDestroy.cpp +++ b/src/xrpld/app/tx/detail/MPTokenIssuanceDestroy.cpp @@ -55,7 +55,7 @@ MPTokenIssuanceDestroy::preclaim(PreclaimContext const& ctx) return tecNO_PERMISSION; // ensure it has no outstanding balances - if ((*sleMPT)[~sfOutstandingAmount] != 0) + if ((*sleMPT)[sfOutstandingAmount] != 0) return tecHAS_OBLIGATIONS; return tesSUCCESS; diff --git a/src/xrpld/app/tx/detail/PayChan.cpp b/src/xrpld/app/tx/detail/PayChan.cpp index 25cdd0e69a..a42902f6ac 100644 --- a/src/xrpld/app/tx/detail/PayChan.cpp +++ b/src/xrpld/app/tx/detail/PayChan.cpp @@ -237,7 +237,13 @@ PayChanCreate::preclaim(PreclaimContext const& ctx) (flags & lsfDisallowXRP)) return tecNO_TARGET; - if (sled->isFieldPresent(sfAMMID)) + // Pseudo-accounts cannot receive payment channels, other than native + // to their underlying ledger object - implemented in their respective + // transaction types. Note, this is not amendment-gated because all + // writes to pseudo-account discriminator fields **are** amendment + // gated, hence the behaviour of this check will always match the + // currently active amendments. + if (isPseudoAccount(sled)) return tecNO_PERMISSION; } @@ -266,7 +272,7 @@ PayChanCreate::doApply() // Note that we we use the value from the sequence or ticket as the // payChan sequence. For more explanation see comments in SeqProxy.h. 
Keylet const payChanKeylet = - keylet::payChan(account, dst, ctx_.tx.getSeqProxy().value()); + keylet::payChan(account, dst, ctx_.tx.getSeqValue()); auto const slep = std::make_shared(payChanKeylet); // Funds held in this channel diff --git a/src/xrpld/app/tx/detail/Payment.cpp b/src/xrpld/app/tx/detail/Payment.cpp index f2f4ac4f7c..a97e472841 100644 --- a/src/xrpld/app/tx/detail/Payment.cpp +++ b/src/xrpld/app/tx/detail/Payment.cpp @@ -520,8 +520,7 @@ Payment::doApply() // - can't send between holders // - holder can send back to issuer // - issuer can send to holder - if (isFrozen(view(), account_, mptIssue) || - isFrozen(view(), dstAccountID, mptIssue)) + if (isAnyFrozen(view(), {account_, dstAccountID}, mptIssue)) return tecLOCKED; // Get the rate for a payment between the holders. @@ -591,9 +590,12 @@ Payment::doApply() return tecUNFUNDED_PAYMENT; } - // AMMs can never receive an XRP payment. - // Must use AMMDeposit transaction instead. - if (sleDst->isFieldPresent(sfAMMID)) + // Pseudo-accounts cannot receive payments, other than these native to + // their underlying ledger object - implemented in their respective + // transaction types. Note, this is not amendment-gated because all writes + // to pseudo-account discriminator fields **are** amendment gated, hence the + // behaviour of this check will always match the active amendments. + if (isPseudoAccount(sleDst)) return tecNO_PERMISSION; // The source account does have enough money. Make sure the diff --git a/src/xrpld/app/tx/detail/SetTrust.cpp b/src/xrpld/app/tx/detail/SetTrust.cpp index 9fe267b8e1..5e83c201fa 100644 --- a/src/xrpld/app/tx/detail/SetTrust.cpp +++ b/src/xrpld/app/tx/detail/SetTrust.cpp @@ -26,6 +26,8 @@ #include #include #include +#include +#include namespace { @@ -241,14 +243,16 @@ SetTrust::preclaim(PreclaimContext const& ctx) // This might be nullptr auto const sleDst = ctx.view.read(keylet::account(uDstAccountID)); + if ((ctx.view.rules().enabled(featureDisallowIncoming) || + ammEnabled(ctx.view.rules()) || + ctx.view.rules().enabled(featureSingleAssetVault)) && + sleDst == nullptr) + return tecNO_DST; // If the destination has opted to disallow incoming trustlines // then honour that flag if (ctx.view.rules().enabled(featureDisallowIncoming)) { - if (!sleDst) - return tecNO_DST; - if (sleDst->getFlags() & lsfDisallowIncomingTrustline) { // The original implementation of featureDisallowIncoming was @@ -266,18 +270,22 @@ SetTrust::preclaim(PreclaimContext const& ctx) } } - // If destination is AMM and the trustline doesn't exist then only - // allow SetTrust if the asset is AMM LP token and AMM is not - // in empty state. - if (ammEnabled(ctx.view.rules())) + // In general, trust lines to pseudo accounts are not permitted, unless + // enabled in the code section below, for specific cases. This block is not + // amendment-gated because sleDst will not have a pseudo-account designator + // field populated, unless the appropriate amendment was already enabled. + if (sleDst && isPseudoAccount(sleDst)) { - if (!sleDst) - return tecNO_DST; - - if (sleDst->isFieldPresent(sfAMMID) && - !ctx.view.read(keylet::line(id, uDstAccountID, currency))) + // If destination is AMM and the trustline doesn't exist then only allow + // SetTrust if the asset is AMM LP token and AMM is not in empty state. 
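// The PayChanCreate, Payment, and SetTrust changes above all rely on
// isPseudoAccount(), which keys off whether any pseudo-account discriminator
// field (such as AMMID or VaultID) is populated on the account root. A
// standalone sketch of that idea with plain std::optional members; the field
// names are illustrative, not the ledger schema:
#include <cstdint>
#include <optional>

struct AccountRootStub
{
    std::optional<std::uint64_t> ammID;
    std::optional<std::uint64_t> vaultID;
};

bool
isPseudoAccountStub(AccountRootStub const& acct)
{
    // An account is a pseudo-account when at least one discriminator is set;
    // a new pseudo-account-owning object type only needs to add its field here.
    return acct.ammID.has_value() || acct.vaultID.has_value();
}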
+ if (sleDst->isFieldPresent(sfAMMID)) { - if (auto const ammSle = + if (ctx.view.exists(keylet::line(id, uDstAccountID, currency))) + { + // pass + } + else if ( + auto const ammSle = ctx.view.read({ltAMM, sleDst->getFieldH256(sfAMMID)})) { if (auto const lpTokens = @@ -288,8 +296,16 @@ SetTrust::preclaim(PreclaimContext const& ctx) return tecNO_PERMISSION; } else - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE } + else if (sleDst->isFieldPresent(sfVaultID)) + { + if (!ctx.view.exists(keylet::line(id, uDstAccountID, currency))) + return tecNO_PERMISSION; + // else pass + } + else + return tecPSEUDO_ACCOUNT; } // Checking all freeze/deep freeze flag invariants. diff --git a/src/xrpld/app/tx/detail/VaultClawback.cpp b/src/xrpld/app/tx/detail/VaultClawback.cpp new file mode 100644 index 0000000000..f9bd0c7629 --- /dev/null +++ b/src/xrpld/app/tx/detail/VaultClawback.cpp @@ -0,0 +1,239 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2025 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { + +NotTEC +VaultClawback::preflight(PreflightContext const& ctx) +{ + if (!ctx.rules.enabled(featureSingleAssetVault)) + return temDISABLED; + + if (auto const ter = preflight1(ctx)) + return ter; + + if (ctx.tx.getFlags() & tfUniversalMask) + return temINVALID_FLAG; + + if (ctx.tx[sfVaultID] == beast::zero) + { + JLOG(ctx.j.debug()) << "VaultClawback: zero/empty vault ID."; + return temMALFORMED; + } + + AccountID const issuer = ctx.tx[sfAccount]; + AccountID const holder = ctx.tx[sfHolder]; + + if (issuer == holder) + { + JLOG(ctx.j.debug()) << "VaultClawback: issuer cannot be holder."; + return temMALFORMED; + } + + auto const amount = ctx.tx[~sfAmount]; + if (amount) + { + // Note, zero amount is valid, it means "all". It is also the default. 
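// In VaultClawback an absent or zero Amount means "claw back everything the
// holder has", as the note above says. A minimal standalone sketch of
// resolving that convention (plain doubles stand in for the ledger amount
// types):
#include <optional>

double
resolveClawbackAssets(
    std::optional<double> requestedAssets,  // absent or 0 means "all"
    double holderAssetValue)                // holder's shares valued in assets
{
    if (!requestedAssets || *requestedAssets == 0.0)
        return holderAssetValue;
    return *requestedAssets;
}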
+ if (*amount < beast::zero) + return temBAD_AMOUNT; + else if (isXRP(amount->asset())) + { + JLOG(ctx.j.debug()) << "VaultClawback: cannot clawback XRP."; + return temMALFORMED; + } + else if (amount->asset().getIssuer() != issuer) + { + JLOG(ctx.j.debug()) + << "VaultClawback: only asset issuer can clawback."; + return temMALFORMED; + } + } + + return preflight2(ctx); +} + +TER +VaultClawback::preclaim(PreclaimContext const& ctx) +{ + auto const vault = ctx.view.read(keylet::vault(ctx.tx[sfVaultID])); + if (!vault) + return tecNO_ENTRY; + + auto account = ctx.tx[sfAccount]; + auto const issuer = ctx.view.read(keylet::account(account)); + if (!issuer) + { + // LCOV_EXCL_START + JLOG(ctx.j.error()) << "VaultClawback: missing issuer account."; + return tefINTERNAL; + // LCOV_EXCL_STOP + } + + Asset const vaultAsset = vault->at(sfAsset); + if (auto const amount = ctx.tx[~sfAmount]; + amount && vaultAsset != amount->asset()) + return tecWRONG_ASSET; + + if (vaultAsset.native()) + { + JLOG(ctx.j.debug()) << "VaultClawback: cannot clawback XRP."; + return tecNO_PERMISSION; // Cannot clawback XRP. + } + else if (vaultAsset.getIssuer() != account) + { + JLOG(ctx.j.debug()) << "VaultClawback: only asset issuer can clawback."; + return tecNO_PERMISSION; // Only issuers can clawback. + } + + if (vaultAsset.holds()) + { + auto const mpt = vaultAsset.get(); + auto const mptIssue = + ctx.view.read(keylet::mptIssuance(mpt.getMptID())); + if (mptIssue == nullptr) + return tecOBJECT_NOT_FOUND; + + std::uint32_t const issueFlags = mptIssue->getFieldU32(sfFlags); + if (!(issueFlags & lsfMPTCanClawback)) + { + JLOG(ctx.j.debug()) + << "VaultClawback: cannot clawback MPT vault asset."; + return tecNO_PERMISSION; + } + } + else if (vaultAsset.holds()) + { + std::uint32_t const issuerFlags = issuer->getFieldU32(sfFlags); + if (!(issuerFlags & lsfAllowTrustLineClawback) || + (issuerFlags & lsfNoFreeze)) + { + JLOG(ctx.j.debug()) + << "VaultClawback: cannot clawback IOU vault asset."; + return tecNO_PERMISSION; + } + } + + return tesSUCCESS; +} + +TER +VaultClawback::doApply() +{ + auto const& tx = ctx_.tx; + auto const vault = view().peek(keylet::vault(tx[sfVaultID])); + if (!vault) + return tefINTERNAL; // LCOV_EXCL_LINE + + auto const mptIssuanceID = (*vault)[sfShareMPTID]; + auto const sleIssuance = view().read(keylet::mptIssuance(mptIssuanceID)); + if (!sleIssuance) + { + // LCOV_EXCL_START + JLOG(j_.error()) << "VaultClawback: missing issuance of vault shares."; + return tefINTERNAL; + // LCOV_EXCL_STOP + } + + Asset const asset = vault->at(sfAsset); + STAmount const amount = [&]() -> STAmount { + auto const maybeAmount = tx[~sfAmount]; + if (maybeAmount) + return *maybeAmount; + return {sfAmount, asset, 0}; + }(); + XRPL_ASSERT( + amount.asset() == asset, + "ripple::VaultClawback::doApply : matching asset"); + + AccountID holder = tx[sfHolder]; + STAmount assets, shares; + if (amount == beast::zero) + { + Asset share = *(*vault)[sfShareMPTID]; + shares = accountHolds( + view(), + holder, + share, + FreezeHandling::fhIGNORE_FREEZE, + AuthHandling::ahIGNORE_AUTH, + j_); + assets = sharesToAssetsWithdraw(vault, sleIssuance, shares); + } + else + { + assets = amount; + shares = assetsToSharesWithdraw(vault, sleIssuance, assets); + } + + // Clamp to maximum. 
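// The clamp that follows recomputes the share amount after capping the asset
// amount at AssetsAvailable, so the two stay consistent. A simplified sketch
// assuming a plain pro-rata exchange rate (the real assetsToSharesWithdraw /
// sharesToAssetsWithdraw helpers work on the ledger entries and handle
// rounding):
#include <algorithm>

struct ClawbackAmounts
{
    double assets;
    double shares;
};

ClawbackAmounts
clampToAvailable(
    double requestedAssets,
    double assetsAvailable,
    double assetsTotal,
    double sharesOutstanding)
{
    double const sharesPerAsset = sharesOutstanding / assetsTotal;
    double const assets = std::min(requestedAssets, assetsAvailable);
    // Shares are recomputed from the clamped asset amount.
    return {assets, assets * sharesPerAsset};
}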
+ Number maxAssets = *vault->at(sfAssetsAvailable); + if (assets > maxAssets) + { + assets = maxAssets; + shares = assetsToSharesWithdraw(vault, sleIssuance, assets); + } + + if (shares == beast::zero) + return tecINSUFFICIENT_FUNDS; + + vault->at(sfAssetsTotal) -= assets; + vault->at(sfAssetsAvailable) -= assets; + view().update(vault); + + auto const& vaultAccount = vault->at(sfAccount); + // Transfer shares from holder to vault. + if (auto ter = accountSend( + view(), holder, vaultAccount, shares, j_, WaiveTransferFee::Yes)) + return ter; + + // Transfer assets from vault to issuer. + if (auto ter = accountSend( + view(), vaultAccount, account_, assets, j_, WaiveTransferFee::Yes)) + return ter; + + // Sanity check + if (accountHolds( + view(), + vaultAccount, + assets.asset(), + FreezeHandling::fhIGNORE_FREEZE, + AuthHandling::ahIGNORE_AUTH, + j_) < beast::zero) + { + // LCOV_EXCL_START + JLOG(j_.error()) << "VaultClawback: negative balance of vault assets."; + return tefINTERNAL; + // LCOV_EXCL_STOP + } + + return tesSUCCESS; +} + +} // namespace ripple diff --git a/src/xrpld/app/tx/detail/VaultClawback.h b/src/xrpld/app/tx/detail/VaultClawback.h new file mode 100644 index 0000000000..65f0164686 --- /dev/null +++ b/src/xrpld/app/tx/detail/VaultClawback.h @@ -0,0 +1,48 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2024 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_TX_VAULTCLAWBACK_H_INCLUDED +#define RIPPLE_TX_VAULTCLAWBACK_H_INCLUDED + +#include + +namespace ripple { + +class VaultClawback : public Transactor +{ +public: + static constexpr ConsequencesFactoryType ConsequencesFactory{Normal}; + + explicit VaultClawback(ApplyContext& ctx) : Transactor(ctx) + { + } + + static NotTEC + preflight(PreflightContext const& ctx); + + static TER + preclaim(PreclaimContext const& ctx); + + TER + doApply() override; +}; + +} // namespace ripple + +#endif diff --git a/src/xrpld/app/tx/detail/VaultCreate.cpp b/src/xrpld/app/tx/detail/VaultCreate.cpp new file mode 100644 index 0000000000..cb6a994e7e --- /dev/null +++ b/src/xrpld/app/tx/detail/VaultCreate.cpp @@ -0,0 +1,244 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2025 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. 
+ + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { + +NotTEC +VaultCreate::preflight(PreflightContext const& ctx) +{ + if (!ctx.rules.enabled(featureSingleAssetVault) || + !ctx.rules.enabled(featureMPTokensV1)) + return temDISABLED; + + if (ctx.tx.isFieldPresent(sfDomainID) && + !ctx.rules.enabled(featurePermissionedDomains)) + return temDISABLED; + + if (auto const ter = preflight1(ctx)) + return ter; + + if (ctx.tx.getFlags() & tfVaultCreateMask) + return temINVALID_FLAG; + + if (auto const data = ctx.tx[~sfData]) + { + if (data->empty() || data->length() > maxDataPayloadLength) + return temMALFORMED; + } + + if (auto const withdrawalPolicy = ctx.tx[~sfWithdrawalPolicy]) + { + // Enforce valid withdrawal policy + if (*withdrawalPolicy != vaultStrategyFirstComeFirstServe) + return temMALFORMED; + } + + if (auto const domain = ctx.tx[~sfDomainID]) + { + if (*domain == beast::zero) + return temMALFORMED; + else if ((ctx.tx.getFlags() & tfVaultPrivate) == 0) + return temMALFORMED; // DomainID only allowed on private vaults + } + + if (auto const assetMax = ctx.tx[~sfAssetsMaximum]) + { + if (*assetMax < beast::zero) + return temMALFORMED; + } + + if (auto const metadata = ctx.tx[~sfMPTokenMetadata]) + { + if (metadata->length() == 0 || + metadata->length() > maxMPTokenMetadataLength) + return temMALFORMED; + } + + return preflight2(ctx); +} + +XRPAmount +VaultCreate::calculateBaseFee(ReadView const& view, STTx const& tx) +{ + // One reserve increment is typically much greater than one base fee. + return view.fees().increment; +} + +TER +VaultCreate::preclaim(PreclaimContext const& ctx) +{ + auto vaultAsset = ctx.tx[sfAsset]; + auto account = ctx.tx[sfAccount]; + + if (vaultAsset.native()) + ; // No special checks for XRP + else if (vaultAsset.holds()) + { + auto mptID = vaultAsset.get().getMptID(); + auto issuance = ctx.view.read(keylet::mptIssuance(mptID)); + if (!issuance) + return tecOBJECT_NOT_FOUND; + if (!issuance->isFlag(lsfMPTCanTransfer)) + { + // NOTE: flag lsfMPTCanTransfer is immutable, so this is debug in + // VaultCreate only; in other vault function it's an error. + JLOG(ctx.j.debug()) + << "VaultCreate: vault assets are non-transferable."; + return tecNO_AUTH; + } + } + else if (vaultAsset.holds()) + { + auto const issuer = + ctx.view.read(keylet::account(vaultAsset.getIssuer())); + if (!issuer) + return terNO_ACCOUNT; + else if (!issuer->isFlag(lsfDefaultRipple)) + return terNO_RIPPLE; + } + + // Check for pseudo-account issuers - we do not want a vault to hold such + // assets (e.g. 
MPT shares to other vaults or AMM LPTokens) as they would be + // impossible to clawback (should the need arise) + if (!vaultAsset.native()) + { + if (isPseudoAccount(ctx.view, vaultAsset.getIssuer())) + return tecWRONG_ASSET; + } + + // Cannot create Vault for an Asset frozen for the vault owner + if (isFrozen(ctx.view, account, vaultAsset)) + return vaultAsset.holds() ? tecFROZEN : tecLOCKED; + + if (auto const domain = ctx.tx[~sfDomainID]) + { + auto const sleDomain = + ctx.view.read(keylet::permissionedDomain(*domain)); + if (!sleDomain) + return tecOBJECT_NOT_FOUND; + } + + auto sequence = ctx.tx.getSeqValue(); + if (auto const accountId = pseudoAccountAddress( + ctx.view, keylet::vault(account, sequence).key); + accountId == beast::zero) + return terADDRESS_COLLISION; + + return tesSUCCESS; +} + +TER +VaultCreate::doApply() +{ + // All return codes in `doApply` must be `tec`, `ter`, or `tes`. + // As we move checks into `preflight` and `preclaim`, + // we can consider downgrading them to `tef` or `tem`. + + auto const& tx = ctx_.tx; + auto sequence = tx.getSeqValue(); + auto owner = view().peek(keylet::account(account_)); + if (owner == nullptr) + return tefINTERNAL; // LCOV_EXCL_LINE + + auto vault = std::make_shared(keylet::vault(account_, sequence)); + + if (auto ter = dirLink(view(), account_, vault)) + return ter; + adjustOwnerCount(view(), owner, 1, j_); + auto ownerCount = owner->at(sfOwnerCount); + if (mPriorBalance < view().fees().accountReserve(ownerCount)) + return tecINSUFFICIENT_RESERVE; + + auto maybePseudo = createPseudoAccount(view(), vault->key(), sfVaultID); + if (!maybePseudo) + return maybePseudo.error(); // LCOV_EXCL_LINE + auto& pseudo = *maybePseudo; + auto pseudoId = pseudo->at(sfAccount); + auto asset = tx[sfAsset]; + + if (auto ter = addEmptyHolding(view(), pseudoId, mPriorBalance, asset, j_); + !isTesSuccess(ter)) + return ter; + + auto txFlags = tx.getFlags(); + std::uint32_t mptFlags = 0; + if ((txFlags & tfVaultShareNonTransferable) == 0) + mptFlags |= (lsfMPTCanEscrow | lsfMPTCanTrade | lsfMPTCanTransfer); + if (txFlags & tfVaultPrivate) + mptFlags |= lsfMPTRequireAuth; + + // Note, here we are **not** creating an MPToken for the assets held in + // the vault. That MPToken or TrustLine/RippleState is created above, in + // addEmptyHolding. Here we are creating MPTokenIssuance for the shares + // in the vault + auto maybeShare = MPTokenIssuanceCreate::create( + view(), + j_, + { + .priorBalance = std::nullopt, + .account = pseudoId->value(), + .sequence = 1, + .flags = mptFlags, + .metadata = tx[~sfMPTokenMetadata], + .domainId = tx[~sfDomainID], + }); + if (!maybeShare) + return maybeShare.error(); // LCOV_EXCL_LINE + auto& share = *maybeShare; + + vault->setFieldIssue(sfAsset, STIssue{sfAsset, asset}); + vault->at(sfFlags) = txFlags & tfVaultPrivate; + vault->at(sfSequence) = sequence; + vault->at(sfOwner) = account_; + vault->at(sfAccount) = pseudoId; + vault->at(sfAssetsTotal) = Number(0); + vault->at(sfAssetsAvailable) = Number(0); + vault->at(sfLossUnrealized) = Number(0); + // Leave default values for AssetTotal and AssetAvailable, both zero. 
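// The share issuance created above derives its MPT flags from the VaultCreate
// transaction flags: shares are transferable unless tfVaultShareNonTransferable
// is set, and a private vault requires authorization on its shares. A
// standalone sketch of that mapping; the bit values are illustrative, not the
// real lsf*/tf* constants:
#include <cstdint>

namespace sketch {

constexpr std::uint32_t txShareNonTransferable = 0x0001;
constexpr std::uint32_t txPrivate              = 0x0002;

constexpr std::uint32_t mptCanEscrow   = 0x0010;
constexpr std::uint32_t mptCanTrade    = 0x0020;
constexpr std::uint32_t mptCanTransfer = 0x0040;
constexpr std::uint32_t mptRequireAuth = 0x0080;

constexpr std::uint32_t
shareFlagsFor(std::uint32_t txFlags)
{
    std::uint32_t flags = 0;
    if ((txFlags & txShareNonTransferable) == 0)
        flags |= mptCanEscrow | mptCanTrade | mptCanTransfer;
    if (txFlags & txPrivate)
        flags |= mptRequireAuth;
    return flags;
}

static_assert(
    shareFlagsFor(txPrivate) ==
    (mptCanEscrow | mptCanTrade | mptCanTransfer | mptRequireAuth));

}  // namespace sketch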
+ if (auto value = tx[~sfAssetsMaximum]) + vault->at(sfAssetsMaximum) = *value; + vault->at(sfShareMPTID) = share; + if (auto value = tx[~sfData]) + vault->at(sfData) = *value; + // Required field, default to vaultStrategyFirstComeFirstServe + if (auto value = tx[~sfWithdrawalPolicy]) + vault->at(sfWithdrawalPolicy) = *value; + else + vault->at(sfWithdrawalPolicy) = vaultStrategyFirstComeFirstServe; + // No `LossUnrealized`. + view().insert(vault); + + return tesSUCCESS; +} + +} // namespace ripple diff --git a/src/xrpld/app/tx/detail/VaultCreate.h b/src/xrpld/app/tx/detail/VaultCreate.h new file mode 100644 index 0000000000..5555644629 --- /dev/null +++ b/src/xrpld/app/tx/detail/VaultCreate.h @@ -0,0 +1,51 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2024 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_TX_VAULTCREATE_H_INCLUDED +#define RIPPLE_TX_VAULTCREATE_H_INCLUDED + +#include + +namespace ripple { + +class VaultCreate : public Transactor +{ +public: + static constexpr ConsequencesFactoryType ConsequencesFactory{Normal}; + + explicit VaultCreate(ApplyContext& ctx) : Transactor(ctx) + { + } + + static NotTEC + preflight(PreflightContext const& ctx); + + static XRPAmount + calculateBaseFee(ReadView const& view, STTx const& tx); + + static TER + preclaim(PreclaimContext const& ctx); + + TER + doApply() override; +}; + +} // namespace ripple + +#endif diff --git a/src/xrpld/app/tx/detail/VaultDelete.cpp b/src/xrpld/app/tx/detail/VaultDelete.cpp new file mode 100644 index 0000000000..7861e9e9b6 --- /dev/null +++ b/src/xrpld/app/tx/detail/VaultDelete.cpp @@ -0,0 +1,189 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2025 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include + +#include +#include +#include +#include + +namespace ripple { + +NotTEC +VaultDelete::preflight(PreflightContext const& ctx) +{ + if (!ctx.rules.enabled(featureSingleAssetVault)) + return temDISABLED; + + if (auto const ter = preflight1(ctx)) + return ter; + + if (ctx.tx.getFlags() & tfUniversalMask) + return temINVALID_FLAG; + + if (ctx.tx[sfVaultID] == beast::zero) + { + JLOG(ctx.j.debug()) << "VaultDelete: zero/empty vault ID."; + return temMALFORMED; + } + + return preflight2(ctx); +} + +TER +VaultDelete::preclaim(PreclaimContext const& ctx) +{ + auto const vault = ctx.view.read(keylet::vault(ctx.tx[sfVaultID])); + if (!vault) + return tecNO_ENTRY; + + if (vault->at(sfOwner) != ctx.tx[sfAccount]) + { + JLOG(ctx.j.debug()) << "VaultDelete: account is not an owner."; + return tecNO_PERMISSION; + } + + if (vault->at(sfAssetsAvailable) != 0) + { + JLOG(ctx.j.debug()) << "VaultDelete: nonzero assets available."; + return tecHAS_OBLIGATIONS; + } + + if (vault->at(sfAssetsTotal) != 0) + { + JLOG(ctx.j.debug()) << "VaultDelete: nonzero assets total."; + return tecHAS_OBLIGATIONS; + } + + // Verify we can destroy MPTokenIssuance + auto const sleMPT = + ctx.view.read(keylet::mptIssuance(vault->at(sfShareMPTID))); + + if (!sleMPT) + { + // LCOV_EXCL_START + JLOG(ctx.j.error()) + << "VaultDeposit: missing issuance of vault shares."; + return tecOBJECT_NOT_FOUND; + // LCOV_EXCL_STOP + } + + if (sleMPT->at(sfIssuer) != vault->getAccountID(sfAccount)) + { + // LCOV_EXCL_START + JLOG(ctx.j.error()) << "VaultDeposit: invalid owner of vault shares."; + return tecNO_PERMISSION; + // LCOV_EXCL_STOP + } + + if (sleMPT->at(sfOutstandingAmount) != 0) + { + JLOG(ctx.j.debug()) << "VaultDelete: nonzero outstanding shares."; + return tecHAS_OBLIGATIONS; + } + + return tesSUCCESS; +} + +TER +VaultDelete::doApply() +{ + auto const vault = view().peek(keylet::vault(ctx_.tx[sfVaultID])); + if (!vault) + return tefINTERNAL; // LCOV_EXCL_LINE + + // Destroy the asset holding. + auto asset = vault->at(sfAsset); + if (auto ter = removeEmptyHolding(view(), vault->at(sfAccount), asset, j_); + !isTesSuccess(ter)) + return ter; + + auto const& pseudoID = vault->at(sfAccount); + auto const pseudoAcct = view().peek(keylet::account(pseudoID)); + if (!pseudoAcct) + { + // LCOV_EXCL_START + JLOG(j_.error()) << "VaultDelete: missing vault pseudo-account."; + return tefBAD_LEDGER; + // LCOV_EXCL_STOP + } + + // Destroy the share issuance. Do not use MPTokenIssuanceDestroy for this, + // no special logic needed. First run few checks, duplicated from preclaim. + auto const mpt = view().peek(keylet::mptIssuance(vault->at(sfShareMPTID))); + if (!mpt) + { + // LCOV_EXCL_START + JLOG(j_.error()) << "VaultDelete: missing issuance of vault shares."; + return tefINTERNAL; + // LCOV_EXCL_STOP + } + + if (!view().dirRemove( + keylet::ownerDir(pseudoID), (*mpt)[sfOwnerNode], mpt->key(), false)) + { + // LCOV_EXCL_START + JLOG(j_.error()) << "VaultDelete: failed to delete issuance object."; + return tefBAD_LEDGER; + // LCOV_EXCL_STOP + } + adjustOwnerCount(view(), pseudoAcct, -1, j_); + + view().erase(mpt); + + // The pseudo-account's directory should have been deleted already. + if (view().peek(keylet::ownerDir(pseudoID))) + return tecHAS_OBLIGATIONS; // LCOV_EXCL_LINE + + // Destroy the pseudo-account. + view().erase(view().peek(keylet::account(pseudoID))); + + // Remove the vault from its owner's directory. 
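// VaultDelete::preclaim above refuses deletion while the vault still has
// obligations: nonzero total or available assets, or outstanding shares.
// Teardown then proceeds child-first (share issuance, pseudo-account, then the
// vault itself). A standalone sketch of the guard with plain integers
// (illustrative, not the ledger types):
#include <cstdint>

enum class DeleteCheck { ok, hasObligations };

struct VaultTotalsStub
{
    std::uint64_t assetsTotal;
    std::uint64_t assetsAvailable;
    std::uint64_t sharesOutstanding;
};

DeleteCheck
canDeleteVault(VaultTotalsStub const& v)
{
    // Every counter must already be zero before any object is erased.
    if (v.assetsTotal != 0 || v.assetsAvailable != 0 ||
        v.sharesOutstanding != 0)
        return DeleteCheck::hasObligations;
    return DeleteCheck::ok;
}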
+ auto const ownerID = vault->at(sfOwner); + if (!view().dirRemove( + keylet::ownerDir(ownerID), + vault->at(sfOwnerNode), + vault->key(), + false)) + { + // LCOV_EXCL_START + JLOG(j_.error()) << "VaultDelete: failed to delete vault object."; + return tefBAD_LEDGER; + // LCOV_EXCL_STOP + } + + auto const owner = view().peek(keylet::account(ownerID)); + if (!owner) + { + // LCOV_EXCL_START + JLOG(j_.error()) << "VaultDelete: missing vault owner account."; + return tefBAD_LEDGER; + // LCOV_EXCL_STOP + } + adjustOwnerCount(view(), owner, -1, j_); + + // Destroy the vault. + view().erase(vault); + + return tesSUCCESS; +} + +} // namespace ripple diff --git a/src/xrpld/app/tx/detail/VaultDelete.h b/src/xrpld/app/tx/detail/VaultDelete.h new file mode 100644 index 0000000000..2b77e84469 --- /dev/null +++ b/src/xrpld/app/tx/detail/VaultDelete.h @@ -0,0 +1,48 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2024 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_TX_VAULTDELETE_H_INCLUDED +#define RIPPLE_TX_VAULTDELETE_H_INCLUDED + +#include + +namespace ripple { + +class VaultDelete : public Transactor +{ +public: + static constexpr ConsequencesFactoryType ConsequencesFactory{Normal}; + + explicit VaultDelete(ApplyContext& ctx) : Transactor(ctx) + { + } + + static NotTEC + preflight(PreflightContext const& ctx); + + static TER + preclaim(PreclaimContext const& ctx); + + TER + doApply() override; +}; + +} // namespace ripple + +#endif diff --git a/src/xrpld/app/tx/detail/VaultDeposit.cpp b/src/xrpld/app/tx/detail/VaultDeposit.cpp new file mode 100644 index 0000000000..0efddb0ff7 --- /dev/null +++ b/src/xrpld/app/tx/detail/VaultDeposit.cpp @@ -0,0 +1,283 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2025 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { + +NotTEC +VaultDeposit::preflight(PreflightContext const& ctx) +{ + if (!ctx.rules.enabled(featureSingleAssetVault)) + return temDISABLED; + + if (auto const ter = preflight1(ctx)) + return ter; + + if (ctx.tx.getFlags() & tfUniversalMask) + return temINVALID_FLAG; + + if (ctx.tx[sfVaultID] == beast::zero) + { + JLOG(ctx.j.debug()) << "VaultDeposit: zero/empty vault ID."; + return temMALFORMED; + } + + if (ctx.tx[sfAmount] <= beast::zero) + return temBAD_AMOUNT; + + return preflight2(ctx); +} + +TER +VaultDeposit::preclaim(PreclaimContext const& ctx) +{ + auto const vault = ctx.view.read(keylet::vault(ctx.tx[sfVaultID])); + if (!vault) + return tecNO_ENTRY; + + auto const account = ctx.tx[sfAccount]; + auto const assets = ctx.tx[sfAmount]; + auto const vaultAsset = vault->at(sfAsset); + if (assets.asset() != vaultAsset) + return tecWRONG_ASSET; + + if (vaultAsset.native()) + ; // No special checks for XRP + else if (vaultAsset.holds()) + { + auto mptID = vaultAsset.get().getMptID(); + auto issuance = ctx.view.read(keylet::mptIssuance(mptID)); + if (!issuance) + return tecOBJECT_NOT_FOUND; + if (!issuance->isFlag(lsfMPTCanTransfer)) + { + // LCOV_EXCL_START + JLOG(ctx.j.error()) + << "VaultDeposit: vault assets are non-transferable."; + return tecNO_AUTH; + // LCOV_EXCL_STOP + } + } + else if (vaultAsset.holds()) + { + auto const issuer = + ctx.view.read(keylet::account(vaultAsset.getIssuer())); + if (!issuer) + { + // LCOV_EXCL_START + JLOG(ctx.j.error()) + << "VaultDeposit: missing issuer of vault assets."; + return tefINTERNAL; + // LCOV_EXCL_STOP + } + } + + auto const mptIssuanceID = vault->at(sfShareMPTID); + auto const vaultShare = MPTIssue(mptIssuanceID); + if (vaultShare == assets.asset()) + { + // LCOV_EXCL_START + JLOG(ctx.j.error()) + << "VaultDeposit: vault shares and assets cannot be same."; + return tefINTERNAL; + // LCOV_EXCL_STOP + } + + auto const sleIssuance = ctx.view.read(keylet::mptIssuance(mptIssuanceID)); + if (!sleIssuance) + { + // LCOV_EXCL_START + JLOG(ctx.j.error()) + << "VaultDeposit: missing issuance of vault shares."; + return tefINTERNAL; + // LCOV_EXCL_STOP + } + + if (sleIssuance->isFlag(lsfMPTLocked)) + { + // LCOV_EXCL_START + JLOG(ctx.j.error()) + << "VaultDeposit: issuance of vault shares is locked."; + return tefINTERNAL; + // LCOV_EXCL_STOP + } + + // Cannot deposit inside Vault an Asset frozen for the depositor + if (isFrozen(ctx.view, account, vaultAsset)) + return vaultAsset.holds() ? tecFROZEN : tecLOCKED; + + // Cannot deposit if the shares of the vault are frozen + if (isFrozen(ctx.view, account, vaultShare)) + return tecLOCKED; + + if (vault->isFlag(tfVaultPrivate) && account != vault->at(sfOwner)) + { + auto const maybeDomainID = sleIssuance->at(~sfDomainID); + // Since this is a private vault and the account is not its owner, we + // perform authorization check based on DomainID read from sleIssuance. + // Had the vault shares been a regular MPToken, we would allow + // authorization granted by the Issuer explicitly, but Vault uses Issuer + // pseudo-account, which cannot grant an authorization. + if (maybeDomainID) + { + // As per validDomain documentation, we suppress tecEXPIRED error + // here, so we can delete any expired credentials inside doApply. 
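// The private-vault branch below treats tecEXPIRED as a pass in preclaim so
// that doApply still runs and can delete the expired credentials. A standalone
// sketch of that two-phase convention; Check is an illustrative enum, not the
// rippled TER type:
enum class Check { ok, expired, notAuthorized };

// preclaim-style filter: "expired" is allowed through so the apply phase gets
// a chance to clean up; every other failure stops the transaction here.
inline bool
passesPreclaim(Check c)
{
    return c == Check::ok || c == Check::expired;
}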
+ if (auto const err = + credentials::validDomain(ctx.view, *maybeDomainID, account); + !isTesSuccess(err) && err != tecEXPIRED) + return err; + } + else + return tecNO_AUTH; + } + + // Source MPToken must exist (if asset is an MPT) + if (auto const ter = requireAuth(ctx.view, vaultAsset, account); + !isTesSuccess(ter)) + return ter; + + if (accountHolds( + ctx.view, + account, + vaultAsset, + FreezeHandling::fhZERO_IF_FROZEN, + AuthHandling::ahZERO_IF_UNAUTHORIZED, + ctx.j) < assets) + return tecINSUFFICIENT_FUNDS; + + return tesSUCCESS; +} + +TER +VaultDeposit::doApply() +{ + auto const vault = view().peek(keylet::vault(ctx_.tx[sfVaultID])); + if (!vault) + return tefINTERNAL; // LCOV_EXCL_LINE + + auto const assets = ctx_.tx[sfAmount]; + // Make sure the depositor can hold shares. + auto const mptIssuanceID = (*vault)[sfShareMPTID]; + auto const sleIssuance = view().read(keylet::mptIssuance(mptIssuanceID)); + if (!sleIssuance) + { + // LCOV_EXCL_START + JLOG(j_.error()) << "VaultDeposit: missing issuance of vault shares."; + return tefINTERNAL; + // LCOV_EXCL_STOP + } + + auto const& vaultAccount = vault->at(sfAccount); + // Note, vault owner is always authorized + if ((vault->getFlags() & tfVaultPrivate) && account_ != vault->at(sfOwner)) + { + if (auto const err = enforceMPTokenAuthorization( + ctx_.view(), mptIssuanceID, account_, mPriorBalance, j_); + !isTesSuccess(err)) + return err; + } + else + { + // No authorization needed, but must ensure there is MPToken + auto sleMpt = view().read(keylet::mptoken(mptIssuanceID, account_)); + if (!sleMpt) + { + if (auto const err = MPTokenAuthorize::authorize( + view(), + ctx_.journal, + {.priorBalance = mPriorBalance, + .mptIssuanceID = mptIssuanceID->value(), + .account = account_}); + !isTesSuccess(err)) + return err; + } + + // If the vault is private, set the authorized flag for the vault owner + if (vault->isFlag(tfVaultPrivate)) + { + if (auto const err = MPTokenAuthorize::authorize( + view(), + ctx_.journal, + { + .priorBalance = mPriorBalance, + .mptIssuanceID = mptIssuanceID->value(), + .account = sleIssuance->at(sfIssuer), + .holderID = account_, + }); + !isTesSuccess(err)) + return err; + } + } + + // Compute exchange before transferring any amounts. + auto const shares = assetsToSharesDeposit(vault, sleIssuance, assets); + XRPL_ASSERT( + shares.asset() != assets.asset(), + "ripple::VaultDeposit::doApply : assets are not shares"); + + vault->at(sfAssetsTotal) += assets; + vault->at(sfAssetsAvailable) += assets; + view().update(vault); + + // A deposit must not push the vault over its limit. + auto const maximum = *vault->at(sfAssetsMaximum); + if (maximum != 0 && *vault->at(sfAssetsTotal) > maximum) + return tecLIMIT_EXCEEDED; + + // Transfer assets from depositor to vault. + if (auto ter = accountSend( + view(), account_, vaultAccount, assets, j_, WaiveTransferFee::Yes)) + return ter; + + // Sanity check + if (accountHolds( + view(), + account_, + assets.asset(), + FreezeHandling::fhIGNORE_FREEZE, + AuthHandling::ahIGNORE_AUTH, + j_) < beast::zero) + { + // LCOV_EXCL_START + JLOG(j_.error()) << "VaultDeposit: negative balance of account assets."; + return tefINTERNAL; + // LCOV_EXCL_STOP + } + + // Transfer shares from vault to depositor. 
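// The deposit path above computes the share amount before moving any funds and
// rejects a deposit that would push AssetsTotal past a nonzero AssetsMaximum.
// A simplified sketch assuming a pro-rata share price (the real
// assetsToSharesDeposit helper works on the ledger entries):
struct DepositOutcome
{
    bool accepted;
    double sharesIssued;
};

DepositOutcome
depositSketch(
    double assetsTotal,
    double sharesOutstanding,
    double assetsMaximum,  // 0 means "no limit"
    double depositAssets)
{
    // The first deposit into an empty vault is priced 1:1 in this sketch.
    double const sharesPerAsset =
        assetsTotal == 0 ? 1.0 : sharesOutstanding / assetsTotal;

    if (assetsMaximum != 0 && assetsTotal + depositAssets > assetsMaximum)
        return {false, 0.0};  // analogue of tecLIMIT_EXCEEDED

    return {true, depositAssets * sharesPerAsset};
}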
+ if (auto ter = accountSend( + view(), vaultAccount, account_, shares, j_, WaiveTransferFee::Yes)) + return ter; + + return tesSUCCESS; +} + +} // namespace ripple diff --git a/src/xrpld/app/tx/detail/VaultDeposit.h b/src/xrpld/app/tx/detail/VaultDeposit.h new file mode 100644 index 0000000000..50515ce3d8 --- /dev/null +++ b/src/xrpld/app/tx/detail/VaultDeposit.h @@ -0,0 +1,48 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2024 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_TX_VAULTDEPOSIT_H_INCLUDED +#define RIPPLE_TX_VAULTDEPOSIT_H_INCLUDED + +#include + +namespace ripple { + +class VaultDeposit : public Transactor +{ +public: + static constexpr ConsequencesFactoryType ConsequencesFactory{Normal}; + + explicit VaultDeposit(ApplyContext& ctx) : Transactor(ctx) + { + } + + static NotTEC + preflight(PreflightContext const& ctx); + + static TER + preclaim(PreclaimContext const& ctx); + + TER + doApply() override; +}; + +} // namespace ripple + +#endif diff --git a/src/xrpld/app/tx/detail/VaultSet.cpp b/src/xrpld/app/tx/detail/VaultSet.cpp new file mode 100644 index 0000000000..a13ce6d10e --- /dev/null +++ b/src/xrpld/app/tx/detail/VaultSet.cpp @@ -0,0 +1,197 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2025 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { + +NotTEC +VaultSet::preflight(PreflightContext const& ctx) +{ + if (!ctx.rules.enabled(featureSingleAssetVault)) + return temDISABLED; + + if (ctx.tx.isFieldPresent(sfDomainID) && + !ctx.rules.enabled(featurePermissionedDomains)) + return temDISABLED; + + if (auto const ter = preflight1(ctx)) + return ter; + + if (ctx.tx[sfVaultID] == beast::zero) + { + JLOG(ctx.j.debug()) << "VaultSet: zero/empty vault ID."; + return temMALFORMED; + } + + if (ctx.tx.getFlags() & tfUniversalMask) + return temINVALID_FLAG; + + if (auto const data = ctx.tx[~sfData]) + { + if (data->empty() || data->length() > maxDataPayloadLength) + { + JLOG(ctx.j.debug()) << "VaultSet: invalid data payload size."; + return temMALFORMED; + } + } + + if (auto const assetMax = ctx.tx[~sfAssetsMaximum]) + { + if (*assetMax < beast::zero) + { + JLOG(ctx.j.debug()) << "VaultSet: invalid max assets."; + return temMALFORMED; + } + } + + if (!ctx.tx.isFieldPresent(sfDomainID) && + !ctx.tx.isFieldPresent(sfAssetsMaximum) && + !ctx.tx.isFieldPresent(sfData)) + { + JLOG(ctx.j.debug()) << "VaultSet: nothing is being updated."; + return temMALFORMED; + } + + return preflight2(ctx); +} + +TER +VaultSet::preclaim(PreclaimContext const& ctx) +{ + auto const vault = ctx.view.read(keylet::vault(ctx.tx[sfVaultID])); + if (!vault) + return tecNO_ENTRY; + + // Assert that submitter is the Owner. + if (ctx.tx[sfAccount] != vault->at(sfOwner)) + { + JLOG(ctx.j.debug()) << "VaultSet: account is not an owner."; + return tecNO_PERMISSION; + } + + auto const mptIssuanceID = (*vault)[sfShareMPTID]; + auto const sleIssuance = ctx.view.read(keylet::mptIssuance(mptIssuanceID)); + if (!sleIssuance) + { + // LCOV_EXCL_START + JLOG(ctx.j.error()) << "VaultSet: missing issuance of vault shares."; + return tefINTERNAL; + // LCOV_EXCL_STOP + } + + if (auto const domain = ctx.tx[~sfDomainID]) + { + // We can only set domain if private flag was originally set + if ((vault->getFlags() & tfVaultPrivate) == 0) + { + JLOG(ctx.j.debug()) << "VaultSet: vault is not private"; + return tecNO_PERMISSION; + } + + if (*domain != beast::zero) + { + auto const sleDomain = + ctx.view.read(keylet::permissionedDomain(*domain)); + if (!sleDomain) + return tecOBJECT_NOT_FOUND; + } + + // Sanity check only, this should be enforced by VaultCreate + if ((sleIssuance->getFlags() & lsfMPTRequireAuth) == 0) + { + // LCOV_EXCL_START + JLOG(ctx.j.error()) + << "VaultSet: issuance of vault shares is not private."; + return tefINTERNAL; + // LCOV_EXCL_STOP + } + } + + return tesSUCCESS; +} + +TER +VaultSet::doApply() +{ + // All return codes in `doApply` must be `tec`, `ter`, or `tes`. + // As we move checks into `preflight` and `preclaim`, + // we can consider downgrading them to `tef` or `tem`. + + auto const& tx = ctx_.tx; + + // Update existing object. + auto vault = view().peek(keylet::vault(tx[sfVaultID])); + if (!vault) + return tefINTERNAL; // LCOV_EXCL_LINE + + auto const mptIssuanceID = (*vault)[sfShareMPTID]; + auto const sleIssuance = view().peek(keylet::mptIssuance(mptIssuanceID)); + if (!sleIssuance) + { + // LCOV_EXCL_START + JLOG(j_.error()) << "VaultSet: missing issuance of vault shares."; + return tefINTERNAL; + // LCOV_EXCL_STOP + } + + // Update mutable flags and fields if given. 
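// The AssetsMaximum update that follows only accepts a new value that is
// either 0 (remove the cap) or at least the vault's current AssetsTotal. A
// minimal sketch of that guard:
#include <cstdint>

bool
assetsMaximumAcceptable(std::uint64_t newMaximum, std::uint64_t assetsTotal)
{
    // 0 removes the cap; any other value must not be below what is already in
    // the vault, otherwise the update is rejected (tecLIMIT_EXCEEDED).
    return newMaximum == 0 || newMaximum >= assetsTotal;
}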
+ if (tx.isFieldPresent(sfData)) + vault->at(sfData) = tx[sfData]; + if (tx.isFieldPresent(sfAssetsMaximum)) + { + if (tx[sfAssetsMaximum] != 0 && + tx[sfAssetsMaximum] < *vault->at(sfAssetsTotal)) + return tecLIMIT_EXCEEDED; + vault->at(sfAssetsMaximum) = tx[sfAssetsMaximum]; + } + + if (auto const domainId = tx[~sfDomainID]; domainId) + { + if (*domainId != beast::zero) + { + // In VaultSet::preclaim we enforce that tfVaultPrivate must have + // been set in the vault. We currently do not support making such a + // vault public (i.e. removal of tfVaultPrivate flag). The + // sfDomainID flag must be set in the MPTokenIssuance object and can + // be freely updated. + sleIssuance->setFieldH256(sfDomainID, *domainId); + } + else if (sleIssuance->isFieldPresent(sfDomainID)) + { + sleIssuance->makeFieldAbsent(sfDomainID); + } + view().update(sleIssuance); + } + + view().update(vault); + + return tesSUCCESS; +} + +} // namespace ripple diff --git a/src/xrpld/app/tx/detail/VaultSet.h b/src/xrpld/app/tx/detail/VaultSet.h new file mode 100644 index 0000000000..f16aa6c284 --- /dev/null +++ b/src/xrpld/app/tx/detail/VaultSet.h @@ -0,0 +1,48 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2024 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_TX_VAULTSET_H_INCLUDED +#define RIPPLE_TX_VAULTSET_H_INCLUDED + +#include + +namespace ripple { + +class VaultSet : public Transactor +{ +public: + static constexpr ConsequencesFactoryType ConsequencesFactory{Normal}; + + explicit VaultSet(ApplyContext& ctx) : Transactor(ctx) + { + } + + static NotTEC + preflight(PreflightContext const& ctx); + + static TER + preclaim(PreclaimContext const& ctx); + + TER + doApply() override; +}; + +} // namespace ripple + +#endif diff --git a/src/xrpld/app/tx/detail/VaultWithdraw.cpp b/src/xrpld/app/tx/detail/VaultWithdraw.cpp new file mode 100644 index 0000000000..7a8605cdbd --- /dev/null +++ b/src/xrpld/app/tx/detail/VaultWithdraw.cpp @@ -0,0 +1,258 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2025 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace ripple { + +NotTEC +VaultWithdraw::preflight(PreflightContext const& ctx) +{ + if (!ctx.rules.enabled(featureSingleAssetVault)) + return temDISABLED; + + if (auto const ter = preflight1(ctx)) + return ter; + + if (ctx.tx.getFlags() & tfUniversalMask) + return temINVALID_FLAG; + + if (ctx.tx[sfVaultID] == beast::zero) + { + JLOG(ctx.j.debug()) << "VaultWithdraw: zero/empty vault ID."; + return temMALFORMED; + } + + if (ctx.tx[sfAmount] <= beast::zero) + return temBAD_AMOUNT; + + if (auto const destination = ctx.tx[~sfDestination]; + destination && *destination == beast::zero) + { + JLOG(ctx.j.debug()) << "VaultWithdraw: zero/empty destination account."; + return temMALFORMED; + } + + return preflight2(ctx); +} + +TER +VaultWithdraw::preclaim(PreclaimContext const& ctx) +{ + auto const vault = ctx.view.read(keylet::vault(ctx.tx[sfVaultID])); + if (!vault) + return tecNO_ENTRY; + + auto const assets = ctx.tx[sfAmount]; + auto const vaultAsset = vault->at(sfAsset); + auto const vaultShare = vault->at(sfShareMPTID); + if (assets.asset() != vaultAsset && assets.asset() != vaultShare) + return tecWRONG_ASSET; + + if (vaultAsset.native()) + ; // No special checks for XRP + else if (vaultAsset.holds()) + { + auto mptID = vaultAsset.get().getMptID(); + auto issuance = ctx.view.read(keylet::mptIssuance(mptID)); + if (!issuance) + return tecOBJECT_NOT_FOUND; + if (!issuance->isFlag(lsfMPTCanTransfer)) + { + // LCOV_EXCL_START + JLOG(ctx.j.error()) + << "VaultWithdraw: vault assets are non-transferable."; + return tecNO_AUTH; + // LCOV_EXCL_STOP + } + } + else if (vaultAsset.holds()) + { + auto const issuer = + ctx.view.read(keylet::account(vaultAsset.getIssuer())); + if (!issuer) + { + // LCOV_EXCL_START + JLOG(ctx.j.error()) + << "VaultWithdraw: missing issuer of vault assets."; + return tefINTERNAL; + // LCOV_EXCL_STOP + } + } + + // Enforce valid withdrawal policy + if (vault->at(sfWithdrawalPolicy) != vaultStrategyFirstComeFirstServe) + { + // LCOV_EXCL_START + JLOG(ctx.j.error()) << "VaultWithdraw: invalid withdrawal policy."; + return tefINTERNAL; + // LCOV_EXCL_STOP + } + + auto const account = ctx.tx[sfAccount]; + auto const dstAcct = [&]() -> AccountID { + if (ctx.tx.isFieldPresent(sfDestination)) + return ctx.tx.getAccountID(sfDestination); + return account; + }(); + + // Withdrawal to a 3rd party destination account is essentially a transfer, + // via shares in the vault. Enforce all the usual asset transfer checks. 
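// A standalone sketch of the receiver checks enforced below when withdrawing
// to a third-party Destination; the enum values are illustrative analogues of
// the tec codes:
enum class ReceiverCheck { ok, noDestination, tagNeeded, notPreauthorized };

struct DestinationStub
{
    bool exists;
    bool requiresDestTag;
    bool requiresDepositAuth;
    bool senderPreauthorized;
};

ReceiverCheck
checkThirdPartyDestination(DestinationStub const& dst)
{
    if (!dst.exists)
        return ReceiverCheck::noDestination;     // tecNO_DST
    if (dst.requiresDestTag)
        return ReceiverCheck::tagNeeded;         // tecDST_TAG_NEEDED
    if (dst.requiresDepositAuth && !dst.senderPreauthorized)
        return ReceiverCheck::notPreauthorized;  // tecNO_PERMISSION
    return ReceiverCheck::ok;
}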
+ if (account != dstAcct) + { + auto const sleDst = ctx.view.read(keylet::account(dstAcct)); + if (sleDst == nullptr) + return tecNO_DST; + + if (sleDst->getFlags() & lsfRequireDestTag) + return tecDST_TAG_NEEDED; // Cannot send without a tag + + if (sleDst->getFlags() & lsfDepositAuth) + { + if (!ctx.view.exists(keylet::depositPreauth(dstAcct, account))) + return tecNO_PERMISSION; + } + } + + // Destination MPToken must exist (if asset is an MPT) + if (auto const ter = requireAuth(ctx.view, vaultAsset, dstAcct); + !isTesSuccess(ter)) + return ter; + + // Cannot withdraw from a Vault an Asset frozen for the destination account + if (isFrozen(ctx.view, dstAcct, vaultAsset)) + return vaultAsset.holds() ? tecFROZEN : tecLOCKED; + + if (isFrozen(ctx.view, account, vaultShare)) + return tecLOCKED; + + return tesSUCCESS; +} + +TER +VaultWithdraw::doApply() +{ + auto const vault = view().peek(keylet::vault(ctx_.tx[sfVaultID])); + if (!vault) + return tefINTERNAL; // LCOV_EXCL_LINE + + auto const mptIssuanceID = (*vault)[sfShareMPTID]; + auto const sleIssuance = view().read(keylet::mptIssuance(mptIssuanceID)); + if (!sleIssuance) + { + // LCOV_EXCL_START + JLOG(j_.error()) << "VaultWithdraw: missing issuance of vault shares."; + return tefINTERNAL; + // LCOV_EXCL_STOP + } + + // Note, we intentionally do not check lsfVaultPrivate flag on the Vault. If + // you have a share in the vault, it means you were at some point authorized + // to deposit into it, and this means you are also indefinitely authorized + // to withdraw from it. + + auto amount = ctx_.tx[sfAmount]; + auto const asset = vault->at(sfAsset); + auto const share = MPTIssue(mptIssuanceID); + STAmount shares, assets; + if (amount.asset() == asset) + { + // Fixed assets, variable shares. + assets = amount; + shares = assetsToSharesWithdraw(vault, sleIssuance, assets); + } + else if (amount.asset() == share) + { + // Fixed shares, variable assets. + shares = amount; + assets = sharesToAssetsWithdraw(vault, sleIssuance, shares); + } + else + return tefINTERNAL; // LCOV_EXCL_LINE + + if (accountHolds( + view(), + account_, + share, + FreezeHandling::fhZERO_IF_FROZEN, + AuthHandling::ahIGNORE_AUTH, + j_) < shares) + { + JLOG(j_.debug()) << "VaultWithdraw: account doesn't hold enough shares"; + return tecINSUFFICIENT_FUNDS; + } + + // The vault must have enough assets on hand. The vault may hold assets that + // it has already pledged. That is why we look at AssetAvailable instead of + // the pseudo-account balance. + if (*vault->at(sfAssetsAvailable) < assets) + { + JLOG(j_.debug()) << "VaultWithdraw: vault doesn't hold enough assets"; + return tecINSUFFICIENT_FUNDS; + } + + vault->at(sfAssetsTotal) -= assets; + vault->at(sfAssetsAvailable) -= assets; + view().update(vault); + + auto const& vaultAccount = vault->at(sfAccount); + // Transfer shares from depositor to vault. + if (auto ter = accountSend( + view(), account_, vaultAccount, shares, j_, WaiveTransferFee::Yes)) + return ter; + + auto const dstAcct = [&]() -> AccountID { + if (ctx_.tx.isFieldPresent(sfDestination)) + return ctx_.tx.getAccountID(sfDestination); + return account_; + }(); + + // Transfer assets from vault to depositor or destination account. 
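// VaultWithdraw accepts Amount denominated either in the vault's asset (fixed
// assets, variable shares) or in its shares (fixed shares, variable assets),
// as implemented above. A simplified sketch of that branch, assuming a
// pro-rata exchange rate:
#include <stdexcept>

enum class Denomination { asset, share };

struct WithdrawAmounts
{
    double assets;
    double shares;
};

WithdrawAmounts
resolveWithdraw(
    Denomination denominatedIn,
    double amount,
    double assetsTotal,
    double sharesOutstanding)
{
    double const sharesPerAsset = sharesOutstanding / assetsTotal;
    switch (denominatedIn)
    {
        case Denomination::asset:
            return {amount, amount * sharesPerAsset};  // fixed assets
        case Denomination::share:
            return {amount / sharesPerAsset, amount};  // fixed shares
    }
    throw std::logic_error("unreachable");
}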
+ if (auto ter = accountSend( + view(), vaultAccount, dstAcct, assets, j_, WaiveTransferFee::Yes)) + return ter; + + // Sanity check + if (accountHolds( + view(), + vaultAccount, + assets.asset(), + FreezeHandling::fhIGNORE_FREEZE, + AuthHandling::ahIGNORE_AUTH, + j_) < beast::zero) + { + // LCOV_EXCL_START + JLOG(j_.error()) << "VaultWithdraw: negative balance of vault assets."; + return tefINTERNAL; + // LCOV_EXCL_STOP + } + + return tesSUCCESS; +} + +} // namespace ripple diff --git a/src/xrpld/app/tx/detail/VaultWithdraw.h b/src/xrpld/app/tx/detail/VaultWithdraw.h new file mode 100644 index 0000000000..0b713d403b --- /dev/null +++ b/src/xrpld/app/tx/detail/VaultWithdraw.h @@ -0,0 +1,48 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2024 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_TX_VAULTWITHDRAW_H_INCLUDED +#define RIPPLE_TX_VAULTWITHDRAW_H_INCLUDED + +#include + +namespace ripple { + +class VaultWithdraw : public Transactor +{ +public: + static constexpr ConsequencesFactoryType ConsequencesFactory{Normal}; + + explicit VaultWithdraw(ApplyContext& ctx) : Transactor(ctx) + { + } + + static NotTEC + preflight(PreflightContext const& ctx); + + static TER + preclaim(PreclaimContext const& ctx); + + TER + doApply() override; +}; + +} // namespace ripple + +#endif diff --git a/src/xrpld/app/tx/detail/applySteps.cpp b/src/xrpld/app/tx/detail/applySteps.cpp index b20b5a29f6..5e8c125e83 100644 --- a/src/xrpld/app/tx/detail/applySteps.cpp +++ b/src/xrpld/app/tx/detail/applySteps.cpp @@ -61,6 +61,12 @@ #include #include #include +#include +#include +#include +#include +#include +#include #include #include diff --git a/src/xrpld/ledger/View.h b/src/xrpld/ledger/View.h index 4c70f5dc48..387aedecfc 100644 --- a/src/xrpld/ledger/View.h +++ b/src/xrpld/ledger/View.h @@ -25,6 +25,7 @@ #include #include +#include #include #include #include @@ -34,6 +35,7 @@ #include #include +#include #include #include @@ -87,6 +89,14 @@ isGlobalFrozen(ReadView const& view, MPTIssue const& mptIssue); [[nodiscard]] bool isGlobalFrozen(ReadView const& view, Asset const& asset); +// Note, depth parameter is used to limit the recursion depth +[[nodiscard]] bool +isVaultPseudoAccountFrozen( + ReadView const& view, + AccountID const& account, + MPTIssue const& mptShare, + int depth); + [[nodiscard]] bool isIndividualFrozen( ReadView const& view, @@ -130,7 +140,11 @@ isFrozen( AccountID const& issuer); [[nodiscard]] inline bool -isFrozen(ReadView const& view, AccountID const& account, Issue const& issue) +isFrozen( + ReadView const& view, + AccountID const& account, + Issue const& issue, + int = 0 /*ignored*/) { return 
isFrozen(view, account, issue.currency, issue.account); } @@ -139,13 +153,63 @@ isFrozen(ReadView const& view, AccountID const& account, Issue const& issue) isFrozen( ReadView const& view, AccountID const& account, - MPTIssue const& mptIssue); + MPTIssue const& mptIssue, + int depth = 0); +/** + * isFrozen check is recursive for MPT shares in a vault, descending to + * assets in the vault, up to maxAssetCheckDepth recursion depth. This is + * purely defensive, as we currently do not allow such vaults to be created. + */ [[nodiscard]] inline bool -isFrozen(ReadView const& view, AccountID const& account, Asset const& asset) +isFrozen( + ReadView const& view, + AccountID const& account, + Asset const& asset, + int depth = 0) { return std::visit( - [&](auto const& issue) { return isFrozen(view, account, issue); }, + [&](auto const& issue) { + return isFrozen(view, account, issue, depth); + }, + asset.value()); +} + +[[nodiscard]] bool +isAnyFrozen( + ReadView const& view, + std::initializer_list const& accounts, + MPTIssue const& mptIssue, + int depth = 0); + +[[nodiscard]] inline bool +isAnyFrozen( + ReadView const& view, + std::initializer_list const& accounts, + Issue const& issue) +{ + for (auto const& account : accounts) + { + if (isFrozen(view, account, issue.currency, issue.account)) + return true; + } + return false; +} + +[[nodiscard]] inline bool +isAnyFrozen( + ReadView const& view, + std::initializer_list const& accounts, + Asset const& asset, + int depth = 0) +{ + return std::visit( + [&](TIss const& issue) { + if constexpr (std::is_same_v) + return isAnyFrozen(view, accounts, issue); + else + return isAnyFrozen(view, accounts, issue, depth); + }, asset.value()); } @@ -192,6 +256,15 @@ accountHolds( AuthHandling zeroIfUnauthorized, beast::Journal j); +[[nodiscard]] STAmount +accountHolds( + ReadView const& view, + AccountID const& account, + Asset const& asset, + FreezeHandling zeroIfFrozen, + AuthHandling zeroIfUnauthorized, + beast::Journal j); + // Returns the amount an account can spend of the currency type saDefault, or // returns saDefault if this account is the issuer of the currency in // question. Should be used in favor of accountHolds when questioning how much @@ -430,6 +503,73 @@ dirNext( [[nodiscard]] std::function describeOwnerDir(AccountID const& account); +[[nodiscard]] TER +dirLink(ApplyView& view, AccountID const& owner, std::shared_ptr& object); + +AccountID +pseudoAccountAddress(ReadView const& view, uint256 const& pseudoOwnerKey); + +/** + * + * Create pseudo-account, storing pseudoOwnerKey into ownerField. + * + * The list of valid ownerField is maintained in View.cpp and the caller to + * this function must perform necessary amendment check(s) before using a + * field. The amendment check is **not** performed in createPseudoAccount. + */ +[[nodiscard]] Expected, TER> +createPseudoAccount( + ApplyView& view, + uint256 const& pseudoOwnerKey, + SField const& ownerField); + +// Returns true iff sleAcct is a pseudo-account. 
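// Several helpers above (isAnyFrozen, addEmptyHolding, removeEmptyHolding)
// dispatch on the concrete alternative held by an Asset via std::visit. A
// standalone sketch of that pattern with plain std::variant stand-ins for the
// Issue/MPTIssue alternatives:
#include <string>
#include <variant>

struct IssueStub    { std::string currency; };
struct MPTIssueStub { std::string issuanceID; };
using AssetStub = std::variant<IssueStub, MPTIssueStub>;

bool isFrozenStub(IssueStub const&)    { return false; }  // trust-line rules
bool isFrozenStub(MPTIssueStub const&) { return false; }  // MPT rules

bool
isFrozenStub(AssetStub const& asset)
{
    // Forward to whichever overload matches the alternative actually held.
    return std::visit(
        [](auto const& issue) { return isFrozenStub(issue); }, asset);
}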
+// +// Returns false if sleAcct is +// * NOT a pseudo-account OR +// * NOT a ltACCOUNT_ROOT OR +// * null pointer +[[nodiscard]] bool +isPseudoAccount(std::shared_ptr sleAcct); + +[[nodiscard]] inline bool +isPseudoAccount(ReadView const& view, AccountID accountId) +{ + return isPseudoAccount(view.read(keylet::account(accountId))); +} + +[[nodiscard]] TER +addEmptyHolding( + ApplyView& view, + AccountID const& accountID, + XRPAmount priorBalance, + Issue const& issue, + beast::Journal journal); + +[[nodiscard]] TER +addEmptyHolding( + ApplyView& view, + AccountID const& accountID, + XRPAmount priorBalance, + MPTIssue const& mptIssue, + beast::Journal journal); + +[[nodiscard]] inline TER +addEmptyHolding( + ApplyView& view, + AccountID const& accountID, + XRPAmount priorBalance, + Asset const& asset, + beast::Journal journal) +{ + return std::visit( + [&](TIss const& issue) -> TER { + return addEmptyHolding( + view, accountID, priorBalance, issue, journal); + }, + asset.value()); +} + // VFALCO NOTE Both STAmount parameters should just // be "Amount", a unit-less number. // @@ -457,6 +597,34 @@ trustCreate( std::uint32_t uSrcQualityOut, beast::Journal j); +[[nodiscard]] TER +removeEmptyHolding( + ApplyView& view, + AccountID const& accountID, + Issue const& issue, + beast::Journal journal); + +[[nodiscard]] TER +removeEmptyHolding( + ApplyView& view, + AccountID const& accountID, + MPTIssue const& mptIssue, + beast::Journal journal); + +[[nodiscard]] inline TER +removeEmptyHolding( + ApplyView& view, + AccountID const& accountID, + Asset const& asset, + beast::Journal journal) +{ + return std::visit( + [&](TIss const& issue) -> TER { + return removeEmptyHolding(view, accountID, issue, journal); + }, + asset.value()); +} + [[nodiscard]] TER trustDelete( ApplyView& view, @@ -535,17 +703,92 @@ transferXRP( STAmount const& amount, beast::Journal j); +/* Check if MPToken exists: + * - StrongAuth - before checking lsfMPTRequireAuth is set + * - WeakAuth - after checking if lsfMPTRequireAuth is set + */ +enum class MPTAuthType : bool { StrongAuth = true, WeakAuth = false }; + /** Check if the account lacks required authorization. + * * Return tecNO_AUTH or tecNO_LINE if it does * and tesSUCCESS otherwise. */ [[nodiscard]] TER requireAuth(ReadView const& view, Issue const& issue, AccountID const& account); + +/** Check if the account lacks required authorization. + * + * This will also check for expired credentials. If it is called directly + * from preclaim, the user should convert result tecEXPIRED to tesSUCCESS and + * proceed to also check permissions with enforceMPTokenAuthorization inside + * doApply. This will ensure that any expired credentials are deleted. + * + * requireAuth check is recursive for MPT shares in a vault, descending to + * assets in the vault, up to maxAssetCheckDepth recursion depth. This is + * purely defensive, as we currently do not allow such vaults to be created. + * + * If StrongAuth then return tecNO_AUTH if MPToken doesn't exist or + * lsfMPTRequireAuth is set and MPToken is not authorized. If WeakAuth then + * return tecNO_AUTH if lsfMPTRequireAuth is set and MPToken doesn't exist or is + * not authorized (explicitly or via credentials, if DomainID is set in + * MPTokenIssuance). Consequently, if WeakAuth and lsfMPTRequireAuth is *not* + * set, this function will return true even if MPToken does *not* exist. 
+ */ [[nodiscard]] TER requireAuth( ReadView const& view, MPTIssue const& mptIssue, - AccountID const& account); + AccountID const& account, + MPTAuthType authType = MPTAuthType::StrongAuth, + int depth = 0); + +[[nodiscard]] TER inline requireAuth( + ReadView const& view, + Asset const& asset, + AccountID const& account, + MPTAuthType authType = MPTAuthType::StrongAuth) +{ + return std::visit( + [&](TIss const& issue_) { + if constexpr (std::is_same_v) + return requireAuth(view, issue_, account); + else + return requireAuth(view, issue_, account, authType); + }, + asset.value()); +} + +/** Enforce account has MPToken to match its authorization. + * + * Called from doApply - it will check for expired (and delete if found any) + * credentials matching DomainID set in MPTokenIssuance. Must be called if + * requireAuth(...MPTIssue...) returned tesSUCCESS or tecEXPIRED in preclaim, + * which implies that preclaim should replace `tecEXPIRED` with `tesSUCCESS` + * in order for the transactor to proceed to doApply. + * + * This function will create MPToken (if needed) on the basis of any + * non-expired credentials and will delete any expired credentials, indirectly + * via verifyValidDomain, as per DomainID (if set in MPTokenIssuance). + * + * The caller does NOT need to ensure that DomainID is actually set - this + * function handles gracefully both cases when DomainID is set and when not. + * + * The caller does NOT need to look for existing MPToken to match + * mptIssue/account - this function checks lsfMPTAuthorized of an existing + * MPToken iff DomainID is not set. + * + * Do not use for accounts which hold implied permission e.g. object owners or + * if MPTokenIssuance does not require authorization. In both cases use + * MPTokenAuthorize::authorize if MPToken does not yet exist. + */ +[[nodiscard]] TER +enforceMPTokenAuthorization( + ApplyView& view, + MPTID const& mptIssuanceID, + AccountID const& account, + XRPAmount const& priorBalance, + beast::Journal j); /** Check if the destination account is allowed * to receive MPT. Return tecNO_AUTH if it doesn't @@ -592,6 +835,33 @@ deleteAMMTrustLine( std::optional const& ammAccountID, beast::Journal j); +// From the perspective of a vault, +// return the number of shares to give the depositor +// when they deposit a fixed amount of assets. +[[nodiscard]] STAmount +assetsToSharesDeposit( + std::shared_ptr const& vault, + std::shared_ptr const& issuance, + STAmount const& assets); + +// From the perspective of a vault, +// return the number of shares to demand from the depositor +// when they ask to withdraw a fixed amount of assets. +[[nodiscard]] STAmount +assetsToSharesWithdraw( + std::shared_ptr const& vault, + std::shared_ptr const& issuance, + STAmount const& assets); + +// From the perspective of a vault, +// return the number of assets to give the depositor +// when they redeem a fixed amount of shares. +[[nodiscard]] STAmount +sharesToAssetsWithdraw( + std::shared_ptr const& vault, + std::shared_ptr const& issuance, + STAmount const& shares); + /** Has the specified time passed? 
@param now the current time diff --git a/src/xrpld/ledger/detail/View.cpp b/src/xrpld/ledger/detail/View.cpp index af81a6b7bb..d248d37e18 100644 --- a/src/xrpld/ledger/detail/View.cpp +++ b/src/xrpld/ledger/detail/View.cpp @@ -17,18 +17,29 @@ */ //============================================================================== +#include +#include #include #include +#include #include #include #include #include +#include +#include +#include #include #include +#include +#include +#include #include #include +#include +#include namespace ripple { @@ -184,7 +195,7 @@ bool isGlobalFrozen(ReadView const& view, MPTIssue const& mptIssue) { if (auto const sle = view.read(keylet::mptIssuance(mptIssue.getMptID()))) - return sle->getFlags() & lsfMPTLocked; + return sle->isFlag(lsfMPTLocked); return false; } @@ -229,7 +240,7 @@ isIndividualFrozen( { if (auto const sle = view.read(keylet::mptoken(mptIssue.getMptID(), account))) - return sle->getFlags() & lsfMPTLocked; + return sle->isFlag(lsfMPTLocked); return false; } @@ -262,10 +273,77 @@ bool isFrozen( ReadView const& view, AccountID const& account, - MPTIssue const& mptIssue) + MPTIssue const& mptIssue, + int depth) { return isGlobalFrozen(view, mptIssue) || - isIndividualFrozen(view, account, mptIssue); + isIndividualFrozen(view, account, mptIssue) || + isVaultPseudoAccountFrozen(view, account, mptIssue, depth); +} + +[[nodiscard]] bool +isAnyFrozen( + ReadView const& view, + std::initializer_list const& accounts, + MPTIssue const& mptIssue, + int depth) +{ + if (isGlobalFrozen(view, mptIssue)) + return true; + + for (auto const& account : accounts) + { + if (isIndividualFrozen(view, account, mptIssue)) + return true; + } + + for (auto const& account : accounts) + { + if (isVaultPseudoAccountFrozen(view, account, mptIssue, depth)) + return true; + } + + return false; +} + +bool +isVaultPseudoAccountFrozen( + ReadView const& view, + AccountID const& account, + MPTIssue const& mptShare, + int depth) +{ + if (!view.rules().enabled(featureSingleAssetVault)) + return false; + + if (depth >= maxAssetCheckDepth) + return true; // LCOV_EXCL_LINE + + auto const mptIssuance = + view.read(keylet::mptIssuance(mptShare.getMptID())); + if (mptIssuance == nullptr) + return false; // zero MPToken won't block deletion of MPTokenIssuance + + auto const issuer = mptIssuance->getAccountID(sfIssuer); + auto const mptIssuer = view.read(keylet::account(issuer)); + if (mptIssuer == nullptr) + { // LCOV_EXCL_START + UNREACHABLE("ripple::isVaultPseudoAccountFrozen : null MPToken issuer"); + return false; + } // LCOV_EXCL_STOP + + if (!mptIssuer->isFieldPresent(sfVaultID)) + return false; // not a Vault pseudo-account, common case + + auto const vault = + view.read(keylet::vault(mptIssuer->getFieldH256(sfVaultID))); + if (vault == nullptr) + { // LCOV_EXCL_START + UNREACHABLE("ripple::isVaultPseudoAccountFrozen : null vault"); + return false; + } // LCOV_EXCL_STOP + + return isAnyFrozen(view, {issuer, account}, vault->at(sfAsset), depth + 1); } bool @@ -413,6 +491,7 @@ accountHolds( auto const sleMpt = view.read(keylet::mptoken(mptIssue.getMptID(), account)); + if (!sleMpt) amount.clear(mptIssue); else if ( @@ -422,9 +501,17 @@ accountHolds( { amount = STAmount{mptIssue, sleMpt->getFieldU64(sfMPTAmount)}; - // only if auth check is needed, as it needs to do an additional read - // operation - if (zeroIfUnauthorized == ahZERO_IF_UNAUTHORIZED) + // Only if auth check is needed, as it needs to do an additional read + // operation. 
Note featureSingleAssetVault will affect error codes. + if (zeroIfUnauthorized == ahZERO_IF_UNAUTHORIZED && + view.rules().enabled(featureSingleAssetVault)) + { + if (auto const err = requireAuth( + view, mptIssue, account, MPTAuthType::StrongAuth); + !isTesSuccess(err)) + amount.clear(mptIssue); + } + else if (zeroIfUnauthorized == ahZERO_IF_UNAUTHORIZED) { auto const sleIssuance = view.read(keylet::mptIssuance(mptIssue.getMptID())); @@ -440,6 +527,29 @@ accountHolds( return amount; } +[[nodiscard]] STAmount +accountHolds( + ReadView const& view, + AccountID const& account, + Asset const& asset, + FreezeHandling zeroIfFrozen, + AuthHandling zeroIfUnauthorized, + beast::Journal j) +{ + return std::visit( + [&](auto const& value) { + if constexpr (std::is_same_v< + std::remove_cvref_t, + Issue>) + { + return accountHolds(view, account, value, zeroIfFrozen, j); + } + return accountHolds( + view, account, value, zeroIfFrozen, zeroIfUnauthorized, j); + }, + asset.value()); +} + STAmount accountFunds( ReadView const& view, @@ -931,6 +1041,176 @@ describeOwnerDir(AccountID const& account) }; } +TER +dirLink(ApplyView& view, AccountID const& owner, std::shared_ptr& object) +{ + auto const page = view.dirInsert( + keylet::ownerDir(owner), object->key(), describeOwnerDir(owner)); + if (!page) + return tecDIR_FULL; // LCOV_EXCL_LINE + object->setFieldU64(sfOwnerNode, *page); + return tesSUCCESS; +} + +AccountID +pseudoAccountAddress(ReadView const& view, uint256 const& pseudoOwnerKey) +{ + // This number must not be changed without an amendment + constexpr int maxAccountAttempts = 256; + for (auto i = 0; i < maxAccountAttempts; ++i) + { + ripesha_hasher rsh; + auto const hash = sha512Half(i, view.info().parentHash, pseudoOwnerKey); + rsh(hash.data(), hash.size()); + AccountID const ret{static_cast(rsh)}; + if (!view.read(keylet::account(ret))) + return ret; + } + return beast::zero; +} + +// Note, the list of the pseudo-account designator fields below MUST be +// maintained but it does NOT need to be amendment-gated, since a +// non-active amendment will not set any field, by definition. Specific +// properties of a pseudo-account are NOT checked here, that's what +// InvariantCheck is for. +static std::array const pseudoAccountOwnerFields = { + &sfAMMID, // + &sfVaultID, // +}; + +Expected, TER> +createPseudoAccount( + ApplyView& view, + uint256 const& pseudoOwnerKey, + SField const& ownerField) +{ + XRPL_ASSERT( + std::count_if( + pseudoAccountOwnerFields.begin(), + pseudoAccountOwnerFields.end(), + [&ownerField](SField const* sf) -> bool { + return *sf == ownerField; + }) == 1, + "ripple::createPseudoAccount : valid owner field"); + + auto const accountId = pseudoAccountAddress(view, pseudoOwnerKey); + if (accountId == beast::zero) + return Unexpected(tecDUPLICATE); + + // Create pseudo-account. + auto account = std::make_shared(keylet::account(accountId)); + account->setAccountID(sfAccount, accountId); + account->setFieldAmount(sfBalance, STAmount{}); + + // Pseudo-accounts can't submit transactions, so set the sequence number + // to 0 to make them easier to spot and verify, and add an extra level + // of protection. + std::uint32_t const seqno = // + view.rules().enabled(featureSingleAssetVault) // + ? 0 // + : view.seq(); + account->setFieldU32(sfSequence, seqno); + // Ignore reserves requirement, disable the master key, allow default + // rippling, and enable deposit authorization to prevent payments into + // pseudo-account. 
+ account->setFieldU32( + sfFlags, lsfDisableMaster | lsfDefaultRipple | lsfDepositAuth); + // Link the pseudo-account with its owner object. + account->setFieldH256(ownerField, pseudoOwnerKey); + + view.insert(account); + + return account; +} + +[[nodiscard]] bool +isPseudoAccount(std::shared_ptr sleAcct) +{ + // Intentionally use defensive coding here because it's cheap and makes the + // semantics of true return value clean. + return sleAcct && sleAcct->getType() == ltACCOUNT_ROOT && + std::count_if( + pseudoAccountOwnerFields.begin(), + pseudoAccountOwnerFields.end(), + [&sleAcct](SField const* sf) -> bool { + return sleAcct->isFieldPresent(*sf); + }) > 0; +} + +[[nodiscard]] TER +addEmptyHolding( + ApplyView& view, + AccountID const& accountID, + XRPAmount priorBalance, + Issue const& issue, + beast::Journal journal) +{ + // Every account can hold XRP. + if (issue.native()) + return tesSUCCESS; + + auto const& issuerId = issue.getIssuer(); + auto const& currency = issue.currency; + if (isGlobalFrozen(view, issuerId)) + return tecFROZEN; // LCOV_EXCL_LINE + + auto const& srcId = issuerId; + auto const& dstId = accountID; + auto const high = srcId > dstId; + auto const index = keylet::line(srcId, dstId, currency); + auto const sleSrc = view.peek(keylet::account(srcId)); + auto const sleDst = view.peek(keylet::account(dstId)); + if (!sleDst || !sleSrc) + return tefINTERNAL; // LCOV_EXCL_LINE + if (!sleSrc->isFlag(lsfDefaultRipple)) + return tecINTERNAL; // LCOV_EXCL_LINE + // If the line already exists, don't create it again. + if (view.read(index)) + return tecDUPLICATE; + return trustCreate( + view, + high, + srcId, + dstId, + index.key, + sleDst, + /*auth=*/false, + /*noRipple=*/true, + /*freeze=*/false, + /*deepFreeze*/ false, + /*balance=*/STAmount{Issue{currency, noAccount()}}, + /*limit=*/STAmount{Issue{currency, dstId}}, + /*qualityIn=*/0, + /*qualityOut=*/0, + journal); +} + +[[nodiscard]] TER +addEmptyHolding( + ApplyView& view, + AccountID const& accountID, + XRPAmount priorBalance, + MPTIssue const& mptIssue, + beast::Journal journal) +{ + auto const& mptID = mptIssue.getMptID(); + auto const mpt = view.peek(keylet::mptIssuance(mptID)); + if (!mpt) + return tefINTERNAL; // LCOV_EXCL_LINE + if (mpt->isFlag(lsfMPTLocked)) + return tefINTERNAL; // LCOV_EXCL_LINE + if (view.peek(keylet::mptoken(mptID, accountID))) + return tecDUPLICATE; + + return MPTokenAuthorize::authorize( + view, + journal, + {.priorBalance = priorBalance, + .mptIssuanceID = mptID, + .account = accountID}); +} + TER trustCreate( ApplyView& view, @@ -1050,6 +1330,91 @@ trustCreate( return tesSUCCESS; } +[[nodiscard]] TER +removeEmptyHolding( + ApplyView& view, + AccountID const& accountID, + Issue const& issue, + beast::Journal journal) +{ + if (issue.native()) + { + auto const sle = view.read(keylet::account(accountID)); + if (!sle) + return tecINTERNAL; + auto const balance = sle->getFieldAmount(sfBalance); + if (balance.xrp() != 0) + return tecHAS_OBLIGATIONS; + return tesSUCCESS; + } + + // `asset` is an IOU. + auto const line = view.peek(keylet::line(accountID, issue)); + if (!line) + return tecOBJECT_NOT_FOUND; + if (line->at(sfBalance)->iou() != beast::zero) + return tecHAS_OBLIGATIONS; + + // Adjust the owner count(s) + if (line->isFlag(lsfLowReserve)) + { + // Clear reserve for low account. 
+ auto sleLowAccount = + view.peek(keylet::account(line->at(sfLowLimit)->getIssuer())); + if (!sleLowAccount) + return tecINTERNAL; + adjustOwnerCount(view, sleLowAccount, -1, journal); + // It's not really necessary to clear the reserve flag, since the line + // is about to be deleted, but this will make the metadata reflect an + // accurate state at the time of deletion. + line->clearFlag(lsfLowReserve); + } + + if (line->isFlag(lsfHighReserve)) + { + // Clear reserve for high account. + auto sleHighAccount = + view.peek(keylet::account(line->at(sfHighLimit)->getIssuer())); + if (!sleHighAccount) + return tecINTERNAL; + adjustOwnerCount(view, sleHighAccount, -1, journal); + // It's not really necessary to clear the reserve flag, since the line + // is about to be deleted, but this will make the metadata reflect an + // accurate state at the time of deletion. + line->clearFlag(lsfHighReserve); + } + + return trustDelete( + view, + line, + line->at(sfLowLimit)->getIssuer(), + line->at(sfHighLimit)->getIssuer(), + journal); +} + +[[nodiscard]] TER +removeEmptyHolding( + ApplyView& view, + AccountID const& accountID, + MPTIssue const& mptIssue, + beast::Journal journal) +{ + auto const& mptID = mptIssue.getMptID(); + auto const mptoken = view.peek(keylet::mptoken(mptID, accountID)); + if (!mptoken) + return tecOBJECT_NOT_FOUND; + if (mptoken->at(sfMPTAmount) != 0) + return tecHAS_OBLIGATIONS; + + return MPTokenAuthorize::authorize( + view, + journal, + {.priorBalance = {}, + .mptIssuanceID = mptID, + .account = accountID, + .flags = tfMPTUnauthorize}); +} + TER trustDelete( ApplyView& view, @@ -1464,6 +1829,7 @@ rippleCreditMPT( STAmount const& saAmount, beast::Journal j) { + // Do not check MPT authorization here - it must have been checked earlier auto const mptID = keylet::mptIssuance(saAmount.get().getMptID()); auto const issuer = saAmount.getIssuer(); auto sleIssuance = view.peek(mptID); @@ -1513,6 +1879,7 @@ rippleCreditMPT( else return tecNO_AUTH; } + return tesSUCCESS; } @@ -1921,35 +2288,179 @@ TER requireAuth( ReadView const& view, MPTIssue const& mptIssue, - AccountID const& account) + AccountID const& account, + MPTAuthType authType, + int depth) { auto const mptID = keylet::mptIssuance(mptIssue.getMptID()); auto const sleIssuance = view.read(mptID); - if (!sleIssuance) return tecOBJECT_NOT_FOUND; auto const mptIssuer = sleIssuance->getAccountID(sfIssuer); // issuer is always "authorized" - if (mptIssuer == account) + if (mptIssuer == account) // Issuer won't have MPToken return tesSUCCESS; + if (view.rules().enabled(featureSingleAssetVault)) + { + if (depth >= maxAssetCheckDepth) + return tecINTERNAL; // LCOV_EXCL_LINE + + // requireAuth is recursive if the issuer is a vault pseudo-account + auto const sleIssuer = view.read(keylet::account(mptIssuer)); + if (!sleIssuer) + return tefINTERNAL; // LCOV_EXCL_LINE + + if (sleIssuer->isFieldPresent(sfVaultID)) + { + auto const sleVault = + view.read(keylet::vault(sleIssuer->getFieldH256(sfVaultID))); + if (!sleVault) + return tefINTERNAL; // LCOV_EXCL_LINE + + auto const asset = sleVault->at(sfAsset); + if (auto const err = std::visit( + [&](TIss const& issue) { + if constexpr (std::is_same_v) + return requireAuth(view, issue, account); + else + return requireAuth( + view, issue, account, authType, depth + 1); + }, + asset.value()); + !isTesSuccess(err)) + return err; + } + } + auto const mptokenID = keylet::mptoken(mptID.key, account); auto const sleToken = view.read(mptokenID); // if account has no MPToken, fail - if (!sleToken) + 
if (!sleToken && authType == MPTAuthType::StrongAuth) return tecNO_AUTH; + // Note, this check is not amendment-gated because DomainID will be always + // empty **unless** writing to it has been enabled by an amendment + auto const maybeDomainID = sleIssuance->at(~sfDomainID); + if (maybeDomainID) + { + XRPL_ASSERT( + sleIssuance->getFieldU32(sfFlags) & lsfMPTRequireAuth, + "ripple::requireAuth : issuance requires authorization"); + // ter = tefINTERNAL | tecOBJECT_NOT_FOUND | tecNO_AUTH | tecEXPIRED + if (auto const ter = + credentials::validDomain(view, *maybeDomainID, account); + isTesSuccess(ter)) + return ter; // Note: sleToken might be null + else if (!sleToken) + return ter; + // We ignore error from validDomain if we found sleToken, as it could + // belong to someone who is explicitly authorized e.g. a vault owner. + } + // mptoken must be authorized if issuance enabled requireAuth - if (sleIssuance->getFieldU32(sfFlags) & lsfMPTRequireAuth && - !(sleToken->getFlags() & lsfMPTAuthorized)) + if (sleIssuance->isFlag(lsfMPTRequireAuth) && + (!sleToken || !sleToken->isFlag(lsfMPTAuthorized))) return tecNO_AUTH; - return tesSUCCESS; + return tesSUCCESS; // Note: sleToken might be null } +[[nodiscard]] TER +enforceMPTokenAuthorization( + ApplyView& view, + MPTID const& mptIssuanceID, + AccountID const& account, + XRPAmount const& priorBalance, // for MPToken authorization + beast::Journal j) +{ + auto const sleIssuance = view.read(keylet::mptIssuance(mptIssuanceID)); + if (!sleIssuance) + return tefINTERNAL; // LCOV_EXCL_LINE + + XRPL_ASSERT( + sleIssuance->isFlag(lsfMPTRequireAuth), + "ripple::enforceMPTokenAuthorization : authorization required"); + + if (account == sleIssuance->at(sfIssuer)) + return tefINTERNAL; // LCOV_EXCL_LINE + + auto const keylet = keylet::mptoken(mptIssuanceID, account); + auto const sleToken = view.read(keylet); // NOTE: might be null + auto const maybeDomainID = sleIssuance->at(~sfDomainID); + bool const authorizedByDomain = maybeDomainID.has_value() && + verifyValidDomain(view, account, *maybeDomainID, j) == tesSUCCESS; + + if (!authorizedByDomain && sleToken == nullptr) + { + // Could not find MPToken and won't create one, could be either of: + // + // 1. Field sfDomainID not set in MPTokenIssuance or + // 2. Account has no matching and accepted credentials or + // 3. Account has all expired credentials (deleted in verifyValidDomain) + // + // Either way, return tecNO_AUTH and there is nothing else to do + return tecNO_AUTH; + } + else if (!authorizedByDomain && maybeDomainID.has_value()) + { + // Found an MPToken but the account is not authorized and we expect + // it to have been authorized by the domain. This could be because the + // credentials used to create the MPToken have expired or been deleted. + return tecNO_AUTH; + } + else if (!authorizedByDomain) + { + // We found an MPToken, but sfDomainID is not set, so this is a classic + // MPToken which requires authorization by the token issuer. + XRPL_ASSERT( + sleToken != nullptr && !maybeDomainID.has_value(), + "ripple::enforceMPTokenAuthorization : found MPToken"); + if (sleToken->isFlag(lsfMPTAuthorized)) + return tesSUCCESS; + + return tecNO_AUTH; + } + else if (authorizedByDomain && sleToken != nullptr) + { + // Found an MPToken, authorized by the domain. Ignore authorization flag + // lsfMPTAuthorized because it is meaningless. 
Return tesSUCCESS + XRPL_ASSERT( + maybeDomainID.has_value(), + "ripple::enforceMPTokenAuthorization : found MPToken for domain"); + return tesSUCCESS; + } + else if (authorizedByDomain) + { + // Could not find MPToken but there should be one because we are + // authorized by domain. Proceed to create it, then return tesSUCCESS + XRPL_ASSERT( + maybeDomainID.has_value() && sleToken == nullptr, + "ripple::enforceMPTokenAuthorization : new MPToken for domain"); + if (auto const err = MPTokenAuthorize::authorize( + view, + j, + { + .priorBalance = priorBalance, + .mptIssuanceID = mptIssuanceID, + .account = account, + .flags = 0, + }); + !isTesSuccess(err)) + return err; + + return tesSUCCESS; + } + + // LCOV_EXCL_START + UNREACHABLE( + "ripple::enforceMPTokenAuthorization : condition list is incomplete"); + return tefINTERNAL; +} // LCOV_EXCL_STOP + TER canTransfer( ReadView const& view, @@ -2125,6 +2636,62 @@ rippleCredit( saAmount.asset().value()); } +[[nodiscard]] STAmount +assetsToSharesDeposit( + std::shared_ptr const& vault, + std::shared_ptr const& issuance, + STAmount const& assets) +{ + XRPL_ASSERT( + assets.asset() == vault->at(sfAsset), + "ripple::assetsToSharesDeposit : assets and vault match"); + Number assetTotal = vault->at(sfAssetsTotal); + STAmount shares{vault->at(sfShareMPTID), static_cast(assets)}; + if (assetTotal == 0) + return shares; + Number shareTotal = issuance->at(sfOutstandingAmount); + shares = shareTotal * (assets / assetTotal); + return shares; +} + +[[nodiscard]] STAmount +assetsToSharesWithdraw( + std::shared_ptr const& vault, + std::shared_ptr const& issuance, + STAmount const& assets) +{ + XRPL_ASSERT( + assets.asset() == vault->at(sfAsset), + "ripple::assetsToSharesWithdraw : assets and vault match"); + Number assetTotal = vault->at(sfAssetsTotal); + assetTotal -= vault->at(sfLossUnrealized); + STAmount shares{vault->at(sfShareMPTID)}; + if (assetTotal == 0) + return shares; + Number shareTotal = issuance->at(sfOutstandingAmount); + shares = shareTotal * (assets / assetTotal); + return shares; +} + +[[nodiscard]] STAmount +sharesToAssetsWithdraw( + std::shared_ptr const& vault, + std::shared_ptr const& issuance, + STAmount const& shares) +{ + XRPL_ASSERT( + shares.asset() == vault->at(sfShareMPTID), + "ripple::sharesToAssetsWithdraw : shares and vault match"); + Number assetTotal = vault->at(sfAssetsTotal); + assetTotal -= vault->at(sfLossUnrealized); + STAmount assets{vault->at(sfAsset)}; + if (assetTotal == 0) + return assets; + Number shareTotal = issuance->at(sfOutstandingAmount); + assets = assetTotal * (shares / shareTotal); + return assets; +} + bool after(NetClock::time_point now, std::uint32_t mark) { diff --git a/src/xrpld/net/detail/RPCCall.cpp b/src/xrpld/net/detail/RPCCall.cpp index dd1208aa24..0cc3cb6618 100644 --- a/src/xrpld/net/detail/RPCCall.cpp +++ b/src/xrpld/net/detail/RPCCall.cpp @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -862,6 +863,23 @@ private: return jvRequest; } + Json::Value + parseVault(Json::Value const& jvParams) + { + std::string strVaultID = jvParams[0u].asString(); + uint256 id = beast::zero; + if (!id.parseHex(strVaultID)) + return rpcError(rpcINVALID_PARAMS); + + Json::Value jvRequest(Json::objectValue); + jvRequest[jss::vault_id] = strVaultID; + + if (jvParams.size() > 1) + jvParseLedger(jvRequest, jvParams[1u].asString()); + + return jvRequest; + } + // peer_reservations_add [] Json::Value parsePeerReservationsAdd(Json::Value const& jvParams) @@ -1208,6 +1226,7 @@ public: 
{"account_offers", &RPCParser::parseAccountItems, 1, 4}, {"account_tx", &RPCParser::parseAccountTransactions, 1, 8}, {"amm_info", &RPCParser::parseAsIs, 1, 2}, + {"vault_info", &RPCParser::parseVault, 1, 2}, {"book_changes", &RPCParser::parseLedgerId, 1, 1}, {"book_offers", &RPCParser::parseBookOffers, 2, 7}, {"can_delete", &RPCParser::parseCanDelete, 0, 1}, diff --git a/src/xrpld/rpc/detail/Handler.cpp b/src/xrpld/rpc/detail/Handler.cpp index dd670529a5..3b32524ee2 100644 --- a/src/xrpld/rpc/detail/Handler.cpp +++ b/src/xrpld/rpc/detail/Handler.cpp @@ -188,6 +188,7 @@ Handler const handlerArray[]{ Role::ADMIN, NO_CONDITION}, {"validator_info", byRef(&doValidatorInfo), Role::ADMIN, NO_CONDITION}, + {"vault_info", byRef(&doVaultInfo), Role::USER, NO_CONDITION}, {"wallet_propose", byRef(&doWalletPropose), Role::ADMIN, NO_CONDITION}, // Evented methods {"subscribe", byRef(&doSubscribe), Role::USER, NO_CONDITION}, diff --git a/src/xrpld/rpc/detail/RPCHelpers.cpp b/src/xrpld/rpc/detail/RPCHelpers.cpp index 347a984d15..b98f31340a 100644 --- a/src/xrpld/rpc/detail/RPCHelpers.cpp +++ b/src/xrpld/rpc/detail/RPCHelpers.cpp @@ -1140,5 +1140,6 @@ getLedgerByContext(RPC::JsonContext& context) return RPC::make_error( rpcNOT_READY, "findCreate failed to return an inbound ledger"); } + } // namespace RPC } // namespace ripple diff --git a/src/xrpld/rpc/detail/RPCHelpers.h b/src/xrpld/rpc/detail/RPCHelpers.h index 31b9761058..1d33d69459 100644 --- a/src/xrpld/rpc/detail/RPCHelpers.h +++ b/src/xrpld/rpc/detail/RPCHelpers.h @@ -269,7 +269,9 @@ keypairForSignature( Json::Value const& params, Json::Value& error, unsigned int apiVersion = apiVersionIfUnspecified); + } // namespace RPC + } // namespace ripple #endif diff --git a/src/xrpld/rpc/handlers/AccountObjects.cpp b/src/xrpld/rpc/handlers/AccountObjects.cpp index 03ea427b12..2b2496a1dd 100644 --- a/src/xrpld/rpc/handlers/AccountObjects.cpp +++ b/src/xrpld/rpc/handlers/AccountObjects.cpp @@ -224,7 +224,9 @@ doAccountObjects(RPC::JsonContext& context) {jss::bridge, ltBRIDGE}, {jss::mpt_issuance, ltMPTOKEN_ISSUANCE}, {jss::mptoken, ltMPTOKEN}, - {jss::permissioned_domain, ltPERMISSIONED_DOMAIN}}; + {jss::permissioned_domain, ltPERMISSIONED_DOMAIN}, + {jss::vault, ltVAULT}, + }; typeFilter.emplace(); typeFilter->reserve(std::size(deletionBlockers)); diff --git a/src/xrpld/rpc/handlers/Handlers.h b/src/xrpld/rpc/handlers/Handlers.h index 12e493576b..b76cbea8cd 100644 --- a/src/xrpld/rpc/handlers/Handlers.h +++ b/src/xrpld/rpc/handlers/Handlers.h @@ -166,6 +166,8 @@ Json::Value doValidatorListSites(RPC::JsonContext&); Json::Value doValidatorInfo(RPC::JsonContext&); +Json::Value +doVaultInfo(RPC::JsonContext&); } // namespace ripple #endif diff --git a/src/xrpld/rpc/handlers/LedgerEntry.cpp b/src/xrpld/rpc/handlers/LedgerEntry.cpp index d2f188aef3..fb82788907 100644 --- a/src/xrpld/rpc/handlers/LedgerEntry.cpp +++ b/src/xrpld/rpc/handlers/LedgerEntry.cpp @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -744,6 +745,39 @@ parseTicket(Json::Value const& params, Json::Value& jvResult) return getTicketIndex(*id, params[jss::ticket_seq].asUInt()); } +static std::optional +parseVault(Json::Value const& params, Json::Value& jvResult) +{ + if (!params.isObject()) + { + uint256 uNodeIndex; + if (!uNodeIndex.parseHex(params.asString())) + { + jvResult[jss::error] = "malformedRequest"; + return std::nullopt; + } + return uNodeIndex; + } + + if (!params.isMember(jss::owner) || !params.isMember(jss::seq) || + !(params[jss::seq].isInt() || 
params[jss::seq].isUInt()) || + params[jss::seq].asDouble() <= 0.0 || + params[jss::seq].asDouble() > double(Json::Value::maxUInt)) + { + jvResult[jss::error] = "malformedRequest"; + return std::nullopt; + } + + auto const id = parseBase58(params[jss::owner].asString()); + if (!id) + { + jvResult[jss::error] = "malformedOwner"; + return std::nullopt; + } + + return keylet::vault(*id, params[jss::seq].asUInt()).key; +} + static std::optional parseXChainOwnedClaimID(Json::Value const& claim_id, Json::Value& jvResult) { @@ -951,6 +985,7 @@ doLedgerEntry(RPC::JsonContext& context) {jss::xchain_owned_create_account_claim_id, parseXChainOwnedCreateAccountClaimID, ltXCHAIN_OWNED_CREATE_ACCOUNT_CLAIM_ID}, + {jss::vault, parseVault, ltVAULT}, }); uint256 uNodeIndex; diff --git a/src/xrpld/rpc/handlers/VaultInfo.cpp b/src/xrpld/rpc/handlers/VaultInfo.cpp new file mode 100644 index 0000000000..417bbd38e3 --- /dev/null +++ b/src/xrpld/rpc/handlers/VaultInfo.cpp @@ -0,0 +1,114 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2025 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace ripple { + +static std::optional +parseVault(Json::Value const& params, Json::Value& jvResult) +{ + auto const hasVaultId = params.isMember(jss::vault_id); + auto const hasOwner = params.isMember(jss::owner); + auto const hasSeq = params.isMember(jss::seq); + + uint256 uNodeIndex = beast::zero; + if (hasVaultId && !hasOwner && !hasSeq) + { + if (!uNodeIndex.parseHex(params[jss::vault_id].asString())) + { + RPC::inject_error(rpcINVALID_PARAMS, jvResult); + return std::nullopt; + } + // else uNodeIndex holds the value we need + } + else if (!hasVaultId && hasOwner && hasSeq) + { + auto const id = parseBase58(params[jss::owner].asString()); + if (!id) + { + RPC::inject_error(rpcACT_MALFORMED, jvResult); + return std::nullopt; + } + else if ( + !(params[jss::seq].isInt() || params[jss::seq].isUInt()) || + params[jss::seq].asDouble() <= 0.0 || + params[jss::seq].asDouble() > double(Json::Value::maxUInt)) + { + RPC::inject_error(rpcINVALID_PARAMS, jvResult); + return std::nullopt; + } + + uNodeIndex = keylet::vault(*id, params[jss::seq].asUInt()).key; + } + else + { + // Invalid combination of fields vault_id/owner/seq + RPC::inject_error(rpcINVALID_PARAMS, jvResult); + return std::nullopt; + } + + return uNodeIndex; +} + +Json::Value +doVaultInfo(RPC::JsonContext& context) +{ + std::shared_ptr lpLedger; + auto jvResult = RPC::lookupLedger(lpLedger, context); + + if (!lpLedger) + return jvResult; + + auto const uNodeIndex = + parseVault(context.params, jvResult).value_or(beast::zero); + if (uNodeIndex == beast::zero) + { + jvResult[jss::error] = "malformedRequest"; + return jvResult; + } + + auto const sleVault = lpLedger->read(keylet::vault(uNodeIndex)); + auto const sleIssuance = sleVault == nullptr // + ? 
nullptr + : lpLedger->read(keylet::mptIssuance(sleVault->at(sfShareMPTID))); + if (!sleVault || !sleIssuance) + { + jvResult[jss::error] = "entryNotFound"; + return jvResult; + } + + Json::Value& vault = jvResult[jss::vault]; + vault = sleVault->getJson(JsonOptions::none); + auto& share = vault[jss::shares]; + share = sleIssuance->getJson(JsonOptions::none); + + jvResult[jss::vault] = vault; + return jvResult; +} + +} // namespace ripple From 70371a4344a22439f3a24b9fc69a3c2576425774 Mon Sep 17 00:00:00 2001 From: Olek <115580134+oleks-rip@users.noreply.github.com> Date: Wed, 21 May 2025 13:28:18 -0400 Subject: [PATCH 029/244] Fix initializer list initialization for GCC-15 (#5443) --- include/xrpl/beast/hash/uhash.h | 2 +- include/xrpl/beast/net/IPEndpoint.h | 4 ++-- include/xrpl/protocol/AccountID.h | 2 +- include/xrpl/protocol/Book.h | 8 ++++---- include/xrpl/protocol/UintTypes.h | 8 ++++---- include/xrpl/resource/detail/Key.h | 2 +- src/xrpld/app/paths/RippleLineCache.h | 2 +- 7 files changed, 14 insertions(+), 14 deletions(-) diff --git a/include/xrpl/beast/hash/uhash.h b/include/xrpl/beast/hash/uhash.h index ab3eaad039..ac4ba7256d 100644 --- a/include/xrpl/beast/hash/uhash.h +++ b/include/xrpl/beast/hash/uhash.h @@ -30,7 +30,7 @@ namespace beast { template struct uhash { - explicit uhash() = default; + uhash() = default; using result_type = typename Hasher::result_type; diff --git a/include/xrpl/beast/net/IPEndpoint.h b/include/xrpl/beast/net/IPEndpoint.h index e66e7f4caa..345ba4b8da 100644 --- a/include/xrpl/beast/net/IPEndpoint.h +++ b/include/xrpl/beast/net/IPEndpoint.h @@ -215,7 +215,7 @@ namespace std { template <> struct hash<::beast::IP::Endpoint> { - explicit hash() = default; + hash() = default; std::size_t operator()(::beast::IP::Endpoint const& endpoint) const @@ -230,7 +230,7 @@ namespace boost { template <> struct hash<::beast::IP::Endpoint> { - explicit hash() = default; + hash() = default; std::size_t operator()(::beast::IP::Endpoint const& endpoint) const diff --git a/include/xrpl/protocol/AccountID.h b/include/xrpl/protocol/AccountID.h index 2677dd76bc..295cf41e4f 100644 --- a/include/xrpl/protocol/AccountID.h +++ b/include/xrpl/protocol/AccountID.h @@ -149,7 +149,7 @@ namespace std { template <> struct hash : ripple::AccountID::hasher { - explicit hash() = default; + hash() = default; }; } // namespace std diff --git a/include/xrpl/protocol/Book.h b/include/xrpl/protocol/Book.h index 0a04deb277..0fcff0df80 100644 --- a/include/xrpl/protocol/Book.h +++ b/include/xrpl/protocol/Book.h @@ -104,7 +104,7 @@ private: boost::base_from_member, 1>; public: - explicit hash() = default; + hash() = default; using value_type = std::size_t; using argument_type = ripple::Issue; @@ -131,7 +131,7 @@ private: hasher m_hasher; public: - explicit hash() = default; + hash() = default; using value_type = std::size_t; using argument_type = ripple::Book; @@ -154,7 +154,7 @@ namespace boost { template <> struct hash : std::hash { - explicit hash() = default; + hash() = default; using Base = std::hash; // VFALCO NOTE broken in vs2012 @@ -164,7 +164,7 @@ struct hash : std::hash template <> struct hash : std::hash { - explicit hash() = default; + hash() = default; using Base = std::hash; // VFALCO NOTE broken in vs2012 diff --git a/include/xrpl/protocol/UintTypes.h b/include/xrpl/protocol/UintTypes.h index 9a7284158e..d6cdc9350e 100644 --- a/include/xrpl/protocol/UintTypes.h +++ b/include/xrpl/protocol/UintTypes.h @@ -119,25 +119,25 @@ namespace std { template <> struct hash : 
ripple::Currency::hasher { - explicit hash() = default; + hash() = default; }; template <> struct hash : ripple::NodeID::hasher { - explicit hash() = default; + hash() = default; }; template <> struct hash : ripple::Directory::hasher { - explicit hash() = default; + hash() = default; }; template <> struct hash : ripple::uint256::hasher { - explicit hash() = default; + hash() = default; }; } // namespace std diff --git a/include/xrpl/resource/detail/Key.h b/include/xrpl/resource/detail/Key.h index f953d5103e..188ee142da 100644 --- a/include/xrpl/resource/detail/Key.h +++ b/include/xrpl/resource/detail/Key.h @@ -53,7 +53,7 @@ struct Key struct key_equal { - explicit key_equal() = default; + key_equal() = default; bool operator()(Key const& lhs, Key const& rhs) const diff --git a/src/xrpld/app/paths/RippleLineCache.h b/src/xrpld/app/paths/RippleLineCache.h index 5a3188c810..6196211a70 100644 --- a/src/xrpld/app/paths/RippleLineCache.h +++ b/src/xrpld/app/paths/RippleLineCache.h @@ -104,7 +104,7 @@ private: struct Hash { - explicit Hash() = default; + Hash() = default; std::size_t operator()(AccountKey const& key) const noexcept From 7713ff8c5c18f9f814e0cce61772af972333bf8f Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Thu, 22 May 2025 15:43:41 +0100 Subject: [PATCH 030/244] Add codecov badge, raise .codecov.yml thresholds (#5428) --- .codecov.yml | 4 ++-- README.md | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.codecov.yml b/.codecov.yml index 6df3786197..b97039e8b6 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -7,13 +7,13 @@ comment: show_carryforward_flags: false coverage: - range: "60..80" + range: "70..85" precision: 1 round: nearest status: project: default: - target: 60% + target: 75% threshold: 2% patch: default: diff --git a/README.md b/README.md index cc002a2dd8..0315c37428 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,5 @@ +[![codecov](https://codecov.io/gh/XRPLF/rippled/graph/badge.svg?token=WyFr5ajq3O)](https://codecov.io/gh/XRPLF/rippled) + # The XRP Ledger The [XRP Ledger](https://xrpl.org/) is a decentralized cryptographic ledger powered by a network of peer-to-peer nodes. The XRP Ledger uses a novel Byzantine Fault Tolerant consensus algorithm to settle and record transactions in a secure distributed database without a central operator. From 40ce8a883343bdc20e767998cee7f3b02421047c Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Fri, 23 May 2025 15:05:36 +0100 Subject: [PATCH 031/244] fix: Fix pseudo-account ID calculation (#5447) Before #5224, the pseudoaccount ID was calculated using prefix expressed in `std::uint16_t`. The refactoring to move the pseudoaccount ID calculation to View.cpp had accidentally changed the prefix type to `int` (derived from `auto i = 0`) which in turn changed the length of the input to `sha512Half` from 2 bytes to 4, altering the result. This resulted in a different ID of the pseudoaccount calculated from the function after the refactoring, breaking the ledger. This impacts AMMCreate, even when the `SingleAssetVault` amendment is not active. This change restores the prefix type to `std::uint16_t`. 
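A minimal standalone sketch (not part of this patch, plain standard C++) of why the integer width matters: `sha512Half` hash-appends the raw bytes of each argument, so a 2-byte `std::uint16_t` prefix and a 4-byte `int` prefix feed different inputs to the hasher even when both hold the same numeric value, and therefore yield different pseudo-account addresses. The variable names below are illustrative only.

```cpp
// Illustration only: does not call sha512Half; it just makes the
// 2-byte vs 4-byte difference described above concrete.
#include <cstdint>
#include <iostream>

int main()
{
    std::uint16_t narrowPrefix = 0;  // width hashed before the refactoring (2 bytes)
    int widePrefix = 0;              // width hashed after the accidental change (typically 4 bytes)

    std::cout << "prefix bytes fed to the hasher: " << sizeof(narrowPrefix)
              << " vs " << sizeof(widePrefix) << '\n';  // typically "2 vs 4"
}
```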
--- src/xrpld/ledger/detail/View.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/xrpld/ledger/detail/View.cpp b/src/xrpld/ledger/detail/View.cpp index d248d37e18..aa6e2dda8f 100644 --- a/src/xrpld/ledger/detail/View.cpp +++ b/src/xrpld/ledger/detail/View.cpp @@ -1056,8 +1056,8 @@ AccountID pseudoAccountAddress(ReadView const& view, uint256 const& pseudoOwnerKey) { // This number must not be changed without an amendment - constexpr int maxAccountAttempts = 256; - for (auto i = 0; i < maxAccountAttempts; ++i) + constexpr std::uint16_t maxAccountAttempts = 256; + for (std::uint16_t i = 0; i < maxAccountAttempts; ++i) { ripesha_hasher rsh; auto const hash = sha512Half(i, view.info().parentHash, pseudoOwnerKey); From 2a61aee5620725f8249e3066616ef6e20c99c857 Mon Sep 17 00:00:00 2001 From: Denis Angell Date: Fri, 23 May 2025 21:53:53 +0200 Subject: [PATCH 032/244] Add Batch feature (XLS-56) (#5060) - Specification: [XRPLF/XRPL-Standards 56](https://github.com/XRPLF/XRPL-Standards/blob/master/XLS-0056d-batch/README.md) - Amendment: `Batch` - Implements execution of multiple transactions within a single batch transaction with four execution modes: `tfAllOrNothing`, `tfOnlyOne`, `tfUntilFailure`, and `tfIndependent`. - Enables atomic multi-party transactions where multiple accounts can participate in a single batch, with up to 8 inner transactions and 8 batch signers per batch transaction. - Inner transactions use `tfInnerBatchTxn` flag with zero fees, no signature, and empty signing public key. - Inner transactions are applied after the outer batch succeeds via the `applyBatchTransactions` function in apply.cpp. - Network layer prevents relay of transactions with `tfInnerBatchTxn` flag - each peer applies inner transactions locally from the batch. - Batch transactions are excluded from AccountDelegate permissions but inner transactions retain full delegation support. - Metadata includes `ParentBatchID` linking inner transactions to their containing batch for traceability and auditing. - Extended STTx with batch-specific signature verification methods and added protocol structures (`sfRawTransactions`, `sfBatchSigners`). 
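For reviewers, a rough sketch of what a `BatchSigner` actually signs, based on the `serializeBatch` helper and `checkBatchSingleSign` added below (the wrapper function and variable names here are illustrative, not part of the patch): the signed payload is the `BCH` hash prefix, the outer Batch flags, and the ordered list of inner transaction IDs, not the serialized inner transactions themselves.

```cpp
#include <xrpl/protocol/Batch.h>       // serializeBatch (added by this patch)
#include <xrpl/protocol/STTx.h>
#include <xrpl/protocol/Serializer.h>

namespace ripple {

// Sketch only: builds the blob each BatchSigner account signs for an
// already-parsed outer Batch transaction. checkBatchSingleSign verifies
// each BatchSigner's TxnSignature against exactly this payload.
Serializer
batchSigningPayload(STTx const& batchTx)
{
    Serializer msg;
    serializeBatch(msg, batchTx.getFlags(), batchTx.getBatchTransactionIDs());
    return msg;
}

}  // namespace ripple
```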
--- include/xrpl/protocol/Batch.h | 37 + include/xrpl/protocol/HashPrefix.h | 3 + include/xrpl/protocol/Protocol.h | 3 + include/xrpl/protocol/STTx.h | 21 + include/xrpl/protocol/TER.h | 24 +- include/xrpl/protocol/TxFlags.h | 18 +- include/xrpl/protocol/TxMeta.h | 27 +- include/xrpl/protocol/detail/features.macro | 1 + include/xrpl/protocol/detail/sfields.macro | 5 + .../xrpl/protocol/detail/transactions.macro | 7 +- include/xrpl/protocol/jss.h | 2 + src/libxrpl/protocol/InnerObjectFormats.cpp | 7 + src/libxrpl/protocol/STTx.cpp | 253 +- src/libxrpl/protocol/TER.cpp | 1 + src/libxrpl/protocol/TxMeta.cpp | 16 +- src/test/app/AccountDelete_test.cpp | 29 - src/test/app/Batch_test.cpp | 3860 +++++++++++++++++ src/test/app/Delegate_test.cpp | 1 + src/test/app/MultiSign_test.cpp | 28 +- src/test/app/TxQ_test.cpp | 760 ++-- src/test/jtx.h | 2 + src/test/jtx/SignerUtils.h | 56 + src/test/jtx/TestHelpers.h | 106 +- src/test/jtx/acctdelete.h | 11 + src/test/jtx/batch.h | 169 + src/test/jtx/impl/AMM.cpp | 1 - src/test/jtx/impl/TestHelpers.cpp | 6 - src/test/jtx/impl/acctdelete.cpp | 23 + src/test/jtx/impl/batch.cpp | 154 + src/test/jtx/impl/check.cpp | 3 - src/test/jtx/impl/creds.cpp | 5 - src/test/jtx/impl/dids.cpp | 3 - src/test/jtx/impl/ledgerStateFixes.cpp | 1 - src/test/jtx/impl/multisign.cpp | 11 - src/test/jtx/impl/pay.cpp | 2 +- src/test/jtx/impl/txflags.cpp | 2 +- src/test/jtx/impl/xchain_bridge.cpp | 8 - src/test/jtx/multisign.h | 42 +- src/test/rpc/AccountLines_test.cpp | 2 - src/test/rpc/AccountObjects_test.cpp | 3 - src/test/rpc/AccountTx_test.cpp | 5 - src/test/rpc/JSONRPC_test.cpp | 122 + src/test/rpc/LedgerData_test.cpp | 2 - src/test/rpc/LedgerEntry_test.cpp | 1 - src/test/rpc/Simulate_test.cpp | 31 + src/xrpld/app/ledger/detail/BuildLedger.cpp | 14 +- src/xrpld/app/ledger/detail/OpenLedger.cpp | 14 + src/xrpld/app/misc/NetworkOPs.cpp | 49 +- src/xrpld/app/misc/detail/TxQ.cpp | 14 +- src/xrpld/app/tx/applySteps.h | 18 + src/xrpld/app/tx/detail/ApplyContext.cpp | 8 +- src/xrpld/app/tx/detail/ApplyContext.h | 28 +- src/xrpld/app/tx/detail/Batch.cpp | 482 ++ src/xrpld/app/tx/detail/Batch.h | 55 + src/xrpld/app/tx/detail/SetAccount.cpp | 2 +- src/xrpld/app/tx/detail/Transactor.cpp | 218 +- src/xrpld/app/tx/detail/Transactor.h | 73 +- src/xrpld/app/tx/detail/apply.cpp | 134 +- src/xrpld/app/tx/detail/applySteps.cpp | 71 +- src/xrpld/ledger/ApplyView.h | 3 + src/xrpld/ledger/ApplyViewImpl.h | 1 + src/xrpld/ledger/OpenView.h | 30 +- src/xrpld/ledger/detail/ApplyStateTable.cpp | 5 +- src/xrpld/ledger/detail/ApplyStateTable.h | 1 + src/xrpld/ledger/detail/ApplyViewImpl.cpp | 3 +- src/xrpld/ledger/detail/OpenView.cpp | 6 +- src/xrpld/overlay/detail/PeerImp.cpp | 25 + src/xrpld/rpc/detail/TransactionSign.cpp | 11 +- src/xrpld/rpc/handlers/Simulate.cpp | 5 + 69 files changed, 6400 insertions(+), 744 deletions(-) create mode 100644 include/xrpl/protocol/Batch.h create mode 100644 src/test/app/Batch_test.cpp create mode 100644 src/test/jtx/SignerUtils.h create mode 100644 src/test/jtx/batch.h create mode 100644 src/test/jtx/impl/batch.cpp create mode 100644 src/xrpld/app/tx/detail/Batch.cpp create mode 100644 src/xrpld/app/tx/detail/Batch.h diff --git a/include/xrpl/protocol/Batch.h b/include/xrpl/protocol/Batch.h new file mode 100644 index 0000000000..1388bbd2f1 --- /dev/null +++ b/include/xrpl/protocol/Batch.h @@ -0,0 +1,37 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 
2024 Ripple Labs Inc. + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include + +namespace ripple { + +inline void +serializeBatch( + Serializer& msg, + std::uint32_t const& flags, + std::vector const& txids) +{ + msg.add32(HashPrefix::batch); + msg.add32(flags); + msg.add32(std::uint32_t(txids.size())); + for (auto const& txid : txids) + msg.addBitString(txid); +} + +} // namespace ripple \ No newline at end of file diff --git a/include/xrpl/protocol/HashPrefix.h b/include/xrpl/protocol/HashPrefix.h index ab825658e8..7e486af4c0 100644 --- a/include/xrpl/protocol/HashPrefix.h +++ b/include/xrpl/protocol/HashPrefix.h @@ -88,6 +88,9 @@ enum class HashPrefix : std::uint32_t { /** Credentials signature */ credential = detail::make_hash_prefix('C', 'R', 'D'), + + /** Batch */ + batch = detail::make_hash_prefix('B', 'C', 'H'), }; template diff --git a/include/xrpl/protocol/Protocol.h b/include/xrpl/protocol/Protocol.h index 49bad8a076..898fd06fbd 100644 --- a/include/xrpl/protocol/Protocol.h +++ b/include/xrpl/protocol/Protocol.h @@ -169,6 +169,9 @@ std::size_t constexpr maxTrim = 25; */ std::size_t constexpr permissionMaxSize = 10; +/** The maximum number of transactions that can be in a batch. */ +std::size_t constexpr maxBatchTxCount = 8; + } // namespace ripple #endif diff --git a/include/xrpl/protocol/STTx.h b/include/xrpl/protocol/STTx.h index b00495bf76..f0d2157283 100644 --- a/include/xrpl/protocol/STTx.h +++ b/include/xrpl/protocol/STTx.h @@ -125,10 +125,16 @@ public: @return `true` if valid signature. If invalid, the error message string. */ enum class RequireFullyCanonicalSig : bool { no, yes }; + Expected checkSign(RequireFullyCanonicalSig requireCanonicalSig, Rules const& rules) const; + Expected + checkBatchSign( + RequireFullyCanonicalSig requireCanonicalSig, + Rules const& rules) const; + // SQL Functions with metadata. 
static std::string const& getMetaSQLInsertReplaceHeader(); @@ -144,6 +150,9 @@ public: char status, std::string const& escapedMetaData) const; + std::vector + getBatchTransactionIDs() const; + private: Expected checkSingleSign(RequireFullyCanonicalSig requireCanonicalSig) const; @@ -153,12 +162,24 @@ private: RequireFullyCanonicalSig requireCanonicalSig, Rules const& rules) const; + Expected + checkBatchSingleSign( + STObject const& batchSigner, + RequireFullyCanonicalSig requireCanonicalSig) const; + + Expected + checkBatchMultiSign( + STObject const& batchSigner, + RequireFullyCanonicalSig requireCanonicalSig, + Rules const& rules) const; + STBase* copy(std::size_t n, void* buf) const override; STBase* move(std::size_t n, void* buf) override; friend class detail::STVar; + mutable std::vector batch_txn_ids_; }; bool diff --git a/include/xrpl/protocol/TER.h b/include/xrpl/protocol/TER.h index b87bc3f8a4..4483d6251a 100644 --- a/include/xrpl/protocol/TER.h +++ b/include/xrpl/protocol/TER.h @@ -139,8 +139,8 @@ enum TEMcodes : TERUnderlyingType { temARRAY_EMPTY, temARRAY_TOO_LARGE, - temBAD_TRANSFER_FEE, + temINVALID_INNER_BATCH, }; //------------------------------------------------------------------------------ @@ -645,37 +645,37 @@ using TER = TERSubset; //------------------------------------------------------------------------------ inline bool -isTelLocal(TER x) +isTelLocal(TER x) noexcept { - return ((x) >= telLOCAL_ERROR && (x) < temMALFORMED); + return (x >= telLOCAL_ERROR && x < temMALFORMED); } inline bool -isTemMalformed(TER x) +isTemMalformed(TER x) noexcept { - return ((x) >= temMALFORMED && (x) < tefFAILURE); + return (x >= temMALFORMED && x < tefFAILURE); } inline bool -isTefFailure(TER x) +isTefFailure(TER x) noexcept { - return ((x) >= tefFAILURE && (x) < terRETRY); + return (x >= tefFAILURE && x < terRETRY); } inline bool -isTerRetry(TER x) +isTerRetry(TER x) noexcept { - return ((x) >= terRETRY && (x) < tesSUCCESS); + return (x >= terRETRY && x < tesSUCCESS); } inline bool -isTesSuccess(TER x) +isTesSuccess(TER x) noexcept { - return ((x) == tesSUCCESS); + return (x == tesSUCCESS); } inline bool -isTecClaim(TER x) +isTecClaim(TER x) noexcept { return ((x) >= tecCLAIM); } diff --git a/include/xrpl/protocol/TxFlags.h b/include/xrpl/protocol/TxFlags.h index 505000cfd6..31c3ffa205 100644 --- a/include/xrpl/protocol/TxFlags.h +++ b/include/xrpl/protocol/TxFlags.h @@ -58,7 +58,8 @@ namespace ripple { // clang-format off // Universal Transaction flags: constexpr std::uint32_t tfFullyCanonicalSig = 0x80000000; -constexpr std::uint32_t tfUniversal = tfFullyCanonicalSig; +constexpr std::uint32_t tfInnerBatchTxn = 0x40000000; +constexpr std::uint32_t tfUniversal = tfFullyCanonicalSig | tfInnerBatchTxn; constexpr std::uint32_t tfUniversalMask = ~tfUniversal; // AccountSet flags: @@ -97,6 +98,7 @@ constexpr std::uint32_t tfPassive = 0x00010000; constexpr std::uint32_t tfImmediateOrCancel = 0x00020000; constexpr std::uint32_t tfFillOrKill = 0x00040000; constexpr std::uint32_t tfSell = 0x00080000; + constexpr std::uint32_t tfOfferCreateMask = ~(tfUniversal | tfPassive | tfImmediateOrCancel | tfFillOrKill | tfSell); @@ -239,6 +241,20 @@ constexpr std::uint32_t const tfVaultPrivate = 0x00010000; static_assert(tfVaultPrivate == lsfVaultPrivate); constexpr std::uint32_t const tfVaultShareNonTransferable = 0x00020000; constexpr std::uint32_t const tfVaultCreateMask = ~(tfUniversal | tfVaultPrivate | tfVaultShareNonTransferable); + +// Batch Flags: +constexpr std::uint32_t tfAllOrNothing = 
0x00010000; +constexpr std::uint32_t tfOnlyOne = 0x00020000; +constexpr std::uint32_t tfUntilFailure = 0x00040000; +constexpr std::uint32_t tfIndependent = 0x00080000; +/** + * @note If nested Batch transactions are supported in the future, the tfInnerBatchTxn flag + * will need to be removed from this mask to allow Batch transaction to be inside + * the sfRawTransactions array. + */ +constexpr std::uint32_t const tfBatchMask = + ~(tfUniversal | tfAllOrNothing | tfOnlyOne | tfUntilFailure | tfIndependent) | tfInnerBatchTxn; + // clang-format on } // namespace ripple diff --git a/include/xrpl/protocol/TxMeta.h b/include/xrpl/protocol/TxMeta.h index 9422d697ca..02fde2ffe5 100644 --- a/include/xrpl/protocol/TxMeta.h +++ b/include/xrpl/protocol/TxMeta.h @@ -46,7 +46,10 @@ private: CtorHelper); public: - TxMeta(uint256 const& transactionID, std::uint32_t ledger); + TxMeta( + uint256 const& transactionID, + std::uint32_t ledger, + std::optional parentBatchId = std::nullopt); TxMeta(uint256 const& txID, std::uint32_t ledger, Blob const&); TxMeta(uint256 const& txID, std::uint32_t ledger, std::string const&); TxMeta(uint256 const& txID, std::uint32_t ledger, STObject const&); @@ -130,6 +133,27 @@ public: return static_cast(mDelivered); } + void + setParentBatchId(uint256 const& parentBatchId) + { + mParentBatchId = parentBatchId; + } + + uint256 + getParentBatchId() const + { + XRPL_ASSERT( + hasParentBatchId(), + "ripple::TxMeta::getParentBatchId : non-null batch id"); + return *mParentBatchId; + } + + bool + hasParentBatchId() const + { + return static_cast(mParentBatchId); + } + private: uint256 mTransactionID; std::uint32_t mLedger; @@ -137,6 +161,7 @@ private: int mResult; std::optional mDelivered; + std::optional mParentBatchId; STArray mNodes; }; diff --git a/include/xrpl/protocol/detail/features.macro b/include/xrpl/protocol/detail/features.macro index 3be0fd426c..e61d3a8005 100644 --- a/include/xrpl/protocol/detail/features.macro +++ b/include/xrpl/protocol/detail/features.macro @@ -32,6 +32,7 @@ // If you add an amendment here, then do not forget to increment `numFeatures` // in include/xrpl/protocol/Feature.h. 
+XRPL_FEATURE(Batch, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(SingleAssetVault, Supported::no, VoteBehavior::DefaultNo) XRPL_FEATURE(PermissionDelegation, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (PayChanCancelAfter, Supported::yes, VoteBehavior::DefaultNo) diff --git a/include/xrpl/protocol/detail/sfields.macro b/include/xrpl/protocol/detail/sfields.macro index 63bc52de6a..dbef597ea0 100644 --- a/include/xrpl/protocol/detail/sfields.macro +++ b/include/xrpl/protocol/detail/sfields.macro @@ -195,6 +195,7 @@ TYPED_SFIELD(sfHookNamespace, UINT256, 32) TYPED_SFIELD(sfHookSetTxnID, UINT256, 33) TYPED_SFIELD(sfDomainID, UINT256, 34) TYPED_SFIELD(sfVaultID, UINT256, 35) +TYPED_SFIELD(sfParentBatchID, UINT256, 36) // number (common) TYPED_SFIELD(sfNumber, NUMBER, 1) @@ -357,6 +358,8 @@ UNTYPED_SFIELD(sfXChainClaimAttestationCollectionElement, OBJECT, 30) UNTYPED_SFIELD(sfXChainCreateAccountAttestationCollectionElement, OBJECT, 31) UNTYPED_SFIELD(sfPriceData, OBJECT, 32) UNTYPED_SFIELD(sfCredential, OBJECT, 33) +UNTYPED_SFIELD(sfRawTransaction, OBJECT, 34) +UNTYPED_SFIELD(sfBatchSigner, OBJECT, 35) // array of objects (common) // ARRAY/1 is reserved for end of array @@ -388,3 +391,5 @@ UNTYPED_SFIELD(sfAuthorizeCredentials, ARRAY, 26) UNTYPED_SFIELD(sfUnauthorizeCredentials, ARRAY, 27) UNTYPED_SFIELD(sfAcceptedCredentials, ARRAY, 28) UNTYPED_SFIELD(sfPermissions, ARRAY, 29) +UNTYPED_SFIELD(sfRawTransactions, ARRAY, 30) +UNTYPED_SFIELD(sfBatchSigners, ARRAY, 31, SField::sMD_Default, SField::notSigning) diff --git a/include/xrpl/protocol/detail/transactions.macro b/include/xrpl/protocol/detail/transactions.macro index 0f614df692..5d5faae505 100644 --- a/include/xrpl/protocol/detail/transactions.macro +++ b/include/xrpl/protocol/detail/transactions.macro @@ -514,6 +514,12 @@ TRANSACTION(ttVAULT_CLAWBACK, 70, VaultClawback, Delegation::delegatable, ({ {sfAmount, soeOPTIONAL, soeMPTSupported}, })) +/** This transaction type batches together transactions. */ +TRANSACTION(ttBATCH, 71, Batch, Delegation::notDelegatable, ({ + {sfRawTransactions, soeREQUIRED}, + {sfBatchSigners, soeOPTIONAL}, +})) + /** This system-generated transaction type is used to update the status of the various amendments. For details, see: https://xrpl.org/amendments.html @@ -548,4 +554,3 @@ TRANSACTION(ttUNL_MODIFY, 102, UNLModify, Delegation::notDelegatable, ({ {sfLedgerSequence, soeREQUIRED}, {sfUNLModifyValidator, soeREQUIRED}, })) - diff --git a/include/xrpl/protocol/jss.h b/include/xrpl/protocol/jss.h index de3560d7f9..9dff4cc4f3 100644 --- a/include/xrpl/protocol/jss.h +++ b/include/xrpl/protocol/jss.h @@ -83,6 +83,8 @@ JSS(PriceDataSeries); // field. JSS(PriceData); // field. JSS(Provider); // field. JSS(QuoteAsset); // in: Oracle. +JSS(RawTransaction); // in: Batch +JSS(RawTransactions); // in: Batch JSS(SLE_hit_rate); // out: GetCounts. JSS(Scale); // field. 
JSS(SettleDelay); // in: TransactionSign diff --git a/src/libxrpl/protocol/InnerObjectFormats.cpp b/src/libxrpl/protocol/InnerObjectFormats.cpp index ecfca9743d..3f3b1e00c0 100644 --- a/src/libxrpl/protocol/InnerObjectFormats.cpp +++ b/src/libxrpl/protocol/InnerObjectFormats.cpp @@ -158,6 +158,13 @@ InnerObjectFormats::InnerObjectFormats() add(sfPermission.jsonName.c_str(), sfPermission.getCode(), {{sfPermissionValue, soeREQUIRED}}); + + add(sfBatchSigner.jsonName.c_str(), + sfBatchSigner.getCode(), + {{sfAccount, soeREQUIRED}, + {sfSigningPubKey, soeOPTIONAL}, + {sfTxnSignature, soeOPTIONAL}, + {sfSigners, soeOPTIONAL}}); } InnerObjectFormats const& diff --git a/src/libxrpl/protocol/STTx.cpp b/src/libxrpl/protocol/STTx.cpp index 7b6b4c1ee2..ee26dd69de 100644 --- a/src/libxrpl/protocol/STTx.cpp +++ b/src/libxrpl/protocol/STTx.cpp @@ -19,6 +19,7 @@ #include #include +#include #include #include #include @@ -29,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -42,6 +44,7 @@ #include #include #include +#include #include #include #include @@ -262,6 +265,42 @@ STTx::checkSign( return Unexpected("Internal signature check failure."); } +Expected +STTx::checkBatchSign( + RequireFullyCanonicalSig requireCanonicalSig, + Rules const& rules) const +{ + try + { + XRPL_ASSERT( + getTxnType() == ttBATCH, + "STTx::checkBatchSign : not a batch transaction"); + if (getTxnType() != ttBATCH) + { + JLOG(debugLog().fatal()) << "not a batch transaction"; + return Unexpected("Not a batch transaction."); + } + STArray const& signers{getFieldArray(sfBatchSigners)}; + for (auto const& signer : signers) + { + Blob const& signingPubKey = signer.getFieldVL(sfSigningPubKey); + auto const result = signingPubKey.empty() + ? checkBatchMultiSign(signer, requireCanonicalSig, rules) + : checkBatchSingleSign(signer, requireCanonicalSig); + + if (!result) + return result; + } + return {}; + } + catch (std::exception const& e) + { + JLOG(debugLog().error()) + << "Batch signature check failed: " << e.what(); + } + return Unexpected("Internal batch signature check failure."); +} + Json::Value STTx::getJson(JsonOptions options) const { @@ -341,79 +380,90 @@ STTx::getMetaSQL( getFieldU32(sfSequence) % inLedger % status % rTxn % escapedMetaData); } -Expected -STTx::checkSingleSign(RequireFullyCanonicalSig requireCanonicalSig) const +static Expected +singleSignHelper( + STObject const& signer, + Slice const& data, + bool const fullyCanonical) { // We don't allow both a non-empty sfSigningPubKey and an sfSigners. // That would allow the transaction to be signed two ways. So if both // fields are present the signature is invalid. - if (isFieldPresent(sfSigners)) + if (signer.isFieldPresent(sfSigners)) return Unexpected("Cannot both single- and multi-sign."); bool validSig = false; try { - bool const fullyCanonical = (getFlags() & tfFullyCanonicalSig) || - (requireCanonicalSig == RequireFullyCanonicalSig::yes); - - auto const spk = getFieldVL(sfSigningPubKey); - + auto const spk = signer.getFieldVL(sfSigningPubKey); if (publicKeyType(makeSlice(spk))) { - Blob const signature = getFieldVL(sfTxnSignature); - Blob const data = getSigningData(*this); - + Blob const signature = signer.getFieldVL(sfTxnSignature); validSig = verify( PublicKey(makeSlice(spk)), - makeSlice(data), + data, makeSlice(signature), fullyCanonical); } } catch (std::exception const&) { - // Assume it was a signature failure. 
validSig = false; } - if (validSig == false) + + if (!validSig) return Unexpected("Invalid signature."); - // Signature was verified. + return {}; } Expected -STTx::checkMultiSign( - RequireFullyCanonicalSig requireCanonicalSig, - Rules const& rules) const +STTx::checkSingleSign(RequireFullyCanonicalSig requireCanonicalSig) const +{ + auto const data = getSigningData(*this); + bool const fullyCanonical = (getFlags() & tfFullyCanonicalSig) || + (requireCanonicalSig == STTx::RequireFullyCanonicalSig::yes); + return singleSignHelper(*this, makeSlice(data), fullyCanonical); +} + +Expected +STTx::checkBatchSingleSign( + STObject const& batchSigner, + RequireFullyCanonicalSig requireCanonicalSig) const +{ + Serializer msg; + serializeBatch(msg, getFlags(), getBatchTransactionIDs()); + bool const fullyCanonical = (getFlags() & tfFullyCanonicalSig) || + (requireCanonicalSig == STTx::RequireFullyCanonicalSig::yes); + return singleSignHelper(batchSigner, msg.slice(), fullyCanonical); +} + +Expected +multiSignHelper( + STObject const& signerObj, + bool const fullyCanonical, + std::function makeMsg, + Rules const& rules) { // Make sure the MultiSigners are present. Otherwise they are not // attempting multi-signing and we just have a bad SigningPubKey. - if (!isFieldPresent(sfSigners)) + if (!signerObj.isFieldPresent(sfSigners)) return Unexpected("Empty SigningPubKey."); // We don't allow both an sfSigners and an sfTxnSignature. Both fields // being present would indicate that the transaction is signed both ways. - if (isFieldPresent(sfTxnSignature)) + if (signerObj.isFieldPresent(sfTxnSignature)) return Unexpected("Cannot both single- and multi-sign."); - STArray const& signers{getFieldArray(sfSigners)}; + STArray const& signers{signerObj.getFieldArray(sfSigners)}; // There are well known bounds that the number of signers must be within. - if (signers.size() < minMultiSigners || - signers.size() > maxMultiSigners(&rules)) + if (signers.size() < STTx::minMultiSigners || + signers.size() > STTx::maxMultiSigners(&rules)) return Unexpected("Invalid Signers array size."); - // We can ease the computational load inside the loop a bit by - // pre-constructing part of the data that we hash. Fill a Serializer - // with the stuff that stays constant from signature to signature. - Serializer const dataStart{startMultiSigningData(*this)}; - // We also use the sfAccount field inside the loop. Get it once. - auto const txnAccountID = getAccountID(sfAccount); - - // Determine whether signatures must be full canonical. - bool const fullyCanonical = (getFlags() & tfFullyCanonicalSig) || - (requireCanonicalSig == RequireFullyCanonicalSig::yes); + auto const txnAccountID = signerObj.getAccountID(sfAccount); // Signers must be in sorted order by AccountID. 
AccountID lastAccountID(beast::zero); @@ -441,18 +491,13 @@ STTx::checkMultiSign( bool validSig = false; try { - Serializer s = dataStart; - finishMultiSigningData(accountID, s); - auto spk = signer.getFieldVL(sfSigningPubKey); - if (publicKeyType(makeSlice(spk))) { Blob const signature = signer.getFieldVL(sfTxnSignature); - validSig = verify( PublicKey(makeSlice(spk)), - s.slice(), + makeMsg(accountID).slice(), makeSlice(signature), fullyCanonical); } @@ -471,6 +516,90 @@ STTx::checkMultiSign( return {}; } +Expected +STTx::checkBatchMultiSign( + STObject const& batchSigner, + RequireFullyCanonicalSig requireCanonicalSig, + Rules const& rules) const +{ + bool const fullyCanonical = (getFlags() & tfFullyCanonicalSig) || + (requireCanonicalSig == RequireFullyCanonicalSig::yes); + + // We can ease the computational load inside the loop a bit by + // pre-constructing part of the data that we hash. Fill a Serializer + // with the stuff that stays constant from signature to signature. + Serializer dataStart; + serializeBatch(dataStart, getFlags(), getBatchTransactionIDs()); + return multiSignHelper( + batchSigner, + fullyCanonical, + [&dataStart](AccountID const& accountID) mutable -> Serializer { + Serializer s = dataStart; + finishMultiSigningData(accountID, s); + return s; + }, + rules); +} + +Expected +STTx::checkMultiSign( + RequireFullyCanonicalSig requireCanonicalSig, + Rules const& rules) const +{ + bool const fullyCanonical = (getFlags() & tfFullyCanonicalSig) || + (requireCanonicalSig == RequireFullyCanonicalSig::yes); + + // We can ease the computational load inside the loop a bit by + // pre-constructing part of the data that we hash. Fill a Serializer + // with the stuff that stays constant from signature to signature. + Serializer dataStart = startMultiSigningData(*this); + return multiSignHelper( + *this, + fullyCanonical, + [&dataStart](AccountID const& accountID) mutable -> Serializer { + Serializer s = dataStart; + finishMultiSigningData(accountID, s); + return s; + }, + rules); +} + +/** + * @brief Retrieves a batch of transaction IDs from the STTx. + * + * This function returns a vector of transaction IDs by extracting them from + * the field array `sfRawTransactions` within the STTx. If the batch + * transaction IDs have already been computed and cached in `batch_txn_ids_`, + * it returns the cached vector. Otherwise, it computes the transaction IDs, + * caches them, and then returns the vector. + * + * @return A vector of `uint256` containing the batch transaction IDs. + * + * @note The function asserts that the `sfRawTransactions` field array is not + * empty and that the size of the computed batch transaction IDs matches the + * size of the `sfRawTransactions` field array. 
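 *
 * @par Example (illustrative sketch)
 * The returned IDs form the payload that Batch co-signers sign over, as in
 * checkBatchSingleSign() and the Batch_test helpers:
 * @code
 * Serializer msg;
 * serializeBatch(msg, batch.getFlags(), batch.getBatchTransactionIDs());
 * auto const sig = sign(signerPubKey, signerSecretKey, msg.slice());
 * @endcode
 * Here `batch` is a ttBATCH STTx and the key pair belongs to the co-signing
 * account; the variable names are placeholders.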
+ */ +std::vector +STTx::getBatchTransactionIDs() const +{ + XRPL_ASSERT( + getTxnType() == ttBATCH, + "STTx::getBatchTransactionIDs : not a batch transaction"); + XRPL_ASSERT( + getFieldArray(sfRawTransactions).size() != 0, + "STTx::getBatchTransactionIDs : empty raw transactions"); + if (batch_txn_ids_.size() != 0) + return batch_txn_ids_; + + for (STObject const& rb : getFieldArray(sfRawTransactions)) + batch_txn_ids_.push_back(rb.getHash(HashPrefix::transactionID)); + + XRPL_ASSERT( + batch_txn_ids_.size() == getFieldArray(sfRawTransactions).size(), + "STTx::getBatchTransactionIDs : batch transaction IDs size mismatch"); + return batch_txn_ids_; +} + //------------------------------------------------------------------------------ static bool @@ -606,6 +735,42 @@ invalidMPTAmountInTx(STObject const& tx) return false; } +static bool +isRawTransactionOkay(STObject const& st, std::string& reason) +{ + if (!st.isFieldPresent(sfRawTransactions)) + return true; + + if (st.isFieldPresent(sfBatchSigners) && + st.getFieldArray(sfBatchSigners).size() > maxBatchTxCount) + { + reason = "Batch Signers array exceeds max entries."; + return false; + } + + auto const& rawTxns = st.getFieldArray(sfRawTransactions); + if (rawTxns.size() > maxBatchTxCount) + { + reason = "Raw Transactions array exceeds max entries."; + return false; + } + for (STObject raw : rawTxns) + { + try + { + TxType const tt = + safe_cast(raw.getFieldU16(sfTransactionType)); + raw.applyTemplate(getTxFormat(tt)->getSOTemplate()); + } + catch (std::exception const& e) + { + reason = e.what(); + return false; + } + } + return true; +} + bool passesLocalChecks(STObject const& st, std::string& reason) { @@ -630,6 +795,9 @@ passesLocalChecks(STObject const& st, std::string& reason) return false; } + if (!isRawTransactionOkay(st, reason)) + return false; + return true; } @@ -645,10 +813,13 @@ sterilize(STTx const& stx) bool isPseudoTx(STObject const& tx) { - auto t = tx[~sfTransactionType]; + auto const t = tx[~sfTransactionType]; + if (!t) return false; - auto tt = safe_cast(*t); + + auto const tt = safe_cast(*t); + return tt == ttAMENDMENT || tt == ttFEE || tt == ttUNL_MODIFY; } diff --git a/src/libxrpl/protocol/TER.cpp b/src/libxrpl/protocol/TER.cpp index 943a0e601b..68125fab83 100644 --- a/src/libxrpl/protocol/TER.cpp +++ b/src/libxrpl/protocol/TER.cpp @@ -217,6 +217,7 @@ transResults() MAKE_ERROR(temARRAY_EMPTY, "Malformed: Array is empty."), MAKE_ERROR(temARRAY_TOO_LARGE, "Malformed: Array is too large."), MAKE_ERROR(temBAD_TRANSFER_FEE, "Malformed: Transfer fee is outside valid range."), + MAKE_ERROR(temINVALID_INNER_BATCH, "Malformed: Invalid inner batch transaction."), MAKE_ERROR(terRETRY, "Retry transaction."), MAKE_ERROR(terFUNDS_SPENT, "DEPRECATED."), diff --git a/src/libxrpl/protocol/TxMeta.cpp b/src/libxrpl/protocol/TxMeta.cpp index d9a9f0db87..2083fc8eaf 100644 --- a/src/libxrpl/protocol/TxMeta.cpp +++ b/src/libxrpl/protocol/TxMeta.cpp @@ -56,6 +56,9 @@ TxMeta::TxMeta( if (obj.isFieldPresent(sfDeliveredAmount)) setDeliveredAmount(obj.getFieldAmount(sfDeliveredAmount)); + + if (obj.isFieldPresent(sfParentBatchID)) + setParentBatchId(obj.getFieldH256(sfParentBatchID)); } TxMeta::TxMeta(uint256 const& txid, std::uint32_t ledger, STObject const& obj) @@ -76,6 +79,9 @@ TxMeta::TxMeta(uint256 const& txid, std::uint32_t ledger, STObject const& obj) if (obj.isFieldPresent(sfDeliveredAmount)) setDeliveredAmount(obj.getFieldAmount(sfDeliveredAmount)); + + if (obj.isFieldPresent(sfParentBatchID)) + 
setParentBatchId(obj.getFieldH256(sfParentBatchID)); } TxMeta::TxMeta(uint256 const& txid, std::uint32_t ledger, Blob const& vec) @@ -91,11 +97,15 @@ TxMeta::TxMeta( { } -TxMeta::TxMeta(uint256 const& transactionID, std::uint32_t ledger) +TxMeta::TxMeta( + uint256 const& transactionID, + std::uint32_t ledger, + std::optional parentBatchId) : mTransactionID(transactionID) , mLedger(ledger) , mIndex(static_cast(-1)) , mResult(255) + , mParentBatchId(parentBatchId) , mNodes(sfAffectedNodes) { mNodes.reserve(32); @@ -231,6 +241,10 @@ TxMeta::getAsObject() const metaData.emplace_back(mNodes); if (hasDeliveredAmount()) metaData.setFieldAmount(sfDeliveredAmount, getDeliveredAmount()); + + if (hasParentBatchId()) + metaData.setFieldH256(sfParentBatchID, getParentBatchId()); + return metaData; } diff --git a/src/test/app/AccountDelete_test.cpp b/src/test/app/AccountDelete_test.cpp index 4ae18d9d28..03283e4611 100644 --- a/src/test/app/AccountDelete_test.cpp +++ b/src/test/app/AccountDelete_test.cpp @@ -28,12 +28,6 @@ namespace test { class AccountDelete_test : public beast::unit_test::suite { private: - std::uint32_t - openLedgerSeq(jtx::Env& env) - { - return env.current()->seq(); - } - // Helper function that verifies the expected DeliveredAmount is present. // // NOTE: the function _infers_ the transaction to operate on by calling @@ -83,26 +77,6 @@ private: return jv; }; - // Close the ledger until the ledger sequence is large enough to close - // the account. If margin is specified, close the ledger so `margin` - // more closes are needed - void - incLgrSeqForAccDel( - jtx::Env& env, - jtx::Account const& acc, - std::uint32_t margin = 0) - { - int const delta = [&]() -> int { - if (env.seq(acc) + 255 > openLedgerSeq(env)) - return env.seq(acc) - openLedgerSeq(env) + 255 - margin; - return 0; - }(); - BEAST_EXPECT(margin == 0 || delta >= 0); - for (int i = 0; i < delta; ++i) - env.close(); - BEAST_EXPECT(openLedgerSeq(env) == env.seq(acc) + 255 - margin); - } - public: void testBasics() @@ -368,7 +342,6 @@ public: NetClock::time_point const& cancelAfter) { Json::Value jv; jv[jss::TransactionType] = jss::EscrowCreate; - jv[jss::Flags] = tfUniversal; jv[jss::Account] = account.human(); jv[jss::Destination] = to.human(); jv[jss::Amount] = amount.getJson(JsonOptions::none); @@ -398,7 +371,6 @@ public: [](Account const& account, Account const& from, std::uint32_t seq) { Json::Value jv; jv[jss::TransactionType] = jss::EscrowCancel; - jv[jss::Flags] = tfUniversal; jv[jss::Account] = account.human(); jv[sfOwner.jsonName] = from.human(); jv[sfOfferSequence.jsonName] = seq; @@ -536,7 +508,6 @@ public: auto payChanClaim = [&]() { Json::Value jv; jv[jss::TransactionType] = jss::PaymentChannelClaim; - jv[jss::Flags] = tfUniversal; jv[jss::Account] = alice.human(); jv[sfChannel.jsonName] = to_string(payChanKey.key); jv[sfBalance.jsonName] = diff --git a/src/test/app/Batch_test.cpp b/src/test/app/Batch_test.cpp new file mode 100644 index 0000000000..6874a42c9e --- /dev/null +++ b/src/test/app/Batch_test.cpp @@ -0,0 +1,3860 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2024 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. 
+ + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace ripple { +namespace test { + +class Batch_test : public beast::unit_test::suite +{ + struct TestLedgerData + { + int index; + std::string txType; + std::string result; + std::string txHash; + std::optional batchID; + }; + + struct TestBatchData + { + std::string result; + std::string txHash; + }; + + Json::Value + getTxByIndex(Json::Value const& jrr, int const index) + { + for (auto const& txn : jrr[jss::result][jss::ledger][jss::transactions]) + { + if (txn[jss::metaData][sfTransactionIndex.jsonName] == index) + return txn; + } + return {}; + } + + Json::Value + getLastLedger(jtx::Env& env) + { + Json::Value params; + params[jss::ledger_index] = env.closed()->seq(); + params[jss::transactions] = true; + params[jss::expand] = true; + return env.rpc("json", "ledger", to_string(params)); + } + + void + validateInnerTxn( + jtx::Env& env, + std::string const& batchID, + TestLedgerData const& ledgerResult) + { + Json::Value const jrr = env.rpc("tx", ledgerResult.txHash)[jss::result]; + BEAST_EXPECT(jrr[sfTransactionType.jsonName] == ledgerResult.txType); + BEAST_EXPECT( + jrr[jss::meta][sfTransactionResult.jsonName] == + ledgerResult.result); + BEAST_EXPECT(jrr[jss::meta][sfParentBatchID.jsonName] == batchID); + } + + void + validateClosedLedger( + jtx::Env& env, + std::vector const& ledgerResults) + { + auto const jrr = getLastLedger(env); + auto const transactions = + jrr[jss::result][jss::ledger][jss::transactions]; + BEAST_EXPECT(transactions.size() == ledgerResults.size()); + for (TestLedgerData const& ledgerResult : ledgerResults) + { + auto const txn = getTxByIndex(jrr, ledgerResult.index); + BEAST_EXPECT(txn[jss::hash].asString() == ledgerResult.txHash); + BEAST_EXPECT(txn.isMember(jss::metaData)); + Json::Value const meta = txn[jss::metaData]; + BEAST_EXPECT( + txn[sfTransactionType.jsonName] == ledgerResult.txType); + BEAST_EXPECT( + meta[sfTransactionResult.jsonName] == ledgerResult.result); + if (ledgerResult.batchID) + validateInnerTxn(env, *ledgerResult.batchID, ledgerResult); + } + } + + template + std::pair, std::string> + submitBatch(jtx::Env& env, TER const& result, Args&&... 
args) + { + auto batchTxn = env.jt(std::forward(args)...); + env(batchTxn, jtx::ter(result)); + + auto const ids = batchTxn.stx->getBatchTransactionIDs(); + std::vector txIDs; + for (auto const& id : ids) + txIDs.push_back(strHex(id)); + TxID const batchID = batchTxn.stx->getTransactionID(); + return std::make_pair(txIDs, strHex(batchID)); + } + + static uint256 + getCheckIndex(AccountID const& account, std::uint32_t uSequence) + { + return keylet::check(account, uSequence).key; + } + + static std::unique_ptr + makeSmallQueueConfig( + std::map extraTxQ = {}, + std::map extraVoting = {}) + { + auto p = test::jtx::envconfig(); + auto& section = p->section("transaction_queue"); + section.set("ledgers_in_queue", "2"); + section.set("minimum_queue_size", "2"); + section.set("min_ledgers_to_compute_size_limit", "3"); + section.set("max_ledger_counts_to_store", "100"); + section.set("retry_sequence_percent", "25"); + section.set("normal_consensus_increase_percent", "0"); + + for (auto const& [k, v] : extraTxQ) + section.set(k, v); + + return p; + } + + auto + openLedgerFee(jtx::Env& env, XRPAmount const& batchFee) + { + using namespace jtx; + + auto const& view = *env.current(); + auto metrics = env.app().getTxQ().getMetrics(view); + return toDrops(metrics.openLedgerFeeLevel, batchFee) + 1; + } + + void + testEnable(FeatureBitset features) + { + testcase("enabled"); + + using namespace test::jtx; + using namespace std::literals; + + for (bool const withBatch : {true, false}) + { + auto const amend = withBatch ? features : features - featureBatch; + test::jtx::Env env{*this, envconfig(), amend}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const carol = Account("carol"); + env.fund(XRP(10000), alice, bob, carol); + env.close(); + + // ttBatch + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 0, 2); + auto const txResult = + withBatch ? ter(tesSUCCESS) : ter(temDISABLED); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1)), seq + 1), + batch::inner(pay(alice, bob, XRP(1)), seq + 2), + txResult); + env.close(); + } + + // tfInnerBatchTxn + // If the feature is disabled, the transaction fails with + // temINVALID_FLAG If the feature is enabled, the transaction fails + // early in checkValidity() + { + auto const txResult = + withBatch ? ter(telENV_RPC_FAILED) : ter(temINVALID_FLAG); + env(pay(alice, bob, XRP(1)), + txflags(tfInnerBatchTxn), + txResult); + env.close(); + } + + env.close(); + } + } + + void + testPreflight(FeatureBitset features) + { + testcase("preflight"); + + using namespace test::jtx; + using namespace std::literals; + + //---------------------------------------------------------------------- + // preflight + + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const carol = Account("carol"); + env.fund(XRP(10000), alice, bob, carol); + env.close(); + + // temBAD_FEE: preflight1 + { + env(batch::outer(alice, env.seq(alice), XRP(-1), tfAllOrNothing), + ter(temBAD_FEE)); + env.close(); + } + + // DEFENSIVE: temINVALID_FLAG: Batch: inner batch flag. + // ACTUAL: telENV_RPC_FAILED: checkValidity() + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 0, 0); + env(batch::outer(alice, seq, batchFee, tfInnerBatchTxn), + ter(telENV_RPC_FAILED)); + env.close(); + } + + // temINVALID_FLAG: Batch: invalid flags. 
+ { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 0, 0); + env(batch::outer(alice, seq, batchFee, tfDisallowXRP), + ter(temINVALID_FLAG)); + env.close(); + } + + // temINVALID_FLAG: Batch: too many flags. + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 0, 0); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + txflags(tfAllOrNothing | tfOnlyOne), + ter(temINVALID_FLAG)); + env.close(); + } + + // temARRAY_EMPTY: Batch: txns array must have at least 2 entries. + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 0, 0); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + ter(temARRAY_EMPTY)); + env.close(); + } + + // temARRAY_EMPTY: Batch: txns array must have at least 2 entries. + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 0, 0); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1)), seq + 1), + ter(temARRAY_EMPTY)); + env.close(); + } + + // DEFENSIVE: temARRAY_TOO_LARGE: Batch: txns array exceeds 8 entries. + // ACTUAL: telENV_RPC_FAILED: isRawTransactionOkay() + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 0, 9); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1)), seq + 1), + batch::inner(pay(alice, bob, XRP(1)), seq + 2), + batch::inner(pay(alice, bob, XRP(1)), seq + 3), + batch::inner(pay(alice, bob, XRP(1)), seq + 4), + batch::inner(pay(alice, bob, XRP(1)), seq + 5), + batch::inner(pay(alice, bob, XRP(1)), seq + 6), + batch::inner(pay(alice, bob, XRP(1)), seq + 7), + batch::inner(pay(alice, bob, XRP(1)), seq + 8), + batch::inner(pay(alice, bob, XRP(1)), seq + 9), + ter(telENV_RPC_FAILED)); + env.close(); + } + + // temREDUNDANT: Batch: duplicate Txn found. + { + auto const batchFee = batch::calcBatchFee(env, 1, 2); + auto const seq = env.seq(alice); + auto jt = env.jtnofill( + batch::outer(alice, env.seq(alice), batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), seq + 1), + batch::inner(pay(alice, bob, XRP(10)), seq + 1)); + + env(jt.jv, batch::sig(bob), ter(temREDUNDANT)); + env.close(); + } + + // temINVALID: Batch: batch cannot have inner batch txn. + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 0, 2); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner( + batch::outer(alice, seq, batchFee, tfAllOrNothing), seq), + batch::inner(pay(alice, bob, XRP(1)), seq + 2), + ter(temINVALID)); + env.close(); + } + + // temINVALID_FLAG: Batch: inner txn must have the + // tfInnerBatchTxn flag. + { + auto const batchFee = batch::calcBatchFee(env, 1, 2); + auto const seq = env.seq(alice); + auto tx1 = batch::inner(pay(alice, bob, XRP(10)), seq + 1); + tx1[jss::Flags] = 0; + auto jt = env.jtnofill( + batch::outer(alice, seq, batchFee, tfAllOrNothing), + tx1, + batch::inner(pay(alice, bob, XRP(10)), seq + 2)); + + env(jt.jv, batch::sig(bob), ter(temINVALID_FLAG)); + env.close(); + } + + // temBAD_SIGNATURE: Batch: inner txn cannot include TxnSignature. 
+ { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 0, 2); + auto jt = env.jt(pay(alice, bob, XRP(1))); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(jt.jv, seq + 1), + batch::inner(pay(alice, bob, XRP(1)), seq + 2), + ter(temBAD_SIGNATURE)); + env.close(); + } + + // temBAD_SIGNER: Batch: inner txn cannot include Signers. + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 0, 2); + auto tx1 = pay(alice, bob, XRP(1)); + tx1[sfSigners.jsonName] = Json::arrayValue; + tx1[sfSigners.jsonName][0U][sfSigner.jsonName] = Json::objectValue; + tx1[sfSigners.jsonName][0U][sfSigner.jsonName][sfAccount.jsonName] = + alice.human(); + tx1[sfSigners.jsonName][0U][sfSigner.jsonName] + [sfSigningPubKey.jsonName] = strHex(alice.pk()); + tx1[sfSigners.jsonName][0U][sfSigner.jsonName] + [sfTxnSignature.jsonName] = "DEADBEEF"; + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(tx1, seq + 1), + batch::inner(pay(alice, bob, XRP(1)), seq + 2), + ter(temBAD_SIGNER)); + env.close(); + } + + // temBAD_REGKEY: Batch: inner txn must include empty + // SigningPubKey. + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 0, 2); + auto tx1 = batch::inner(pay(alice, bob, XRP(1)), seq + 1); + tx1[jss::SigningPubKey] = strHex(alice.pk()); + auto jt = env.jtnofill( + batch::outer(alice, seq, batchFee, tfAllOrNothing), + tx1, + batch::inner(pay(alice, bob, XRP(1)), seq + 2)); + + env(jt.jv, ter(temBAD_REGKEY)); + env.close(); + } + + // temINVALID_INNER_BATCH: Batch: inner txn preflight failed. + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 0, 2); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1)), seq + 1), + // amount can't be negative + batch::inner(pay(alice, bob, XRP(-1)), seq + 2), + ter(temINVALID_INNER_BATCH)); + env.close(); + } + + // temBAD_FEE: Batch: inner txn must have a fee of 0. + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 0, 2); + auto tx1 = batch::inner(pay(alice, bob, XRP(1)), seq + 1); + tx1[jss::Fee] = to_string(env.current()->fees().base); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + tx1, + batch::inner(pay(alice, bob, XRP(2)), seq + 2), + ter(temBAD_FEE)); + env.close(); + } + + // temSEQ_AND_TICKET: Batch: inner txn cannot have both Sequence + // and TicketSequence. + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 0, 2); + auto tx1 = batch::inner(pay(alice, bob, XRP(1)), 0, 1); + tx1[jss::Sequence] = seq + 1; + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + tx1, + batch::inner(pay(alice, bob, XRP(2)), seq + 2), + ter(temSEQ_AND_TICKET)); + env.close(); + } + + // temSEQ_AND_TICKET: Batch: inner txn must have either Sequence or + // TicketSequence. 
+ { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 0, 2); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1)), 0), + batch::inner(pay(alice, bob, XRP(2)), seq + 2), + ter(temSEQ_AND_TICKET)); + env.close(); + } + + // temREDUNDANT: Batch: duplicate sequence found: + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 0, 2); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1)), seq + 1), + batch::inner(pay(alice, bob, XRP(2)), seq + 1), + ter(temREDUNDANT)); + env.close(); + } + + // temREDUNDANT: Batch: duplicate ticket found: + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 0, 2); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1)), 0, seq + 1), + batch::inner(pay(alice, bob, XRP(2)), 0, seq + 1), + ter(temREDUNDANT)); + env.close(); + } + + // temREDUNDANT: Batch: duplicate ticket & sequence found: + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 0, 2); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1)), 0, seq + 1), + batch::inner(pay(alice, bob, XRP(2)), seq + 1), + ter(temREDUNDANT)); + env.close(); + } + + // DEFENSIVE: temARRAY_TOO_LARGE: Batch: signers array exceeds 8 + // entries. + // ACTUAL: telENV_RPC_FAILED: isRawTransactionOkay() + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 9, 2); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), seq + 1), + batch::inner(pay(alice, bob, XRP(5)), seq + 2), + batch::sig( + bob, + carol, + alice, + bob, + carol, + alice, + bob, + carol, + alice, + alice), + ter(telENV_RPC_FAILED)); + env.close(); + } + + // temBAD_SIGNER: Batch: signer cannot be the outer account + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 2, 2); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), seq + 1), + batch::inner(pay(bob, alice, XRP(5)), env.seq(bob)), + batch::sig(alice, bob), + ter(temBAD_SIGNER)); + env.close(); + } + + // temREDUNDANT: Batch: duplicate signer found + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 2, 2); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), seq + 1), + batch::inner(pay(bob, alice, XRP(5)), env.seq(bob)), + batch::sig(bob, bob), + ter(temREDUNDANT)); + env.close(); + } + + // temBAD_SIGNER: Batch: no account signature for inner txn. + // Note: Extra signature by bob + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 1, 2); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), seq + 1), + batch::inner(pay(alice, bob, XRP(5)), seq + 2), + batch::sig(bob), + ter(temBAD_SIGNER)); + env.close(); + } + + // temBAD_SIGNER: Batch: no account signature for inner txn. + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 1, 2); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), seq + 1), + batch::inner(pay(bob, alice, XRP(5)), env.seq(bob)), + batch::sig(carol), + ter(temBAD_SIGNER)); + env.close(); + } + + // temBAD_SIGNATURE: Batch: invalid batch txn signature. 
+ { + auto const seq = env.seq(alice); + auto const bobSeq = env.seq(bob); + auto const batchFee = batch::calcBatchFee(env, 1, 2); + auto jt = env.jtnofill( + batch::outer(alice, env.seq(alice), batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), seq + 1), + batch::inner(pay(bob, alice, XRP(5)), bobSeq)); + + Serializer msg; + serializeBatch( + msg, tfAllOrNothing, jt.stx->getBatchTransactionIDs()); + auto const sig = ripple::sign(bob.pk(), bob.sk(), msg.slice()); + jt.jv[sfBatchSigners.jsonName][0u][sfBatchSigner.jsonName] + [sfAccount.jsonName] = bob.human(); + jt.jv[sfBatchSigners.jsonName][0u][sfBatchSigner.jsonName] + [sfSigningPubKey.jsonName] = strHex(alice.pk()); + jt.jv[sfBatchSigners.jsonName][0u][sfBatchSigner.jsonName] + [sfTxnSignature.jsonName] = + strHex(Slice{sig.data(), sig.size()}); + + env(jt.jv, ter(temBAD_SIGNATURE)); + env.close(); + } + + // temBAD_SIGNER: Batch: invalid batch signers. + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 2, 2); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), seq + 1), + batch::inner(pay(bob, alice, XRP(5)), env.seq(bob)), + batch::inner(pay(carol, alice, XRP(5)), env.seq(carol)), + batch::sig(bob), + ter(temBAD_SIGNER)); + env.close(); + } + } + + void + testPreclaim(FeatureBitset features) + { + testcase("preclaim"); + + using namespace test::jtx; + using namespace std::literals; + + //---------------------------------------------------------------------- + // preclaim + + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const carol = Account("carol"); + auto const dave = Account("dave"); + auto const elsa = Account("elsa"); + auto const frank = Account("frank"); + auto const phantom = Account("phantom"); + env.memoize(phantom); + + env.fund(XRP(10000), alice, bob, carol, dave, elsa, frank); + env.close(); + + //---------------------------------------------------------------------- + // checkSign.checkSingleSign + + // tefBAD_AUTH: Bob is not authorized to sign for Alice + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 3, 2); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), seq + 1), + batch::inner(pay(alice, bob, XRP(20)), seq + 2), + sig(bob), + ter(tefBAD_AUTH)); + env.close(); + } + + //---------------------------------------------------------------------- + // checkBatchSign.checkMultiSign + + // tefNOT_MULTI_SIGNING: SignersList not enabled + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 3, 2); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), seq + 1), + batch::inner(pay(bob, alice, XRP(5)), env.seq(bob)), + batch::msig(bob, {dave, carol}), + ter(tefNOT_MULTI_SIGNING)); + env.close(); + } + + env(signers(alice, 2, {{bob, 1}, {carol, 1}})); + env.close(); + + env(signers(bob, 2, {{carol, 1}, {dave, 1}, {elsa, 1}})); + env.close(); + + // tefBAD_SIGNATURE: Account not in SignersList + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 3, 2); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), seq + 1), + batch::inner(pay(bob, alice, XRP(5)), env.seq(bob)), + batch::msig(bob, {carol, frank}), + ter(tefBAD_SIGNATURE)); + env.close(); + } + + // tefBAD_SIGNATURE: Wrong publicKey type + { + auto const 
seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 3, 2); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), seq + 1), + batch::inner(pay(bob, alice, XRP(5)), env.seq(bob)), + batch::msig(bob, {carol, Account("dave", KeyType::ed25519)}), + ter(tefBAD_SIGNATURE)); + env.close(); + } + + // tefMASTER_DISABLED: Master key disabled + { + env(regkey(elsa, frank)); + env(fset(elsa, asfDisableMaster), sig(elsa)); + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 3, 2); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), seq + 1), + batch::inner(pay(bob, alice, XRP(5)), env.seq(bob)), + batch::msig(bob, {carol, elsa}), + ter(tefMASTER_DISABLED)); + env.close(); + } + + // tefBAD_SIGNATURE: Signer does not exist + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 3, 2); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), seq + 1), + batch::inner(pay(bob, alice, XRP(5)), env.seq(bob)), + batch::msig(bob, {carol, phantom}), + ter(tefBAD_SIGNATURE)); + env.close(); + } + + // tefBAD_SIGNATURE: Signer has not enabled RegularKey + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 3, 2); + Account const davo{"davo", KeyType::ed25519}; + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), seq + 1), + batch::inner(pay(bob, alice, XRP(5)), env.seq(bob)), + batch::msig(bob, {carol, Reg{dave, davo}}), + ter(tefBAD_SIGNATURE)); + env.close(); + } + + // tefBAD_SIGNATURE: Wrong RegularKey Set + { + env(regkey(dave, frank)); + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 3, 2); + Account const davo{"davo", KeyType::ed25519}; + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), seq + 1), + batch::inner(pay(bob, alice, XRP(5)), env.seq(bob)), + batch::msig(bob, {carol, Reg{dave, davo}}), + ter(tefBAD_SIGNATURE)); + env.close(); + } + + // tefBAD_QUORUM + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 2, 2); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), seq + 1), + batch::inner(pay(bob, alice, XRP(5)), env.seq(bob)), + batch::msig(bob, {carol}), + ter(tefBAD_QUORUM)); + env.close(); + } + + // tesSUCCESS: BatchSigners.Signers + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 3, 2); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), seq + 1), + batch::inner(pay(bob, alice, XRP(5)), env.seq(bob)), + batch::msig(bob, {carol, dave}), + ter(tesSUCCESS)); + env.close(); + } + + // tesSUCCESS: Multisign + BatchSigners.Signers + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 4, 2); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), seq + 1), + batch::inner(pay(bob, alice, XRP(5)), env.seq(bob)), + batch::msig(bob, {carol, dave}), + msig(bob, carol), + ter(tesSUCCESS)); + env.close(); + } + + //---------------------------------------------------------------------- + // checkBatchSign.checkSingleSign + + // tefBAD_AUTH: Inner Account is not signer + { + auto const ledSeq = env.current()->seq(); + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 
1, 2); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, phantom, XRP(1000)), seq + 1), + batch::inner(noop(phantom), ledSeq), + batch::sig(Reg{phantom, carol}), + ter(tefBAD_AUTH)); + env.close(); + } + + // tefBAD_AUTH: Account is not signer + { + auto const ledSeq = env.current()->seq(); + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 1, 2); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1000)), seq + 1), + batch::inner(noop(bob), ledSeq), + batch::sig(Reg{bob, carol}), + ter(tefBAD_AUTH)); + env.close(); + } + + // tesSUCCESS: Signed With Regular Key + { + env(regkey(bob, carol)); + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 1, 2); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1)), seq + 1), + batch::inner(pay(bob, alice, XRP(2)), env.seq(bob)), + batch::sig(Reg{bob, carol}), + ter(tesSUCCESS)); + env.close(); + } + + // tesSUCCESS: Signed With Master Key + { + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 1, 2); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1)), seq + 1), + batch::inner(pay(bob, alice, XRP(2)), env.seq(bob)), + batch::sig(bob), + ter(tesSUCCESS)); + env.close(); + } + + // tefMASTER_DISABLED: Signed With Master Key Disabled + { + env(regkey(bob, carol)); + env(fset(bob, asfDisableMaster), sig(bob)); + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 1, 2); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1)), seq + 1), + batch::inner(pay(bob, alice, XRP(2)), env.seq(bob)), + batch::sig(bob), + ter(tefMASTER_DISABLED)); + env.close(); + } + } + + void + testBadRawTxn(FeatureBitset features) + { + testcase("bad raw txn"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + + env.fund(XRP(10000), alice, bob); + + // Invalid: sfTransactionType + { + auto const batchFee = batch::calcBatchFee(env, 1, 2); + auto const seq = env.seq(alice); + auto tx1 = batch::inner(pay(alice, bob, XRP(10)), seq + 1); + tx1.removeMember(jss::TransactionType); + auto jt = env.jtnofill( + batch::outer(alice, seq, batchFee, tfAllOrNothing), + tx1, + batch::inner(pay(alice, bob, XRP(10)), seq + 2)); + + env(jt.jv, batch::sig(bob), ter(telENV_RPC_FAILED)); + env.close(); + } + + // Invalid: sfAccount + { + auto const batchFee = batch::calcBatchFee(env, 1, 2); + auto const seq = env.seq(alice); + auto tx1 = batch::inner(pay(alice, bob, XRP(10)), seq + 1); + tx1.removeMember(jss::Account); + auto jt = env.jtnofill( + batch::outer(alice, seq, batchFee, tfAllOrNothing), + tx1, + batch::inner(pay(alice, bob, XRP(10)), seq + 2)); + + env(jt.jv, batch::sig(bob), ter(telENV_RPC_FAILED)); + env.close(); + } + + // Invalid: sfSequence + { + auto const batchFee = batch::calcBatchFee(env, 1, 2); + auto const seq = env.seq(alice); + auto tx1 = batch::inner(pay(alice, bob, XRP(10)), seq + 1); + tx1.removeMember(jss::Sequence); + auto jt = env.jtnofill( + batch::outer(alice, seq, batchFee, tfAllOrNothing), + tx1, + batch::inner(pay(alice, bob, XRP(10)), seq + 2)); + + env(jt.jv, batch::sig(bob), ter(telENV_RPC_FAILED)); + env.close(); + } + + // Invalid: sfFee + { + auto const batchFee = batch::calcBatchFee(env, 1, 2); + auto 
const seq = env.seq(alice); + auto tx1 = batch::inner(pay(alice, bob, XRP(10)), seq + 1); + tx1.removeMember(jss::Fee); + auto jt = env.jtnofill( + batch::outer(alice, seq, batchFee, tfAllOrNothing), + tx1, + batch::inner(pay(alice, bob, XRP(10)), seq + 2)); + + env(jt.jv, batch::sig(bob), ter(telENV_RPC_FAILED)); + env.close(); + } + + // Invalid: sfSigningPubKey + { + auto const batchFee = batch::calcBatchFee(env, 1, 2); + auto const seq = env.seq(alice); + auto tx1 = batch::inner(pay(alice, bob, XRP(10)), seq + 1); + tx1.removeMember(jss::SigningPubKey); + auto jt = env.jtnofill( + batch::outer(alice, seq, batchFee, tfAllOrNothing), + tx1, + batch::inner(pay(alice, bob, XRP(10)), seq + 2)); + + env(jt.jv, batch::sig(bob), ter(telENV_RPC_FAILED)); + env.close(); + } + } + + void + testBadSequence(FeatureBitset features) + { + testcase("bad sequence"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + auto const USD = gw["USD"]; + + env.fund(XRP(10000), alice, bob, gw); + env.close(); + env.trust(USD(1000), alice, bob); + env(pay(gw, alice, USD(100))); + env(pay(gw, bob, USD(100))); + env.close(); + + env(noop(bob), ter(tesSUCCESS)); + env.close(); + + // Invalid: Alice Sequence is a past sequence + { + auto const preAliceSeq = env.seq(alice); + auto const preAlice = env.balance(alice); + auto const preAliceUSD = env.balance(alice, USD.issue()); + auto const preBobSeq = env.seq(bob); + auto const preBob = env.balance(bob); + auto const preBobUSD = env.balance(bob, USD.issue()); + + auto const batchFee = batch::calcBatchFee(env, 1, 2); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, preAliceSeq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), preAliceSeq - 10), + batch::inner(pay(bob, alice, XRP(5)), preBobSeq), + batch::sig(bob)); + + env.close(); + { + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + }; + validateClosedLedger(env, testCases); + } + + env.close(); + { + // next ledger is empty + std::vector testCases = {}; + validateClosedLedger(env, testCases); + } + + // Alice pays fee & Bob should not be affected. 
+ BEAST_EXPECT(env.seq(alice) == preAliceSeq + 1); + BEAST_EXPECT(env.balance(alice) == preAlice - batchFee); + BEAST_EXPECT(env.balance(alice, USD.issue()) == preAliceUSD); + BEAST_EXPECT(env.seq(bob) == preBobSeq); + BEAST_EXPECT(env.balance(bob) == preBob); + BEAST_EXPECT(env.balance(bob, USD.issue()) == preBobUSD); + } + + // Invalid: Alice Sequence is a future sequence + { + auto const preAliceSeq = env.seq(alice); + auto const preAlice = env.balance(alice); + auto const preAliceUSD = env.balance(alice, USD.issue()); + auto const preBobSeq = env.seq(bob); + auto const preBob = env.balance(bob); + auto const preBobUSD = env.balance(bob, USD.issue()); + + auto const batchFee = batch::calcBatchFee(env, 1, 2); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, preAliceSeq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), preAliceSeq + 10), + batch::inner(pay(bob, alice, XRP(5)), preBobSeq), + batch::sig(bob)); + + env.close(); + { + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + }; + validateClosedLedger(env, testCases); + } + + env.close(); + { + // next ledger is empty + std::vector testCases = {}; + validateClosedLedger(env, testCases); + } + + // Alice pays fee & Bob should not be affected. + BEAST_EXPECT(env.seq(alice) == preAliceSeq + 1); + BEAST_EXPECT(env.balance(alice) == preAlice - batchFee); + BEAST_EXPECT(env.balance(alice, USD.issue()) == preAliceUSD); + BEAST_EXPECT(env.seq(bob) == preBobSeq); + BEAST_EXPECT(env.balance(bob) == preBob); + BEAST_EXPECT(env.balance(bob, USD.issue()) == preBobUSD); + } + + // Invalid: Bob Sequence is a past sequence + { + auto const preAliceSeq = env.seq(alice); + auto const preAlice = env.balance(alice); + auto const preAliceUSD = env.balance(alice, USD.issue()); + auto const preBobSeq = env.seq(bob); + auto const preBob = env.balance(bob); + auto const preBobUSD = env.balance(bob, USD.issue()); + + auto const batchFee = batch::calcBatchFee(env, 1, 2); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, preAliceSeq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), preAliceSeq + 1), + batch::inner(pay(bob, alice, XRP(5)), preBobSeq - 10), + batch::sig(bob)); + + env.close(); + { + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + }; + validateClosedLedger(env, testCases); + } + + env.close(); + { + // next ledger is empty + std::vector testCases = {}; + validateClosedLedger(env, testCases); + } + + // Alice pays fee & Bob should not be affected. 
+ BEAST_EXPECT(env.seq(alice) == preAliceSeq + 1); + BEAST_EXPECT(env.balance(alice) == preAlice - batchFee); + BEAST_EXPECT(env.balance(alice, USD.issue()) == preAliceUSD); + BEAST_EXPECT(env.seq(bob) == preBobSeq); + BEAST_EXPECT(env.balance(bob) == preBob); + BEAST_EXPECT(env.balance(bob, USD.issue()) == preBobUSD); + } + + // Invalid: Bob Sequence is a future sequence + { + auto const preAliceSeq = env.seq(alice); + auto const preAlice = env.balance(alice); + auto const preAliceUSD = env.balance(alice, USD.issue()); + auto const preBobSeq = env.seq(bob); + auto const preBob = env.balance(bob); + auto const preBobUSD = env.balance(bob, USD.issue()); + + auto const batchFee = batch::calcBatchFee(env, 1, 2); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, preAliceSeq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), preAliceSeq + 1), + batch::inner(pay(bob, alice, XRP(5)), preBobSeq + 10), + batch::sig(bob)); + + env.close(); + { + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + }; + validateClosedLedger(env, testCases); + } + + env.close(); + { + // next ledger is empty + std::vector testCases = {}; + validateClosedLedger(env, testCases); + } + + // Alice pays fee & Bob should not be affected. + BEAST_EXPECT(env.seq(alice) == preAliceSeq + 1); + BEAST_EXPECT(env.balance(alice) == preAlice - batchFee); + BEAST_EXPECT(env.balance(alice, USD.issue()) == preAliceUSD); + BEAST_EXPECT(env.seq(bob) == preBobSeq); + BEAST_EXPECT(env.balance(bob) == preBob); + BEAST_EXPECT(env.balance(bob, USD.issue()) == preBobUSD); + } + + // Invalid: Outer and Inner Sequence are the same + { + auto const preAliceSeq = env.seq(alice); + auto const preAlice = env.balance(alice); + auto const preAliceUSD = env.balance(alice, USD.issue()); + auto const preBobSeq = env.seq(bob); + auto const preBob = env.balance(bob); + auto const preBobUSD = env.balance(bob, USD.issue()); + + auto const batchFee = batch::calcBatchFee(env, 1, 2); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, preAliceSeq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), preAliceSeq), + batch::inner(pay(bob, alice, XRP(5)), preBobSeq), + batch::sig(bob)); + + env.close(); + { + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + }; + validateClosedLedger(env, testCases); + } + + env.close(); + { + // next ledger is empty + std::vector testCases = {}; + validateClosedLedger(env, testCases); + } + + // Alice pays fee & Bob should not be affected. 
+ BEAST_EXPECT(env.seq(alice) == preAliceSeq + 1); + BEAST_EXPECT(env.balance(alice) == preAlice - batchFee); + BEAST_EXPECT(env.balance(alice, USD.issue()) == preAliceUSD); + BEAST_EXPECT(env.seq(bob) == preBobSeq); + BEAST_EXPECT(env.balance(bob) == preBob); + BEAST_EXPECT(env.balance(bob, USD.issue()) == preBobUSD); + } + } + + void + testBadOuterFee(FeatureBitset features) + { + testcase("bad outer fee"); + + using namespace test::jtx; + using namespace std::literals; + + // Bad Fee Without Signer + { + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + env.fund(XRP(10000), alice, bob); + env.close(); + + env(noop(bob), ter(tesSUCCESS)); + env.close(); + + // Bad Fee: Should be batch::calcBatchFee(env, 0, 2) + auto const batchFee = batch::calcBatchFee(env, 0, 1); + auto const aliceSeq = env.seq(alice); + env(batch::outer(alice, aliceSeq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), aliceSeq + 1), + batch::inner(pay(alice, bob, XRP(15)), aliceSeq + 2), + ter(telINSUF_FEE_P)); + env.close(); + } + + // Bad Fee With MultiSign + { + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const carol = Account("carol"); + env.fund(XRP(10000), alice, bob, carol); + env.close(); + + env(noop(bob), ter(tesSUCCESS)); + env.close(); + + env(signers(alice, 2, {{bob, 1}, {carol, 1}})); + env.close(); + + // Bad Fee: Should be batch::calcBatchFee(env, 2, 2) + auto const batchFee = batch::calcBatchFee(env, 1, 2); + auto const aliceSeq = env.seq(alice); + env(batch::outer(alice, aliceSeq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), aliceSeq + 1), + batch::inner(pay(alice, bob, XRP(15)), aliceSeq + 2), + msig(bob, carol), + ter(telINSUF_FEE_P)); + env.close(); + } + + // Bad Fee With MultiSign + BatchSigners + { + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const carol = Account("carol"); + env.fund(XRP(10000), alice, bob, carol); + env.close(); + + env(noop(bob), ter(tesSUCCESS)); + env.close(); + + env(signers(alice, 2, {{bob, 1}, {carol, 1}})); + env.close(); + + // Bad Fee: Should be batch::calcBatchFee(env, 3, 2) + auto const batchFee = batch::calcBatchFee(env, 2, 2); + auto const aliceSeq = env.seq(alice); + auto const bobSeq = env.seq(bob); + env(batch::outer(alice, aliceSeq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), aliceSeq + 1), + batch::inner(pay(bob, alice, XRP(5)), bobSeq), + batch::sig(bob), + msig(bob, carol), + ter(telINSUF_FEE_P)); + env.close(); + } + + // Bad Fee With MultiSign + BatchSigners.Signers + { + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const carol = Account("carol"); + env.fund(XRP(10000), alice, bob, carol); + env.close(); + + env(noop(bob), ter(tesSUCCESS)); + env.close(); + + env(signers(alice, 2, {{bob, 1}, {carol, 1}})); + env.close(); + + env(signers(bob, 2, {{alice, 1}, {carol, 1}})); + env.close(); + + // Bad Fee: Should be batch::calcBatchFee(env, 4, 2) + auto const batchFee = batch::calcBatchFee(env, 3, 2); + auto const aliceSeq = env.seq(alice); + auto const bobSeq = env.seq(bob); + env(batch::outer(alice, aliceSeq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), aliceSeq + 1), + batch::inner(pay(bob, alice, XRP(5)), bobSeq), + batch::msig(bob, {alice, carol}), + msig(bob, carol), 
+ ter(telINSUF_FEE_P)); + env.close(); + } + + // Bad Fee With BatchSigners + { + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + env.fund(XRP(10000), alice, bob); + env.close(); + + env(noop(bob), ter(tesSUCCESS)); + env.close(); + + // Bad Fee: Should be batch::calcBatchFee(env, 1, 2) + auto const batchFee = batch::calcBatchFee(env, 0, 2); + auto const aliceSeq = env.seq(alice); + auto const bobSeq = env.seq(bob); + env(batch::outer(alice, aliceSeq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), aliceSeq + 1), + batch::inner(pay(bob, alice, XRP(5)), bobSeq), + batch::sig(bob), + ter(telINSUF_FEE_P)); + env.close(); + } + + // Bad Fee Dynamic Fee Calculation + { + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + auto const USD = gw["USD"]; + + env.fund(XRP(10000), alice, bob, gw); + env.close(); + auto const ammCreate = + [&alice](STAmount const& amount, STAmount const& amount2) { + Json::Value jv; + jv[jss::Account] = alice.human(); + jv[jss::Amount] = amount.getJson(JsonOptions::none); + jv[jss::Amount2] = amount2.getJson(JsonOptions::none); + jv[jss::TradingFee] = 0; + jv[jss::TransactionType] = jss::AMMCreate; + return jv; + }; + + auto const batchFee = batch::calcBatchFee(env, 0, 2); + auto const seq = env.seq(alice); + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(ammCreate(XRP(10), USD(10)), seq + 1), + batch::inner(pay(alice, bob, XRP(10)), seq + 2), + ter(telINSUF_FEE_P)); + env.close(); + } + } + + void + testCalculateBaseFee(FeatureBitset features) + { + testcase("calculate base fee"); + + using namespace test::jtx; + using namespace std::literals; + + // telENV_RPC_FAILED: Batch: txns array exceeds 8 entries. + { + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + env.fund(XRP(10000), alice, bob); + env.close(); + + auto const batchFee = batch::calcBatchFee(env, 0, 9); + auto const aliceSeq = env.seq(alice); + env(batch::outer(alice, aliceSeq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1)), aliceSeq), + batch::inner(pay(alice, bob, XRP(1)), aliceSeq), + batch::inner(pay(alice, bob, XRP(1)), aliceSeq), + batch::inner(pay(alice, bob, XRP(1)), aliceSeq), + batch::inner(pay(alice, bob, XRP(1)), aliceSeq), + batch::inner(pay(alice, bob, XRP(1)), aliceSeq), + batch::inner(pay(alice, bob, XRP(1)), aliceSeq), + batch::inner(pay(alice, bob, XRP(1)), aliceSeq), + batch::inner(pay(alice, bob, XRP(1)), aliceSeq), + ter(telENV_RPC_FAILED)); + env.close(); + } + + // temARRAY_TOO_LARGE: Batch: txns array exceeds 8 entries. 
+ { + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + env.fund(XRP(10000), alice, bob); + env.close(); + + auto const batchFee = batch::calcBatchFee(env, 0, 9); + auto const aliceSeq = env.seq(alice); + auto jt = env.jtnofill( + batch::outer(alice, aliceSeq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1)), aliceSeq), + batch::inner(pay(alice, bob, XRP(1)), aliceSeq), + batch::inner(pay(alice, bob, XRP(1)), aliceSeq), + batch::inner(pay(alice, bob, XRP(1)), aliceSeq), + batch::inner(pay(alice, bob, XRP(1)), aliceSeq), + batch::inner(pay(alice, bob, XRP(1)), aliceSeq), + batch::inner(pay(alice, bob, XRP(1)), aliceSeq), + batch::inner(pay(alice, bob, XRP(1)), aliceSeq), + batch::inner(pay(alice, bob, XRP(1)), aliceSeq)); + + env.app().openLedger().modify( + [&](OpenView& view, beast::Journal j) { + auto const result = + ripple::apply(env.app(), view, *jt.stx, tapNONE, j); + BEAST_EXPECT( + !result.applied && result.ter == temARRAY_TOO_LARGE); + return result.applied; + }); + } + + // telENV_RPC_FAILED: Batch: signers array exceeds 8 entries. + { + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + env.fund(XRP(10000), alice, bob); + env.close(); + + auto const aliceSeq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 9, 2); + env(batch::outer(alice, aliceSeq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), aliceSeq + 1), + batch::inner(pay(alice, bob, XRP(5)), aliceSeq + 2), + batch::sig(bob, bob, bob, bob, bob, bob, bob, bob, bob, bob), + ter(telENV_RPC_FAILED)); + env.close(); + } + + // temARRAY_TOO_LARGE: Batch: signers array exceeds 8 entries. + { + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + env.fund(XRP(10000), alice, bob); + env.close(); + + auto const batchFee = batch::calcBatchFee(env, 0, 9); + auto const aliceSeq = env.seq(alice); + auto jt = env.jtnofill( + batch::outer(alice, aliceSeq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), aliceSeq + 1), + batch::inner(pay(alice, bob, XRP(5)), aliceSeq + 2), + batch::sig(bob, bob, bob, bob, bob, bob, bob, bob, bob, bob)); + + env.app().openLedger().modify( + [&](OpenView& view, beast::Journal j) { + auto const result = + ripple::apply(env.app(), view, *jt.stx, tapNONE, j); + BEAST_EXPECT( + !result.applied && result.ter == temARRAY_TOO_LARGE); + return result.applied; + }); + } + } + + void + testAllOrNothing(FeatureBitset features) + { + testcase("all or nothing"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + auto const USD = gw["USD"]; + env.fund(XRP(10000), alice, bob, gw); + env.close(); + + // all + { + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + + auto const batchFee = batch::calcBatchFee(env, 0, 2); + auto const seq = env.seq(alice); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1)), seq + 1), + batch::inner(pay(alice, bob, XRP(2)), seq + 2)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tesSUCCESS", txIDs[0], batchID}, + {2, "Payment", "tesSUCCESS", txIDs[1], 
batchID}, + }; + validateClosedLedger(env, testCases); + + // Alice consumes sequences (# of txns) + BEAST_EXPECT(env.seq(alice) == seq + 3); + + // Alice pays XRP & Fee; Bob receives XRP + BEAST_EXPECT(env.balance(alice) == preAlice - XRP(3) - batchFee); + BEAST_EXPECT(env.balance(bob) == preBob + XRP(3)); + } + + // tec failure + { + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + + auto const batchFee = batch::calcBatchFee(env, 0, 2); + auto const seq = env.seq(alice); + + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1)), seq + 1), + // tecUNFUNDED_PAYMENT: alice does not have enough XRP + batch::inner(pay(alice, bob, XRP(9999)), seq + 2)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + }; + validateClosedLedger(env, testCases); + + // Alice consumes sequence + BEAST_EXPECT(env.seq(alice) == seq + 1); + + // Alice pays Fee; Bob should not be affected + BEAST_EXPECT(env.balance(alice) == preAlice - batchFee); + BEAST_EXPECT(env.balance(bob) == preBob); + } + + // tef failure + { + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + + auto const batchFee = batch::calcBatchFee(env, 0, 2); + auto const seq = env.seq(alice); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1)), seq + 1), + // tefNO_AUTH_REQUIRED: trustline auth is not required + batch::inner(trust(alice, USD(1000), tfSetfAuth), seq + 2)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + }; + validateClosedLedger(env, testCases); + + // Alice consumes sequence + BEAST_EXPECT(env.seq(alice) == seq + 1); + + // Alice pays Fee; Bob should not be affected + BEAST_EXPECT(env.balance(alice) == preAlice - batchFee); + BEAST_EXPECT(env.balance(bob) == preBob); + } + + // ter failure + { + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + + auto const batchFee = batch::calcBatchFee(env, 0, 2); + auto const seq = env.seq(alice); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1)), seq + 1), + // terPRE_TICKET: ticket does not exist + batch::inner(trust(alice, USD(1000), tfSetfAuth), 0, seq + 2)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + }; + validateClosedLedger(env, testCases); + + // Alice consumes sequence + BEAST_EXPECT(env.seq(alice) == seq + 1); + + // Alice pays Fee; Bob should not be affected + BEAST_EXPECT(env.balance(alice) == preAlice - batchFee); + BEAST_EXPECT(env.balance(bob) == preBob); + } + } + + void + testOnlyOne(FeatureBitset features) + { + testcase("only one"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const carol = Account("carol"); + auto const dave = Account("dave"); + auto const gw = Account("gw"); + auto const USD = gw["USD"]; + env.fund(XRP(10000), alice, bob, carol, dave, gw); + env.close(); + + // all transactions fail + { + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + + auto const batchFee = batch::calcBatchFee(env, 0, 3); + auto const seq = 
env.seq(alice); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, seq, batchFee, tfOnlyOne), + // tecUNFUNDED_PAYMENT: alice does not have enough XRP + batch::inner(pay(alice, bob, XRP(9999)), seq + 1), + // tecUNFUNDED_PAYMENT: alice does not have enough XRP + batch::inner(pay(alice, bob, XRP(9999)), seq + 2), + // tecUNFUNDED_PAYMENT: alice does not have enough XRP + batch::inner(pay(alice, bob, XRP(9999)), seq + 3)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tecUNFUNDED_PAYMENT", txIDs[0], batchID}, + {2, "Payment", "tecUNFUNDED_PAYMENT", txIDs[1], batchID}, + {3, "Payment", "tecUNFUNDED_PAYMENT", txIDs[2], batchID}, + }; + validateClosedLedger(env, testCases); + + // Alice consumes sequences (# of txns) + BEAST_EXPECT(env.seq(alice) == seq + 4); + + // Alice pays XRP & Fee; Bob receives XRP + BEAST_EXPECT(env.balance(alice) == preAlice - batchFee); + BEAST_EXPECT(env.balance(bob) == preBob); + } + + // first transaction fails + { + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + + auto const batchFee = batch::calcBatchFee(env, 0, 3); + auto const seq = env.seq(alice); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, seq, batchFee, tfOnlyOne), + // tecUNFUNDED_PAYMENT: alice does not have enough XRP + batch::inner(pay(alice, bob, XRP(9999)), seq + 1), + batch::inner(pay(alice, bob, XRP(1)), seq + 2), + batch::inner(pay(alice, bob, XRP(2)), seq + 3)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tecUNFUNDED_PAYMENT", txIDs[0], batchID}, + {2, "Payment", "tesSUCCESS", txIDs[1], batchID}, + }; + validateClosedLedger(env, testCases); + + // Alice consumes sequences (# of txns) + BEAST_EXPECT(env.seq(alice) == seq + 3); + + // Alice pays XRP & Fee; Bob receives XRP + BEAST_EXPECT(env.balance(alice) == preAlice - XRP(1) - batchFee); + BEAST_EXPECT(env.balance(bob) == preBob + XRP(1)); + } + + // tec failure + { + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + + auto const batchFee = batch::calcBatchFee(env, 0, 3); + auto const seq = env.seq(alice); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, seq, batchFee, tfOnlyOne), + batch::inner(pay(alice, bob, XRP(1)), seq + 1), + // tecUNFUNDED_PAYMENT: alice does not have enough XRP + batch::inner(pay(alice, bob, XRP(9999)), seq + 2), + batch::inner(pay(alice, bob, XRP(2)), seq + 3)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tesSUCCESS", txIDs[0], batchID}, + }; + validateClosedLedger(env, testCases); + + // Alice consumes sequences (# of txns) + BEAST_EXPECT(env.seq(alice) == seq + 2); + + // Alice pays XRP & Fee; Bob receives XRP + BEAST_EXPECT(env.balance(alice) == preAlice - XRP(1) - batchFee); + BEAST_EXPECT(env.balance(bob) == preBob + XRP(1)); + } + + // tef failure + { + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + + auto const batchFee = batch::calcBatchFee(env, 0, 3); + auto const seq = env.seq(alice); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, seq, batchFee, tfOnlyOne), + // tefNO_AUTH_REQUIRED: trustline auth is not required + batch::inner(trust(alice, USD(1000), tfSetfAuth), seq + 1), + batch::inner(pay(alice, bob, XRP(1)), seq + 1), + 
batch::inner(pay(alice, bob, XRP(2)), seq + 3)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tesSUCCESS", txIDs[1], batchID}, + }; + validateClosedLedger(env, testCases); + + // Alice consumes sequences (# of txns) + BEAST_EXPECT(env.seq(alice) == seq + 2); + + // Alice pays XRP & Fee; Bob receives XRP + BEAST_EXPECT(env.balance(alice) == preAlice - batchFee - XRP(1)); + BEAST_EXPECT(env.balance(bob) == preBob + XRP(1)); + } + + // ter failure + { + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + + auto const batchFee = batch::calcBatchFee(env, 0, 3); + auto const seq = env.seq(alice); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, seq, batchFee, tfOnlyOne), + // terPRE_TICKET: ticket does not exist + batch::inner(trust(alice, USD(1000), tfSetfAuth), 0, seq + 1), + batch::inner(pay(alice, bob, XRP(1)), seq + 1), + batch::inner(pay(alice, bob, XRP(2)), seq + 3)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tesSUCCESS", txIDs[1], batchID}, + }; + validateClosedLedger(env, testCases); + + // Alice consumes sequences (# of txns) + BEAST_EXPECT(env.seq(alice) == seq + 2); + + // Alice pays XRP & Fee; Bob receives XRP + BEAST_EXPECT(env.balance(alice) == preAlice - batchFee - XRP(1)); + BEAST_EXPECT(env.balance(bob) == preBob + XRP(1)); + } + + // tec (tecKILLED) error + { + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + auto const preCarol = env.balance(carol); + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 0, 6); + + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, seq, batchFee, tfOnlyOne), + batch::inner( + offer( + alice, + alice["USD"](100), + XRP(100), + tfImmediateOrCancel), + seq + 1), + batch::inner( + offer( + alice, + alice["USD"](100), + XRP(100), + tfImmediateOrCancel), + seq + 2), + batch::inner( + offer( + alice, + alice["USD"](100), + XRP(100), + tfImmediateOrCancel), + seq + 3), + batch::inner(pay(alice, bob, XRP(100)), seq + 4), + batch::inner(pay(alice, carol, XRP(100)), seq + 5), + batch::inner(pay(alice, dave, XRP(100)), seq + 6)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "OfferCreate", "tecKILLED", txIDs[0], batchID}, + {2, "OfferCreate", "tecKILLED", txIDs[1], batchID}, + {3, "OfferCreate", "tecKILLED", txIDs[2], batchID}, + {4, "Payment", "tesSUCCESS", txIDs[3], batchID}, + }; + validateClosedLedger(env, testCases); + + BEAST_EXPECT(env.balance(alice) == preAlice - XRP(100) - batchFee); + BEAST_EXPECT(env.balance(bob) == preBob + XRP(100)); + BEAST_EXPECT(env.balance(carol) == preCarol); + } + } + + void + testUntilFailure(FeatureBitset features) + { + testcase("until failure"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const carol = Account("carol"); + auto const dave = Account("dave"); + auto const gw = Account("gw"); + auto const USD = gw["USD"]; + env.fund(XRP(10000), alice, bob, carol, dave, gw); + env.close(); + + // first transaction fails + { + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + + auto const batchFee = batch::calcBatchFee(env, 0, 4); + auto const seq = env.seq(alice); + auto const 
[txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, seq, batchFee, tfUntilFailure), + // tecUNFUNDED_PAYMENT: alice does not have enough XRP + batch::inner(pay(alice, bob, XRP(9999)), seq + 1), + batch::inner(pay(alice, bob, XRP(1)), seq + 2), + batch::inner(pay(alice, bob, XRP(2)), seq + 3), + batch::inner(pay(alice, bob, XRP(3)), seq + 4)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tecUNFUNDED_PAYMENT", txIDs[0], batchID}, + }; + validateClosedLedger(env, testCases); + + // Alice consumes sequences (# of txns) + BEAST_EXPECT(env.seq(alice) == seq + 2); + + // Alice pays XRP & Fee; Bob receives XRP + BEAST_EXPECT(env.balance(alice) == preAlice - batchFee); + BEAST_EXPECT(env.balance(bob) == preBob); + } + + // all transactions succeed + { + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + + auto const batchFee = batch::calcBatchFee(env, 0, 4); + auto const seq = env.seq(alice); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, seq, batchFee, tfUntilFailure), + batch::inner(pay(alice, bob, XRP(1)), seq + 1), + batch::inner(pay(alice, bob, XRP(2)), seq + 2), + batch::inner(pay(alice, bob, XRP(3)), seq + 3), + batch::inner(pay(alice, bob, XRP(4)), seq + 4)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tesSUCCESS", txIDs[0], batchID}, + {2, "Payment", "tesSUCCESS", txIDs[1], batchID}, + {3, "Payment", "tesSUCCESS", txIDs[2], batchID}, + {4, "Payment", "tesSUCCESS", txIDs[3], batchID}, + }; + validateClosedLedger(env, testCases); + + // Alice consumes sequences (# of txns) + BEAST_EXPECT(env.seq(alice) == seq + 5); + + // Alice pays XRP & Fee; Bob receives XRP + BEAST_EXPECT(env.balance(alice) == preAlice - XRP(10) - batchFee); + BEAST_EXPECT(env.balance(bob) == preBob + XRP(10)); + } + + // tec error + { + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + + auto const batchFee = batch::calcBatchFee(env, 0, 4); + auto const seq = env.seq(alice); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, seq, batchFee, tfUntilFailure), + batch::inner(pay(alice, bob, XRP(1)), seq + 1), + batch::inner(pay(alice, bob, XRP(2)), seq + 2), + // tecUNFUNDED_PAYMENT: alice does not have enough XRP + batch::inner(pay(alice, bob, XRP(9999)), seq + 3), + batch::inner(pay(alice, bob, XRP(3)), seq + 4)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tesSUCCESS", txIDs[0], batchID}, + {2, "Payment", "tesSUCCESS", txIDs[1], batchID}, + {3, "Payment", "tecUNFUNDED_PAYMENT", txIDs[2], batchID}, + }; + validateClosedLedger(env, testCases); + + // Alice consumes sequences (# of txns) + BEAST_EXPECT(env.seq(alice) == seq + 4); + + // Alice pays XRP & Fee; Bob receives XRP + BEAST_EXPECT(env.balance(alice) == preAlice - XRP(3) - batchFee); + BEAST_EXPECT(env.balance(bob) == preBob + XRP(3)); + } + + // tef error + { + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + + auto const batchFee = batch::calcBatchFee(env, 0, 4); + auto const seq = env.seq(alice); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, seq, batchFee, tfUntilFailure), + batch::inner(pay(alice, bob, XRP(1)), seq + 1), + batch::inner(pay(alice, bob, XRP(2)), seq + 2), + // tefNO_AUTH_REQUIRED: trustline auth 
is not required + batch::inner(trust(alice, USD(1000), tfSetfAuth), seq + 3), + batch::inner(pay(alice, bob, XRP(3)), seq + 4)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tesSUCCESS", txIDs[0], batchID}, + {2, "Payment", "tesSUCCESS", txIDs[1], batchID}, + }; + validateClosedLedger(env, testCases); + + // Alice consumes sequences (# of txns) + BEAST_EXPECT(env.seq(alice) == seq + 3); + + // Alice pays XRP & Fee; Bob receives XRP + BEAST_EXPECT(env.balance(alice) == preAlice - XRP(3) - batchFee); + BEAST_EXPECT(env.balance(bob) == preBob + XRP(3)); + } + + // ter error + { + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + + auto const batchFee = batch::calcBatchFee(env, 0, 4); + auto const seq = env.seq(alice); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, seq, batchFee, tfUntilFailure), + batch::inner(pay(alice, bob, XRP(1)), seq + 1), + batch::inner(pay(alice, bob, XRP(2)), seq + 2), + // terPRE_TICKET: ticket does not exist + batch::inner(trust(alice, USD(1000), tfSetfAuth), 0, seq + 3), + batch::inner(pay(alice, bob, XRP(3)), seq + 4)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tesSUCCESS", txIDs[0], batchID}, + {2, "Payment", "tesSUCCESS", txIDs[1], batchID}, + }; + validateClosedLedger(env, testCases); + + // Alice consumes sequences (# of txns) + BEAST_EXPECT(env.seq(alice) == seq + 3); + + // Alice pays XRP & Fee; Bob receives XRP + BEAST_EXPECT(env.balance(alice) == preAlice - XRP(3) - batchFee); + BEAST_EXPECT(env.balance(bob) == preBob + XRP(3)); + } + + // tec (tecKILLED) error + { + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + auto const preCarol = env.balance(carol); + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 0, 4); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, seq, batchFee, tfUntilFailure), + batch::inner(pay(alice, bob, XRP(100)), seq + 1), + batch::inner(pay(alice, carol, XRP(100)), seq + 2), + batch::inner( + offer( + alice, + alice["USD"](100), + XRP(100), + tfImmediateOrCancel), + seq + 3), + batch::inner(pay(alice, dave, XRP(100)), seq + 4)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tesSUCCESS", txIDs[0], batchID}, + {2, "Payment", "tesSUCCESS", txIDs[1], batchID}, + {3, "OfferCreate", "tecKILLED", txIDs[2], batchID}, + }; + validateClosedLedger(env, testCases); + + BEAST_EXPECT(env.balance(alice) == preAlice - XRP(200) - batchFee); + BEAST_EXPECT(env.balance(bob) == preBob + XRP(100)); + BEAST_EXPECT(env.balance(carol) == preCarol + XRP(100)); + } + } + + void + testIndependent(FeatureBitset features) + { + testcase("independent"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const carol = Account("carol"); + auto const gw = Account("gw"); + auto const USD = gw["USD"]; + env.fund(XRP(10000), alice, bob, carol, gw); + env.close(); + + // multiple transactions fail + { + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + + auto const batchFee = batch::calcBatchFee(env, 0, 4); + auto const seq = env.seq(alice); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + 
batch::outer(alice, seq, batchFee, tfIndependent), + batch::inner(pay(alice, bob, XRP(1)), seq + 1), + // tecUNFUNDED_PAYMENT: alice does not have enough XRP + batch::inner(pay(alice, bob, XRP(9999)), seq + 2), + // tecUNFUNDED_PAYMENT: alice does not have enough XRP + batch::inner(pay(alice, bob, XRP(9999)), seq + 3), + batch::inner(pay(alice, bob, XRP(3)), seq + 4)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tesSUCCESS", txIDs[0], batchID}, + {2, "Payment", "tecUNFUNDED_PAYMENT", txIDs[1], batchID}, + {3, "Payment", "tecUNFUNDED_PAYMENT", txIDs[2], batchID}, + {4, "Payment", "tesSUCCESS", txIDs[3], batchID}, + }; + validateClosedLedger(env, testCases); + + // Alice consumes sequences (# of txns) + BEAST_EXPECT(env.seq(alice) == seq + 5); + + // Alice pays XRP & Fee; Bob receives XRP + BEAST_EXPECT(env.balance(alice) == preAlice - XRP(4) - batchFee); + BEAST_EXPECT(env.balance(bob) == preBob + XRP(4)); + } + + // tec error + { + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + + auto const batchFee = batch::calcBatchFee(env, 0, 4); + auto const seq = env.seq(alice); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, seq, batchFee, tfIndependent), + batch::inner(pay(alice, bob, XRP(1)), seq + 1), + batch::inner(pay(alice, bob, XRP(2)), seq + 2), + // tecUNFUNDED_PAYMENT: alice does not have enough XRP + batch::inner(pay(alice, bob, XRP(9999)), seq + 3), + batch::inner(pay(alice, bob, XRP(3)), seq + 4)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tesSUCCESS", txIDs[0], batchID}, + {2, "Payment", "tesSUCCESS", txIDs[1], batchID}, + {3, "Payment", "tecUNFUNDED_PAYMENT", txIDs[2], batchID}, + {4, "Payment", "tesSUCCESS", txIDs[3], batchID}, + }; + validateClosedLedger(env, testCases); + + // Alice consumes sequences (# of txns) + BEAST_EXPECT(env.seq(alice) == seq + 5); + + // Alice pays XRP & Fee; Bob receives XRP + BEAST_EXPECT(env.balance(alice) == preAlice - XRP(6) - batchFee); + BEAST_EXPECT(env.balance(bob) == preBob + XRP(6)); + } + + // tef error + { + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + + auto const batchFee = batch::calcBatchFee(env, 0, 4); + auto const seq = env.seq(alice); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, seq, batchFee, tfIndependent), + batch::inner(pay(alice, bob, XRP(1)), seq + 1), + batch::inner(pay(alice, bob, XRP(2)), seq + 2), + // tefNO_AUTH_REQUIRED: trustline auth is not required + batch::inner(trust(alice, USD(1000), tfSetfAuth), seq + 3), + batch::inner(pay(alice, bob, XRP(3)), seq + 3)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tesSUCCESS", txIDs[0], batchID}, + {2, "Payment", "tesSUCCESS", txIDs[1], batchID}, + {3, "Payment", "tesSUCCESS", txIDs[3], batchID}, + }; + validateClosedLedger(env, testCases); + + // Alice consumes sequences (# of txns) + BEAST_EXPECT(env.seq(alice) == seq + 4); + + // Alice pays XRP & Fee; Bob receives XRP + BEAST_EXPECT(env.balance(alice) == preAlice - batchFee - XRP(6)); + BEAST_EXPECT(env.balance(bob) == preBob + XRP(6)); + } + + // ter error + { + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + + auto const batchFee = batch::calcBatchFee(env, 0, 4); + auto const seq = env.seq(alice); + auto const [txIDs, 
batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, seq, batchFee, tfIndependent), + batch::inner(pay(alice, bob, XRP(1)), seq + 1), + batch::inner(pay(alice, bob, XRP(2)), seq + 2), + // terPRE_TICKET: ticket does not exist + batch::inner(trust(alice, USD(1000), tfSetfAuth), 0, seq + 3), + batch::inner(pay(alice, bob, XRP(3)), seq + 3)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tesSUCCESS", txIDs[0], batchID}, + {2, "Payment", "tesSUCCESS", txIDs[1], batchID}, + {3, "Payment", "tesSUCCESS", txIDs[3], batchID}, + }; + validateClosedLedger(env, testCases); + + // Alice consumes sequences (# of txns) + BEAST_EXPECT(env.seq(alice) == seq + 4); + + // Alice pays XRP & Fee; Bob receives XRP + BEAST_EXPECT(env.balance(alice) == preAlice - batchFee - XRP(6)); + BEAST_EXPECT(env.balance(bob) == preBob + XRP(6)); + } + + // tec (tecKILLED) error + { + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + auto const preCarol = env.balance(carol); + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 0, 3); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, seq, batchFee, tfIndependent), + batch::inner(pay(alice, bob, XRP(100)), seq + 1), + batch::inner(pay(alice, carol, XRP(100)), seq + 2), + batch::inner( + offer( + alice, + alice["USD"](100), + XRP(100), + tfImmediateOrCancel), + seq + 3)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tesSUCCESS", txIDs[0], batchID}, + {2, "Payment", "tesSUCCESS", txIDs[1], batchID}, + {3, "OfferCreate", "tecKILLED", txIDs[2], batchID}, + }; + validateClosedLedger(env, testCases); + + BEAST_EXPECT(env.balance(alice) == preAlice - XRP(200) - batchFee); + BEAST_EXPECT(env.balance(bob) == preBob + XRP(100)); + BEAST_EXPECT(env.balance(carol) == preCarol + XRP(100)); + } + } + + void + testInnerSubmitRPC(FeatureBitset features) + { + testcase("inner submit rpc"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + + env.fund(XRP(10000), alice, bob); + env.close(); + + auto submitAndValidate = [&](Slice const& slice) { + auto const jrr = env.rpc("submit", strHex(slice))[jss::result]; + BEAST_EXPECT( + jrr[jss::status] == "error" && + jrr[jss::error] == "invalidTransaction" && + jrr[jss::error_exception] == + "fails local checks: Malformed: Invalid inner batch " + "transaction."); + env.close(); + }; + + // Invalid RPC Submission: TxnSignature + // - has `TxnSignature` field + // - has no `SigningPubKey` field + // - has no `Signers` field + // - has `tfInnerBatchTxn` flag + { + auto txn = batch::inner(pay(alice, bob, XRP(1)), env.seq(alice)); + txn[sfTxnSignature] = "DEADBEEF"; + STParsedJSONObject parsed("test", txn.getTxn()); + Serializer s; + parsed.object->add(s); + submitAndValidate(s.slice()); + } + + // Invalid RPC Submission: SigningPubKey + // - has no `TxnSignature` field + // - has `SigningPubKey` field + // - has no `Signers` field + // - has `tfInnerBatchTxn` flag + { + auto txn = batch::inner(pay(alice, bob, XRP(1)), env.seq(alice)); + txn[sfSigningPubKey] = strHex(alice.pk()); + STParsedJSONObject parsed("test", txn.getTxn()); + Serializer s; + parsed.object->add(s); + submitAndValidate(s.slice()); + } + + // Invalid RPC Submission: Signers + // - has no `TxnSignature` 
field + // - has empty `SigningPubKey` field + // - has `Signers` field + // - has `tfInnerBatchTxn` flag + { + auto txn = batch::inner(pay(alice, bob, XRP(1)), env.seq(alice)); + txn[sfSigners] = Json::arrayValue; + STParsedJSONObject parsed("test", txn.getTxn()); + Serializer s; + parsed.object->add(s); + submitAndValidate(s.slice()); + } + + // Invalid RPC Submission: tfInnerBatchTxn + // - has no `TxnSignature` field + // - has empty `SigningPubKey` field + // - has no `Signers` field + // - has `tfInnerBatchTxn` flag + { + auto txn = batch::inner(pay(alice, bob, XRP(1)), env.seq(alice)); + STParsedJSONObject parsed("test", txn.getTxn()); + Serializer s; + parsed.object->add(s); + auto const jrr = env.rpc("submit", strHex(s.slice()))[jss::result]; + BEAST_EXPECT( + jrr[jss::status] == "success" && + jrr[jss::engine_result] == "temINVALID_FLAG"); + + env.close(); + } + } + + void + testAccountActivation(FeatureBitset features) + { + testcase("account activation"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + env.fund(XRP(10000), alice); + env.close(); + env.memoize(bob); + + auto const preAlice = env.balance(alice); + auto const ledSeq = env.current()->seq(); + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 1, 2); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1000)), seq + 1), + batch::inner(fset(bob, asfAllowTrustLineClawback), ledSeq), + batch::sig(bob)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tesSUCCESS", txIDs[0], batchID}, + {2, "AccountSet", "tesSUCCESS", txIDs[1], batchID}, + }; + validateClosedLedger(env, testCases); + + // Alice consumes sequences (# of txns) + BEAST_EXPECT(env.seq(alice) == seq + 2); + + // Bob consumes sequences (# of txns) + BEAST_EXPECT(env.seq(bob) == ledSeq + 1); + + // Alice pays XRP & Fee; Bob receives XRP + BEAST_EXPECT(env.balance(alice) == preAlice - XRP(1000) - batchFee); + BEAST_EXPECT(env.balance(bob) == XRP(1000)); + } + + void + testAccountSet(FeatureBitset features) + { + testcase("account set"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + env.fund(XRP(10000), alice, bob); + env.close(); + + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 0, 2); + auto tx1 = batch::inner(noop(alice), seq + 1); + std::string domain = "example.com"; + tx1[sfDomain] = strHex(domain); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, seq, batchFee, tfAllOrNothing), + tx1, + batch::inner(pay(alice, bob, XRP(1)), seq + 2)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "AccountSet", "tesSUCCESS", txIDs[0], batchID}, + {2, "Payment", "tesSUCCESS", txIDs[1], batchID}, + }; + validateClosedLedger(env, testCases); + + auto const sle = env.le(keylet::account(alice)); + BEAST_EXPECT(sle); + BEAST_EXPECT( + sle->getFieldVL(sfDomain) == Blob(domain.begin(), domain.end())); + + // Alice consumes sequences (# of txns) + BEAST_EXPECT(env.seq(alice) == seq + 3); + + // 
Alice pays XRP & Fee; Bob receives XRP + BEAST_EXPECT(env.balance(alice) == preAlice - XRP(1) - batchFee); + BEAST_EXPECT(env.balance(bob) == preBob + XRP(1)); + } + + void + testAccountDelete(FeatureBitset features) + { + testcase("account delete"); + + using namespace test::jtx; + using namespace std::literals; + + // tfIndependent: account delete success + { + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + env.fund(XRP(10000), alice, bob); + env.close(); + + incLgrSeqForAccDel(env, alice); + for (int i = 0; i < 5; ++i) + env.close(); + + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 0, 2) + + env.current()->fees().increment; + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, seq, batchFee, tfIndependent), + batch::inner(pay(alice, bob, XRP(1)), seq + 1), + batch::inner(acctdelete(alice, bob), seq + 2), + // terNO_ACCOUNT: alice does not exist + batch::inner(pay(alice, bob, XRP(2)), seq + 3)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tesSUCCESS", txIDs[0], batchID}, + {2, "AccountDelete", "tesSUCCESS", txIDs[1], batchID}, + }; + validateClosedLedger(env, testCases); + + // Alice does not exist; Bob receives Alice's XRP + BEAST_EXPECT(!env.le(keylet::account(alice))); + BEAST_EXPECT(env.balance(bob) == preBob + (preAlice - batchFee)); + } + + // tfIndependent: account delete fails + { + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + env.fund(XRP(10000), alice, bob); + env.close(); + + incLgrSeqForAccDel(env, alice); + for (int i = 0; i < 5; ++i) + env.close(); + + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + + env.trust(bob["USD"](1000), alice); + env.close(); + + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 0, 2) + + env.current()->fees().increment; + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, seq, batchFee, tfIndependent), + batch::inner(pay(alice, bob, XRP(1)), seq + 1), + // tecHAS_OBLIGATIONS: alice has obligations + batch::inner(acctdelete(alice, bob), seq + 2), + batch::inner(pay(alice, bob, XRP(2)), seq + 3)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tesSUCCESS", txIDs[0], batchID}, + {2, "AccountDelete", "tecHAS_OBLIGATIONS", txIDs[1], batchID}, + {3, "Payment", "tesSUCCESS", txIDs[2], batchID}, + }; + validateClosedLedger(env, testCases); + + // Alice still exists; Bob receives XRP + BEAST_EXPECT(env.le(keylet::account(alice))); + BEAST_EXPECT(env.balance(bob) == preBob + XRP(3)); + } + + // tfAllOrNothing: account delete fails + { + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + env.fund(XRP(10000), alice, bob); + env.close(); + + incLgrSeqForAccDel(env, alice); + for (int i = 0; i < 5; ++i) + env.close(); + + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 0, 2) + + env.current()->fees().increment; + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, seq, batchFee, tfAllOrNothing), +
batch::inner(pay(alice, bob, XRP(1)), seq + 1), + batch::inner(acctdelete(alice, bob), seq + 2), + // terNO_ACCOUNT: alice does not exist + batch::inner(pay(alice, bob, XRP(2)), seq + 3)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + }; + validateClosedLedger(env, testCases); + + // Alice still exists; Bob is unchanged + BEAST_EXPECT(env.le(keylet::account(alice))); + BEAST_EXPECT(env.balance(bob) == preBob); + } + } + + void + testObjectCreateSequence(FeatureBitset features) + { + testcase("object create w/ sequence"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + auto const USD = gw["USD"]; + + env.fund(XRP(10000), alice, bob, gw); + env.close(); + + env.trust(USD(1000), alice, bob); + env(pay(gw, alice, USD(100))); + env(pay(gw, bob, USD(100))); + env.close(); + + // success + { + auto const aliceSeq = env.seq(alice); + auto const bobSeq = env.seq(bob); + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + auto const preAliceUSD = env.balance(alice, USD.issue()); + auto const preBobUSD = env.balance(bob, USD.issue()); + + auto const batchFee = batch::calcBatchFee(env, 1, 2); + uint256 const chkID{getCheckIndex(bob, env.seq(bob))}; + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, aliceSeq, batchFee, tfAllOrNothing), + batch::inner(check::create(bob, alice, USD(10)), bobSeq), + batch::inner(check::cash(alice, chkID, USD(10)), aliceSeq + 1), + batch::sig(bob)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "CheckCreate", "tesSUCCESS", txIDs[0], batchID}, + {2, "CheckCash", "tesSUCCESS", txIDs[1], batchID}, + }; + validateClosedLedger(env, testCases); + + // Alice consumes sequences (# of txns) + BEAST_EXPECT(env.seq(alice) == aliceSeq + 2); + + // Alice consumes sequences (# of txns) + BEAST_EXPECT(env.seq(bob) == bobSeq + 1); + + // Alice pays Fee; Bob XRP Unchanged + BEAST_EXPECT(env.balance(alice) == preAlice - batchFee); + BEAST_EXPECT(env.balance(bob) == preBob); + + // Alice pays USD & Bob receives USD + BEAST_EXPECT( + env.balance(alice, USD.issue()) == preAliceUSD + USD(10)); + BEAST_EXPECT(env.balance(bob, USD.issue()) == preBobUSD - USD(10)); + } + + // failure + { + env(fset(alice, asfRequireDest)); + env.close(); + + auto const aliceSeq = env.seq(alice); + auto const bobSeq = env.seq(bob); + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + auto const preAliceUSD = env.balance(alice, USD.issue()); + auto const preBobUSD = env.balance(bob, USD.issue()); + + auto const batchFee = batch::calcBatchFee(env, 1, 2); + uint256 const chkID{getCheckIndex(bob, env.seq(bob))}; + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, aliceSeq, batchFee, tfIndependent), + // tecDST_TAG_NEEDED - alice has enabled asfRequireDest + batch::inner(check::create(bob, alice, USD(10)), bobSeq), + batch::inner(check::cash(alice, chkID, USD(10)), aliceSeq + 1), + batch::sig(bob)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "CheckCreate", "tecDST_TAG_NEEDED", txIDs[0], batchID}, + {2, "CheckCash", "tecNO_ENTRY", txIDs[1], batchID}, + }; + validateClosedLedger(env, testCases); + + // Alice consumes sequences (# of txns) + 
BEAST_EXPECT(env.seq(alice) == aliceSeq + 2); + + // Bob consumes sequences (# of txns) + BEAST_EXPECT(env.seq(bob) == bobSeq + 1); + + // Alice pays Fee; Bob XRP Unchanged + BEAST_EXPECT(env.balance(alice) == preAlice - batchFee); + BEAST_EXPECT(env.balance(bob) == preBob); + + // Alice pays USD & Bob receives USD + BEAST_EXPECT(env.balance(alice, USD.issue()) == preAliceUSD); + BEAST_EXPECT(env.balance(bob, USD.issue()) == preBobUSD); + } + } + + void + testObjectCreateTicket(FeatureBitset features) + { + testcase("object create w/ ticket"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + auto const USD = gw["USD"]; + + env.fund(XRP(10000), alice, bob, gw); + env.close(); + + env.trust(USD(1000), alice, bob); + env(pay(gw, alice, USD(100))); + env(pay(gw, bob, USD(100))); + env.close(); + + auto const aliceSeq = env.seq(alice); + auto const bobSeq = env.seq(bob); + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + auto const preAliceUSD = env.balance(alice, USD.issue()); + auto const preBobUSD = env.balance(bob, USD.issue()); + + auto const batchFee = batch::calcBatchFee(env, 1, 3); + uint256 const chkID{getCheckIndex(bob, bobSeq + 1)}; + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, aliceSeq, batchFee, tfAllOrNothing), + batch::inner(ticket::create(bob, 10), bobSeq), + batch::inner(check::create(bob, alice, USD(10)), 0, bobSeq + 1), + batch::inner(check::cash(alice, chkID, USD(10)), aliceSeq + 1), + batch::sig(bob)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "TicketCreate", "tesSUCCESS", txIDs[0], batchID}, + {2, "CheckCreate", "tesSUCCESS", txIDs[1], batchID}, + {3, "CheckCash", "tesSUCCESS", txIDs[2], batchID}, + }; + validateClosedLedger(env, testCases); + + BEAST_EXPECT(env.seq(alice) == aliceSeq + 2); + BEAST_EXPECT(env.seq(bob) == bobSeq + 10 + 1); + BEAST_EXPECT(env.balance(alice) == preAlice - batchFee); + BEAST_EXPECT(env.balance(bob) == preBob); + BEAST_EXPECT(env.balance(alice, USD.issue()) == preAliceUSD + USD(10)); + BEAST_EXPECT(env.balance(bob, USD.issue()) == preBobUSD - USD(10)); + } + + void + testObjectCreate3rdParty(FeatureBitset features) + { + testcase("object create w/ 3rd party"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const carol = Account("carol"); + auto const gw = Account("gw"); + auto const USD = gw["USD"]; + + env.fund(XRP(10000), alice, bob, carol, gw); + env.close(); + + env.trust(USD(1000), alice, bob); + env(pay(gw, alice, USD(100))); + env(pay(gw, bob, USD(100))); + env.close(); + + auto const aliceSeq = env.seq(alice); + auto const bobSeq = env.seq(bob); + auto const carolSeq = env.seq(carol); + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + auto const preCarol = env.balance(carol); + auto const preAliceUSD = env.balance(alice, USD.issue()); + auto const preBobUSD = env.balance(bob, USD.issue()); + + auto const batchFee = batch::calcBatchFee(env, 2, 2); + uint256 const chkID{getCheckIndex(bob, env.seq(bob))}; + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(carol, carolSeq, batchFee, tfAllOrNothing), + batch::inner(check::create(bob, 
alice, USD(10)), bobSeq), + batch::inner(check::cash(alice, chkID, USD(10)), aliceSeq), + batch::sig(alice, bob)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "CheckCreate", "tesSUCCESS", txIDs[0], batchID}, + {2, "CheckCash", "tesSUCCESS", txIDs[1], batchID}, + }; + validateClosedLedger(env, testCases); + + BEAST_EXPECT(env.seq(alice) == aliceSeq + 1); + BEAST_EXPECT(env.seq(bob) == bobSeq + 1); + BEAST_EXPECT(env.seq(carol) == carolSeq + 1); + BEAST_EXPECT(env.balance(alice) == preAlice); + BEAST_EXPECT(env.balance(bob) == preBob); + BEAST_EXPECT(env.balance(carol) == preCarol - batchFee); + BEAST_EXPECT(env.balance(alice, USD.issue()) == preAliceUSD + USD(10)); + BEAST_EXPECT(env.balance(bob, USD.issue()) == preBobUSD - USD(10)); + } + + void + testTickets(FeatureBitset features) + { + { + testcase("tickets outer"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + + env.fund(XRP(10000), alice, bob); + env.close(); + + std::uint32_t aliceTicketSeq{env.seq(alice) + 1}; + env(ticket::create(alice, 10)); + env.close(); + + auto const aliceSeq = env.seq(alice); + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + + auto const batchFee = batch::calcBatchFee(env, 0, 2); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, 0, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1)), aliceSeq + 0), + batch::inner(pay(alice, bob, XRP(2)), aliceSeq + 1), + ticket::use(aliceTicketSeq)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tesSUCCESS", txIDs[0], batchID}, + {2, "Payment", "tesSUCCESS", txIDs[1], batchID}, + }; + validateClosedLedger(env, testCases); + + auto const sle = env.le(keylet::account(alice)); + BEAST_EXPECT(sle); + BEAST_EXPECT(sle->getFieldU32(sfOwnerCount) == 9); + BEAST_EXPECT(sle->getFieldU32(sfTicketCount) == 9); + + BEAST_EXPECT(env.seq(alice) == aliceSeq + 2); + BEAST_EXPECT(env.balance(alice) == preAlice - XRP(3) - batchFee); + BEAST_EXPECT(env.balance(bob) == preBob + XRP(3)); + } + + { + testcase("tickets inner"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + + env.fund(XRP(10000), alice, bob); + env.close(); + + std::uint32_t aliceTicketSeq{env.seq(alice) + 1}; + env(ticket::create(alice, 10)); + env.close(); + + auto const aliceSeq = env.seq(alice); + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + + auto const batchFee = batch::calcBatchFee(env, 0, 2); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, aliceSeq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1)), 0, aliceTicketSeq), + batch::inner(pay(alice, bob, XRP(2)), 0, aliceTicketSeq + 1)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tesSUCCESS", txIDs[0], batchID}, + {2, "Payment", "tesSUCCESS", txIDs[1], batchID}, + }; + validateClosedLedger(env, testCases); + + auto const sle = env.le(keylet::account(alice)); + BEAST_EXPECT(sle); + BEAST_EXPECT(sle->getFieldU32(sfOwnerCount) == 8); + BEAST_EXPECT(sle->getFieldU32(sfTicketCount) == 8); + + BEAST_EXPECT(env.seq(alice) == aliceSeq 
+ 1); + BEAST_EXPECT(env.balance(alice) == preAlice - XRP(3) - batchFee); + BEAST_EXPECT(env.balance(bob) == preBob + XRP(3)); + } + + { + testcase("tickets outer inner"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + + env.fund(XRP(10000), alice, bob); + env.close(); + + std::uint32_t aliceTicketSeq{env.seq(alice) + 1}; + env(ticket::create(alice, 10)); + env.close(); + + auto const aliceSeq = env.seq(alice); + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + + auto const batchFee = batch::calcBatchFee(env, 0, 2); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, 0, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1)), 0, aliceTicketSeq + 1), + batch::inner(pay(alice, bob, XRP(2)), aliceSeq), + ticket::use(aliceTicketSeq)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tesSUCCESS", txIDs[0], batchID}, + {2, "Payment", "tesSUCCESS", txIDs[1], batchID}, + }; + validateClosedLedger(env, testCases); + + auto const sle = env.le(keylet::account(alice)); + BEAST_EXPECT(sle); + BEAST_EXPECT(sle->getFieldU32(sfOwnerCount) == 8); + BEAST_EXPECT(sle->getFieldU32(sfTicketCount) == 8); + + BEAST_EXPECT(env.seq(alice) == aliceSeq + 1); + BEAST_EXPECT(env.balance(alice) == preAlice - XRP(3) - batchFee); + BEAST_EXPECT(env.balance(bob) == preBob + XRP(3)); + } + } + + void + testSequenceOpenLedger(FeatureBitset features) + { + testcase("sequence open ledger"); + + using namespace test::jtx; + using namespace std::literals; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const carol = Account("carol"); + + // Before Batch Txn w/ retry following ledger + { + // IMPORTANT: The batch txn is applied first, then the noop txn. + // Because of this ordering, the noop txn is not applied and is + // overwritten by the payment in the batch transaction. Because the + // terPRE_SEQ is outside of the batch, this noop transaction will be + // reapplied in the following ledger. + test::jtx::Env env{*this, envconfig()}; + env.fund(XRP(10000), alice, bob, carol); + env.close(); + + auto const aliceSeq = env.seq(alice); + auto const carolSeq = env.seq(carol); + + // AccountSet Txn + auto const noopTxn = env.jt(noop(alice), seq(aliceSeq + 2)); + auto const noopTxnID = to_string(noopTxn.stx->getTransactionID()); + env(noopTxn, ter(terPRE_SEQ)); + + // Batch Txn + auto const batchFee = batch::calcBatchFee(env, 1, 2); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(carol, carolSeq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1)), aliceSeq), + batch::inner(pay(alice, bob, XRP(2)), aliceSeq + 1), + batch::sig(alice)); + env.close(); + + { + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tesSUCCESS", txIDs[0], batchID}, + {2, "Payment", "tesSUCCESS", txIDs[1], batchID}, + }; + validateClosedLedger(env, testCases); + } + + env.close(); + { + // next ledger contains noop txn + std::vector testCases = { + {0, "AccountSet", "tesSUCCESS", noopTxnID, std::nullopt}, + }; + validateClosedLedger(env, testCases); + } + } + + // Before Batch Txn w/ same sequence + { + // IMPORTANT: The batch txn is applied first, then the noop txn.
+ // Because of this ordering, the noop txn is not applied and is + // overwritten by the payment in the batch transaction. + test::jtx::Env env{*this, envconfig()}; + env.fund(XRP(10000), alice, bob); + env.close(); + + auto const aliceSeq = env.seq(alice); + + // AccountSet Txn + auto const noopTxn = env.jt(noop(alice), seq(aliceSeq + 1)); + env(noopTxn, ter(terPRE_SEQ)); + + // Batch Txn + auto const batchFee = batch::calcBatchFee(env, 0, 2); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, aliceSeq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1)), aliceSeq + 1), + batch::inner(pay(alice, bob, XRP(2)), aliceSeq + 2)); + env.close(); + + { + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tesSUCCESS", txIDs[0], batchID}, + {2, "Payment", "tesSUCCESS", txIDs[1], batchID}, + }; + validateClosedLedger(env, testCases); + } + + env.close(); + { + // next ledger is empty + std::vector testCases = {}; + validateClosedLedger(env, testCases); + } + } + + // After Batch Txn w/ same sequence + { + // IMPORTANT: The batch txn is applied first, then the noop txn. + // Because of this ordering, the noop txn is not applied and is + // overwritten by the payment in the batch transaction. + test::jtx::Env env{*this, envconfig()}; + env.fund(XRP(10000), alice, bob); + env.close(); + + auto const aliceSeq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 0, 2); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, aliceSeq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1)), aliceSeq + 1), + batch::inner(pay(alice, bob, XRP(2)), aliceSeq + 2)); + + auto const noopTxn = env.jt(noop(alice), seq(aliceSeq + 1)); + auto const noopTxnID = to_string(noopTxn.stx->getTransactionID()); + env(noopTxn, ter(tesSUCCESS)); + env.close(); + + { + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tesSUCCESS", txIDs[0], batchID}, + {2, "Payment", "tesSUCCESS", txIDs[1], batchID}, + }; + validateClosedLedger(env, testCases); + } + + env.close(); + { + // next ledger is empty + std::vector testCases = {}; + validateClosedLedger(env, testCases); + } + } + + // Outer Batch terPRE_SEQ + { + test::jtx::Env env{*this, envconfig()}; + env.fund(XRP(10000), alice, bob, carol); + env.close(); + + auto const aliceSeq = env.seq(alice); + auto const carolSeq = env.seq(carol); + + // Batch Txn + auto const batchFee = batch::calcBatchFee(env, 1, 2); + auto const [txIDs, batchID] = submitBatch( + env, + terPRE_SEQ, + batch::outer(carol, carolSeq + 1, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1)), aliceSeq), + batch::inner(pay(alice, bob, XRP(2)), aliceSeq + 1), + batch::sig(alice)); + + // AccountSet Txn + auto const noopTxn = env.jt(noop(carol), seq(carolSeq)); + auto const noopTxnID = to_string(noopTxn.stx->getTransactionID()); + env(noopTxn, ter(tesSUCCESS)); + env.close(); + + { + std::vector testCases = { + {0, "AccountSet", "tesSUCCESS", noopTxnID, std::nullopt}, + {1, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {2, "Payment", "tesSUCCESS", txIDs[0], batchID}, + {3, "Payment", "tesSUCCESS", txIDs[1], batchID}, + }; + validateClosedLedger(env, testCases); + } + + env.close(); + { + // next ledger contains no transactions + std::vector testCases = {}; + validateClosedLedger(env, testCases); + } + } + } + + void + testTicketsOpenLedger(FeatureBitset features) + { + testcase("tickets open ledger"); 
+ + using namespace test::jtx; + using namespace std::literals; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + + // Before Batch Txn w/ same ticket + { + // IMPORTANT: The batch txn is applied first, then the noop txn. + // Because of this ordering, the noop txn is not applied and is + // overwritten by the payment in the batch transaction. + test::jtx::Env env{*this, envconfig()}; + env.fund(XRP(10000), alice, bob); + env.close(); + + std::uint32_t aliceTicketSeq{env.seq(alice) + 1}; + env(ticket::create(alice, 10)); + env.close(); + + auto const aliceSeq = env.seq(alice); + + // AccountSet Txn + auto const noopTxn = + env.jt(noop(alice), ticket::use(aliceTicketSeq + 1)); + auto const noopTxnID = to_string(noopTxn.stx->getTransactionID()); + env(noopTxn, ter(tesSUCCESS)); + + // Batch Txn + auto const batchFee = batch::calcBatchFee(env, 0, 2); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, 0, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1)), 0, aliceTicketSeq + 1), + batch::inner(pay(alice, bob, XRP(2)), aliceSeq), + ticket::use(aliceTicketSeq)); + env.close(); + + { + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tesSUCCESS", txIDs[0], batchID}, + {2, "Payment", "tesSUCCESS", txIDs[1], batchID}, + }; + validateClosedLedger(env, testCases); + } + + env.close(); + { + // next ledger is empty + std::vector testCases = {}; + validateClosedLedger(env, testCases); + } + } + + // After Batch Txn w/ same ticket + { + // IMPORTANT: The batch txn is applied first, then the noop txn. + // Because of this ordering, the noop txn is not applied and is + // overwritten by the payment in the batch transaction. + test::jtx::Env env{*this, envconfig()}; + env.fund(XRP(10000), alice, bob); + env.close(); + + std::uint32_t aliceTicketSeq{env.seq(alice) + 1}; + env(ticket::create(alice, 10)); + env.close(); + + auto const aliceSeq = env.seq(alice); + + // Batch Txn + auto const batchFee = batch::calcBatchFee(env, 0, 2); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, 0, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1)), 0, aliceTicketSeq + 1), + batch::inner(pay(alice, bob, XRP(2)), aliceSeq), + ticket::use(aliceTicketSeq)); + + // AccountSet Txn + auto const noopTxn = + env.jt(noop(alice), ticket::use(aliceTicketSeq + 1)); + env(noopTxn); + + env.close(); + { + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tesSUCCESS", txIDs[0], batchID}, + {2, "Payment", "tesSUCCESS", txIDs[1], batchID}, + }; + validateClosedLedger(env, testCases); + } + + env.close(); + { + // next ledger is empty + std::vector testCases = {}; + validateClosedLedger(env, testCases); + } + } + } + + void + testObjectsOpenLedger(FeatureBitset features) + { + testcase("objects open ledger"); + + using namespace test::jtx; + using namespace std::literals; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + + // Consume Object Before Batch Txn + { + // IMPORTANT: The initial result of `CheckCash` is tecNO_ENTRY + // because the create transaction has not been applied because the + // batch will run in the close ledger process. The batch will be + // applied and then retry this transaction in the current ledger.
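+            // (In this first case the CheckCash is submitted before the
+            // batch: it references chkID, a Check that only exists once the
+            // batch's inner CheckCreate is applied, hence the initial
+            // tecNO_ENTRY; the expected closed-ledger order below shows the
+            // retried CheckCash succeeding after the Batch and its inner
+            // transactions.)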
+ + test::jtx::Env env{*this, envconfig()}; + env.fund(XRP(10000), alice, bob); + env.close(); + + std::uint32_t aliceTicketSeq{env.seq(alice) + 1}; + env(ticket::create(alice, 10)); + env.close(); + + auto const aliceSeq = env.seq(alice); + + // CheckCash Txn + uint256 const chkID{getCheckIndex(alice, aliceSeq)}; + auto const objTxn = env.jt(check::cash(bob, chkID, XRP(10))); + auto const objTxnID = to_string(objTxn.stx->getTransactionID()); + env(objTxn, ter(tecNO_ENTRY)); + + // Batch Txn + auto const batchFee = batch::calcBatchFee(env, 0, 2); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, 0, batchFee, tfAllOrNothing), + batch::inner(check::create(alice, bob, XRP(10)), aliceSeq), + batch::inner(pay(alice, bob, XRP(1)), 0, aliceTicketSeq + 1), + ticket::use(aliceTicketSeq)); + + env.close(); + { + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "CheckCreate", "tesSUCCESS", txIDs[0], batchID}, + {2, "Payment", "tesSUCCESS", txIDs[1], batchID}, + {3, "CheckCash", "tesSUCCESS", objTxnID, std::nullopt}, + }; + validateClosedLedger(env, testCases); + } + + env.close(); + { + // next ledger is empty + std::vector testCases = {}; + validateClosedLedger(env, testCases); + } + } + + // Create Object Before Batch Txn + { + test::jtx::Env env{*this, envconfig()}; + env.fund(XRP(10000), alice, bob); + env.close(); + + std::uint32_t aliceTicketSeq{env.seq(alice) + 1}; + env(ticket::create(alice, 10)); + env.close(); + + auto const aliceSeq = env.seq(alice); + auto const bobSeq = env.seq(bob); + + // CheckCreate Txn + uint256 const chkID{getCheckIndex(alice, aliceSeq)}; + auto const objTxn = env.jt(check::create(alice, bob, XRP(10))); + auto const objTxnID = to_string(objTxn.stx->getTransactionID()); + env(objTxn, ter(tesSUCCESS)); + + // Batch Txn + auto const batchFee = batch::calcBatchFee(env, 1, 2); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, 0, batchFee, tfAllOrNothing), + batch::inner(check::cash(bob, chkID, XRP(10)), bobSeq), + batch::inner(pay(alice, bob, XRP(1)), 0, aliceTicketSeq + 1), + ticket::use(aliceTicketSeq), + batch::sig(bob)); + + env.close(); + { + std::vector testCases = { + {0, "CheckCreate", "tesSUCCESS", objTxnID, std::nullopt}, + {1, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {2, "CheckCash", "tesSUCCESS", txIDs[0], batchID}, + {3, "Payment", "tesSUCCESS", txIDs[1], batchID}, + }; + validateClosedLedger(env, testCases); + } + } + + // After Batch Txn + { + // IMPORTANT: The initial result of `CheckCash` is tecNO_ENTRY + // because the create transaction has not been applied because the + // batch will run in the close ledger process. The batch will be + // applied and then retry this transaction in the current ledger. 
+ + test::jtx::Env env{*this, envconfig()}; + env.fund(XRP(10000), alice, bob); + env.close(); + + std::uint32_t aliceTicketSeq{env.seq(alice) + 1}; + env(ticket::create(alice, 10)); + env.close(); + + auto const aliceSeq = env.seq(alice); + + // Batch Txn + auto const batchFee = batch::calcBatchFee(env, 0, 2); + uint256 const chkID{getCheckIndex(alice, aliceSeq)}; + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, 0, batchFee, tfAllOrNothing), + batch::inner(check::create(alice, bob, XRP(10)), aliceSeq), + batch::inner(pay(alice, bob, XRP(1)), 0, aliceTicketSeq + 1), + ticket::use(aliceTicketSeq)); + + // CheckCash Txn + auto const objTxn = env.jt(check::cash(bob, chkID, XRP(10))); + auto const objTxnID = to_string(objTxn.stx->getTransactionID()); + env(objTxn, ter(tecNO_ENTRY)); + + env.close(); + { + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "CheckCreate", "tesSUCCESS", txIDs[0], batchID}, + {2, "Payment", "tesSUCCESS", txIDs[1], batchID}, + {3, "CheckCash", "tesSUCCESS", objTxnID, std::nullopt}, + }; + validateClosedLedger(env, testCases); + } + } + } + + void + testPseudoTxn(FeatureBitset features) + { + testcase("pseudo txn with tfInnerBatchTxn"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + env.fund(XRP(10000), alice, bob); + env.close(); + + STTx const stx = STTx(ttAMENDMENT, [&](auto& obj) { + obj.setAccountID(sfAccount, AccountID()); + obj.setFieldH256(sfAmendment, uint256(2)); + obj.setFieldU32(sfLedgerSequence, env.seq(alice)); + obj.setFieldU32(sfFlags, tfInnerBatchTxn); + }); + + std::string reason; + BEAST_EXPECT(isPseudoTx(stx)); + BEAST_EXPECT(!passesLocalChecks(stx, reason)); + BEAST_EXPECT(reason == "Cannot submit pseudo transactions."); + env.app().openLedger().modify([&](OpenView& view, beast::Journal j) { + auto const result = ripple::apply(env.app(), view, stx, tapNONE, j); + BEAST_EXPECT(!result.applied && result.ter == temINVALID_FLAG); + return result.applied; + }); + } + + void + testOpenLedger(FeatureBitset features) + { + testcase("batch open ledger"); + // IMPORTANT: When a transaction is submitted outside of a batch and + // another transaction is part of the batch, the batch might fail + // because the sequence is out of order. This is because the canonical + // order of transactions is determined by the account first. So in this + // case, alice's batch comes after bob's self-submitted transaction even + // though the payment was submitted after the batch.
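+        // Concretely: bob's self-submitted payment below uses bobSeq + 1
+        // while the batch's inner transaction that consumes bobSeq only
+        // applies when the ledger closes, so that payment is initially
+        // terPRE_SEQ and is included in the following ledger instead (see
+        // the second validateClosedLedger below).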
+
+        using namespace test::jtx;
+        using namespace std::literals;
+
+        test::jtx::Env env{*this, envconfig()};
+        XRPAmount const baseFee = env.current()->fees().base;
+
+        auto const alice = Account("alice");
+        auto const bob = Account("bob");
+
+        env.fund(XRP(10000), alice, bob);
+        env.close();
+
+        env(noop(bob), ter(tesSUCCESS));
+        env.close();
+
+        auto const aliceSeq = env.seq(alice);
+        auto const preAlice = env.balance(alice);
+        auto const preBob = env.balance(bob);
+        auto const bobSeq = env.seq(bob);
+
+        // Alice Pays Bob (Open Ledger)
+        auto const payTxn1 = env.jt(pay(alice, bob, XRP(10)), seq(aliceSeq));
+        auto const payTxn1ID = to_string(payTxn1.stx->getTransactionID());
+        env(payTxn1, ter(tesSUCCESS));
+
+        // Alice & Bob Atomic Batch
+        auto const batchFee = batch::calcBatchFee(env, 1, 2);
+        auto const [txIDs, batchID] = submitBatch(
+            env,
+            tesSUCCESS,
+            batch::outer(alice, aliceSeq + 1, batchFee, tfAllOrNothing),
+            batch::inner(pay(alice, bob, XRP(10)), aliceSeq + 2),
+            batch::inner(pay(bob, alice, XRP(5)), bobSeq),
+            batch::sig(bob));
+
+        // Bob pays Alice (Open Ledger)
+        auto const payTxn2 = env.jt(pay(bob, alice, XRP(5)), seq(bobSeq + 1));
+        auto const payTxn2ID = to_string(payTxn2.stx->getTransactionID());
+        env(payTxn2, ter(terPRE_SEQ));
+        env.close();
+
+        std::vector testCases = {
+            {0, "Payment", "tesSUCCESS", payTxn1ID, std::nullopt},
+            {1, "Batch", "tesSUCCESS", batchID, std::nullopt},
+            {2, "Payment", "tesSUCCESS", txIDs[0], batchID},
+            {3, "Payment", "tesSUCCESS", txIDs[1], batchID},
+        };
+        validateClosedLedger(env, testCases);
+
+        env.close();
+        {
+            // next ledger includes the payment txn
+            std::vector testCases = {
+                {0, "Payment", "tesSUCCESS", payTxn2ID, std::nullopt},
+            };
+            validateClosedLedger(env, testCases);
+        }
+
+        // Alice consumes sequences (# of txns)
+        BEAST_EXPECT(env.seq(alice) == aliceSeq + 3);
+
+        // Bob consumes sequences (# of txns)
+        BEAST_EXPECT(env.seq(bob) == bobSeq + 2);
+
+        // Alice pays XRP & Fee; Bob receives XRP & pays Fee
+        BEAST_EXPECT(
+            env.balance(alice) == preAlice - XRP(10) - batchFee - baseFee);
+        BEAST_EXPECT(env.balance(bob) == preBob + XRP(10) - baseFee);
+    }
+
+    void
+    testBatchTxQueue(FeatureBitset features)
+    {
+        testcase("batch tx queue");
+
+        using namespace test::jtx;
+        using namespace std::literals;
+
+        // only outer batch transactions are counted towards the queue size
+        {
+            test::jtx::Env env{
+                *this,
+                makeSmallQueueConfig(
+                    {{"minimum_txn_in_ledger_standalone", "2"}}),
+                nullptr,
+                beast::severities::kError};
+
+            auto alice = Account("alice");
+            auto bob = Account("bob");
+            auto carol = Account("carol");
+
+            // Fund across several ledgers so the TxQ metrics stay restricted.
+            env.fund(XRP(10000), noripple(alice, bob));
+            env.close(env.now() + 5s, 10000ms);
+            env.fund(XRP(10000), noripple(carol));
+            env.close(env.now() + 5s, 10000ms);
+
+            // Fill the ledger
+            env(noop(alice));
+            env(noop(alice));
+            env(noop(alice));
+            checkMetrics(*this, env, 0, std::nullopt, 3, 2);
+
+            env(noop(carol), ter(terQUEUED));
+            checkMetrics(*this, env, 1, std::nullopt, 3, 2);
+
+            auto const aliceSeq = env.seq(alice);
+            auto const bobSeq = env.seq(bob);
+            auto const batchFee = batch::calcBatchFee(env, 1, 2);
+
+            // Queue Batch
+            {
+                env(batch::outer(alice, aliceSeq, batchFee, tfAllOrNothing),
+                    batch::inner(pay(alice, bob, XRP(10)), aliceSeq + 1),
+                    batch::inner(pay(bob, alice, XRP(5)), bobSeq),
+                    batch::sig(bob),
+                    ter(terQUEUED));
+            }
+
+            checkMetrics(*this, env, 2, std::nullopt, 3, 2);
+
+            // Replace Queued Batch
+            {
+                env(batch::outer(
+                        alice,
+                        aliceSeq,
+                        openLedgerFee(env, batchFee),
+                        tfAllOrNothing),
+                    batch::inner(pay(alice, bob, XRP(10)), aliceSeq + 1),
+                    batch::inner(pay(bob, alice, XRP(5)), bobSeq),
+                    batch::sig(bob),
+                    ter(tesSUCCESS));
+                env.close();
+            }
+
+            checkMetrics(*this, env, 0, 12, 1, 6);
+        }
+
+        // inner batch transactions are counted towards the ledger tx count
+        {
+            test::jtx::Env env{
+                *this,
+                makeSmallQueueConfig(
+                    {{"minimum_txn_in_ledger_standalone", "2"}}),
+                nullptr,
+                beast::severities::kError};
+
+            auto alice = Account("alice");
+            auto bob = Account("bob");
+            auto carol = Account("carol");
+
+            // Fund across several ledgers so the TxQ metrics stay restricted.
+            env.fund(XRP(10000), noripple(alice, bob));
+            env.close(env.now() + 5s, 10000ms);
+            env.fund(XRP(10000), noripple(carol));
+            env.close(env.now() + 5s, 10000ms);
+
+            // Fill the ledger leaving room for 1 queued transaction
+            env(noop(alice));
+            env(noop(alice));
+            checkMetrics(*this, env, 0, std::nullopt, 2, 2);
+
+            auto const aliceSeq = env.seq(alice);
+            auto const bobSeq = env.seq(bob);
+            auto const batchFee = batch::calcBatchFee(env, 1, 2);
+
+            // Batch Successful
+            {
+                env(batch::outer(alice, aliceSeq, batchFee, tfAllOrNothing),
+                    batch::inner(pay(alice, bob, XRP(10)), aliceSeq + 1),
+                    batch::inner(pay(bob, alice, XRP(5)), bobSeq),
+                    batch::sig(bob),
+                    ter(tesSUCCESS));
+            }
+
+            checkMetrics(*this, env, 0, std::nullopt, 3, 2);
+
+            env(noop(carol), ter(terQUEUED));
+            checkMetrics(*this, env, 1, std::nullopt, 3, 2);
+        }
+    }
+
+    void
+    testBatchNetworkOps(FeatureBitset features)
+    {
+        testcase("batch network ops");
+
+        using namespace test::jtx;
+        using namespace std::literals;
+
+        Env env(
+            *this,
+            envconfig(),
+            features,
+            nullptr,
+            beast::severities::kDisabled);
+
+        auto alice = Account("alice");
+        auto bob = Account("bob");
+        env.fund(XRP(10000), alice, bob);
+        env.close();
+
+        auto submitTx = [&](std::uint32_t flags) -> uint256 {
+            auto jt = env.jt(pay(alice, bob, XRP(1)), txflags(flags));
+            Serializer s;
+            jt.stx->add(s);
+            env.app().getOPs().submitTransaction(jt.stx);
+            return jt.stx->getTransactionID();
+        };
+
+        auto processTxn = [&](std::uint32_t flags) -> uint256 {
+            auto jt = env.jt(pay(alice, bob, XRP(1)), txflags(flags));
+            Serializer s;
+            jt.stx->add(s);
+            std::string reason;
+            auto transaction =
+                std::make_shared(jt.stx, reason, env.app());
+            env.app().getOPs().processTransaction(
+                transaction, false, true, NetworkOPs::FailHard::yes);
+            return transaction->getID();
+        };
+
+        // Validate: NetworkOPs::submitTransaction()
+        {
+            // Submit a tx with tfInnerBatchTxn
+            uint256 const txBad = submitTx(tfInnerBatchTxn);
+            BEAST_EXPECT(env.app().getHashRouter().getFlags(txBad) ==
0); + } + + // Validate: NetworkOPs::processTransaction() + { + uint256 const txid = processTxn(tfInnerBatchTxn); + // HashRouter::getFlags() should return SF_BAD + BEAST_EXPECT(env.app().getHashRouter().getFlags(txid) == SF_BAD); + } + } + + void + testBatchDelegate(FeatureBitset features) + { + testcase("batch delegate"); + + using namespace test::jtx; + using namespace std::literals; + + // delegated non atomic inner + { + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + auto const USD = gw["USD"]; + env.fund(XRP(10000), alice, bob, gw); + env.close(); + + env(delegate::set(alice, bob, {"Payment"})); + env.close(); + + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + + auto const batchFee = batch::calcBatchFee(env, 0, 2); + auto const seq = env.seq(alice); + + auto tx = batch::inner(pay(alice, bob, XRP(1)), seq + 1); + tx[jss::Delegate] = bob.human(); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, seq, batchFee, tfAllOrNothing), + tx, + batch::inner(pay(alice, bob, XRP(2)), seq + 2)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tesSUCCESS", txIDs[0], batchID}, + {2, "Payment", "tesSUCCESS", txIDs[1], batchID}, + }; + validateClosedLedger(env, testCases); + + // Alice consumes sequences (# of txns) + BEAST_EXPECT(env.seq(alice) == seq + 3); + + // Alice pays XRP & Fee; Bob receives XRP + BEAST_EXPECT(env.balance(alice) == preAlice - XRP(3) - batchFee); + BEAST_EXPECT(env.balance(bob) == preBob + XRP(3)); + } + + // delegated atomic inner + { + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const carol = Account("carol"); + auto const gw = Account("gw"); + auto const USD = gw["USD"]; + env.fund(XRP(10000), alice, bob, carol, gw); + env.close(); + + env(delegate::set(bob, carol, {"Payment"})); + env.close(); + + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + auto const preCarol = env.balance(carol); + + auto const batchFee = batch::calcBatchFee(env, 1, 2); + auto const aliceSeq = env.seq(alice); + auto const bobSeq = env.seq(bob); + + auto tx = batch::inner(pay(bob, alice, XRP(1)), bobSeq); + tx[jss::Delegate] = carol.human(); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, aliceSeq, batchFee, tfAllOrNothing), + tx, + batch::inner(pay(alice, bob, XRP(2)), aliceSeq + 1), + batch::sig(bob)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "Payment", "tesSUCCESS", txIDs[0], batchID}, + {2, "Payment", "tesSUCCESS", txIDs[1], batchID}, + }; + validateClosedLedger(env, testCases); + + BEAST_EXPECT(env.seq(alice) == aliceSeq + 2); + BEAST_EXPECT(env.seq(bob) == bobSeq + 1); + BEAST_EXPECT(env.balance(alice) == preAlice - XRP(1) - batchFee); + BEAST_EXPECT(env.balance(bob) == preBob + XRP(1)); + // NOTE: Carol would normally pay the fee for delegated txns, but + // because the batch is atomic, the fee is paid by the batch + BEAST_EXPECT(env.balance(carol) == preCarol); + } + + // delegated non atomic inner (AccountSet) + { + test::jtx::Env env{*this, envconfig()}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + auto const USD = gw["USD"]; + env.fund(XRP(10000), alice, bob, gw); + 
env.close(); + + env(delegate::set(alice, bob, {"AccountDomainSet"})); + env.close(); + + auto const preAlice = env.balance(alice); + auto const preBob = env.balance(bob); + + auto const batchFee = batch::calcBatchFee(env, 0, 2); + auto const seq = env.seq(alice); + + auto tx = batch::inner(noop(alice), seq + 1); + std::string const domain = "example.com"; + tx[sfDomain.jsonName] = strHex(domain); + tx[jss::Delegate] = bob.human(); + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, seq, batchFee, tfAllOrNothing), + tx, + batch::inner(pay(alice, bob, XRP(2)), seq + 2)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "AccountSet", "tesSUCCESS", txIDs[0], batchID}, + {2, "Payment", "tesSUCCESS", txIDs[1], batchID}, + }; + validateClosedLedger(env, testCases); + + // Alice consumes sequences (# of txns) + BEAST_EXPECT(env.seq(alice) == seq + 3); + + // Alice pays XRP & Fee; Bob receives XRP + BEAST_EXPECT(env.balance(alice) == preAlice - XRP(2) - batchFee); + BEAST_EXPECT(env.balance(bob) == preBob + XRP(2)); + } + } + + void + testWithFeats(FeatureBitset features) + { + testEnable(features); + testPreflight(features); + testPreclaim(features); + testBadRawTxn(features); + testBadSequence(features); + testBadOuterFee(features); + testCalculateBaseFee(features); + testAllOrNothing(features); + testOnlyOne(features); + testUntilFailure(features); + testIndependent(features); + testInnerSubmitRPC(features); + testAccountActivation(features); + testAccountSet(features); + testAccountDelete(features); + testObjectCreateSequence(features); + testObjectCreateTicket(features); + testObjectCreate3rdParty(features); + testTickets(features); + testSequenceOpenLedger(features); + testTicketsOpenLedger(features); + testObjectsOpenLedger(features); + testPseudoTxn(features); + testOpenLedger(features); + testBatchTxQueue(features); + testBatchNetworkOps(features); + testBatchDelegate(features); + } + +public: + void + run() override + { + using namespace test::jtx; + auto const sa = supported_amendments(); + testWithFeats(sa); + } +}; + +BEAST_DEFINE_TESTSUITE(Batch, app, ripple); + +} // namespace test +} // namespace ripple diff --git a/src/test/app/Delegate_test.cpp b/src/test/app/Delegate_test.cpp index 5136627148..ca173a6993 100644 --- a/src/test/app/Delegate_test.cpp +++ b/src/test/app/Delegate_test.cpp @@ -231,6 +231,7 @@ class Delegate_test : public beast::unit_test::suite ter(tecNO_PERMISSION)); env(delegate::set(gw, alice, {"UNLModify"}), ter(tecNO_PERMISSION)); env(delegate::set(gw, alice, {"SetFee"}), ter(tecNO_PERMISSION)); + env(delegate::set(gw, alice, {"Batch"}), ter(tecNO_PERMISSION)); } } diff --git a/src/test/app/MultiSign_test.cpp b/src/test/app/MultiSign_test.cpp index b24c7ca39e..8c1880c1a0 100644 --- a/src/test/app/MultiSign_test.cpp +++ b/src/test/app/MultiSign_test.cpp @@ -460,7 +460,7 @@ public: // Attempt a multisigned transaction that meets the quorum. 
auto const baseFee = env.current()->fees().base; std::uint32_t aliceSeq = env.seq(alice); - env(noop(alice), msig(msig::Reg{cheri, cher}), fee(2 * baseFee)); + env(noop(alice), msig(Reg{cheri, cher}), fee(2 * baseFee)); env.close(); BEAST_EXPECT(env.seq(alice) == aliceSeq + 1); @@ -480,7 +480,7 @@ public: BEAST_EXPECT(env.seq(alice) == aliceSeq + 1); aliceSeq = env.seq(alice); - env(noop(alice), msig(msig::Reg{becky, beck}), fee(2 * baseFee)); + env(noop(alice), msig(Reg{becky, beck}), fee(2 * baseFee)); env.close(); BEAST_EXPECT(env.seq(alice) == aliceSeq + 1); @@ -488,7 +488,7 @@ public: aliceSeq = env.seq(alice); env(noop(alice), fee(3 * baseFee), - msig(msig::Reg{becky, beck}, msig::Reg{cheri, cher})); + msig(Reg{becky, beck}, Reg{cheri, cher})); env.close(); BEAST_EXPECT(env.seq(alice) == aliceSeq + 1); } @@ -783,12 +783,12 @@ public: BEAST_EXPECT(env.seq(alice) == aliceSeq + 1); aliceSeq = env.seq(alice); - env(noop(alice), msig(msig::Reg{cheri, cher}), fee(2 * baseFee)); + env(noop(alice), msig(Reg{cheri, cher}), fee(2 * baseFee)); env.close(); BEAST_EXPECT(env.seq(alice) == aliceSeq + 1); aliceSeq = env.seq(alice); - env(noop(alice), msig(msig::Reg{daria, dari}), fee(2 * baseFee)); + env(noop(alice), msig(Reg{daria, dari}), fee(2 * baseFee)); env.close(); BEAST_EXPECT(env.seq(alice) == aliceSeq + 1); @@ -801,7 +801,7 @@ public: aliceSeq = env.seq(alice); env(noop(alice), fee(5 * baseFee), - msig(becky, msig::Reg{cheri, cher}, msig::Reg{daria, dari}, jinni)); + msig(becky, Reg{cheri, cher}, Reg{daria, dari}, jinni)); env.close(); BEAST_EXPECT(env.seq(alice) == aliceSeq + 1); @@ -820,7 +820,7 @@ public: aliceSeq = env.seq(alice); env(noop(alice), fee(9 * baseFee), - msig(becky, msig::Reg{cheri, cher}, msig::Reg{daria, dari}, jinni)); + msig(becky, Reg{cheri, cher}, Reg{daria, dari}, jinni)); env.close(); BEAST_EXPECT(env.seq(alice) == aliceSeq + 1); @@ -828,7 +828,7 @@ public: aliceSeq = env.seq(alice); env(noop(alice), fee(5 * baseFee), - msig(becky, cheri, msig::Reg{daria, dari}, jinni)); + msig(becky, cheri, Reg{daria, dari}, jinni)); env.close(); BEAST_EXPECT(env.seq(alice) == aliceSeq + 1); @@ -853,8 +853,8 @@ public: fee(9 * baseFee), msig( becky, - msig::Reg{cheri, cher}, - msig::Reg{daria, dari}, + Reg{cheri, cher}, + Reg{daria, dari}, haunt, jinni, phase, @@ -1349,7 +1349,7 @@ public: // Becky cannot 2-level multisign for alice. 2-level multisigning // is not supported. env(noop(alice), - msig(msig::Reg{becky, bogie}), + msig(Reg{becky, bogie}), fee(2 * baseFee), ter(tefBAD_SIGNATURE)); env.close(); @@ -1358,7 +1358,7 @@ public: // not yet enabled. Account const beck{"beck", KeyType::ed25519}; env(noop(alice), - msig(msig::Reg{becky, beck}), + msig(Reg{becky, beck}), fee(2 * baseFee), ter(tefBAD_SIGNATURE)); env.close(); @@ -1368,13 +1368,13 @@ public: env(regkey(becky, beck), msig(demon), fee(2 * baseFee)); env.close(); - env(noop(alice), msig(msig::Reg{becky, beck}), fee(2 * baseFee)); + env(noop(alice), msig(Reg{becky, beck}), fee(2 * baseFee)); env.close(); // The presence of becky's regular key does not influence whether she // can 2-level multisign; it still won't work. 
env(noop(alice), - msig(msig::Reg{becky, demon}), + msig(Reg{becky, demon}), fee(2 * baseFee), ter(tefBAD_SIGNATURE)); env.close(); diff --git a/src/test/app/TxQ_test.cpp b/src/test/app/TxQ_test.cpp index 7b69cee1ce..947640495d 100644 --- a/src/test/app/TxQ_test.cpp +++ b/src/test/app/TxQ_test.cpp @@ -18,6 +18,7 @@ //============================================================================== #include +#include #include #include #include @@ -42,97 +43,6 @@ class TxQPosNegFlows_test : public beast::unit_test::suite static constexpr FeeLevel64 baseFeeLevel{256}; static constexpr FeeLevel64 minEscalationFeeLevel = baseFeeLevel * 500; - void - checkMetrics( - int line, - jtx::Env& env, - std::size_t expectedCount, - std::optional expectedMaxCount, - std::size_t expectedInLedger, - std::size_t expectedPerLedger, - std::uint64_t expectedMinFeeLevel = baseFeeLevel.fee(), - std::uint64_t expectedMedFeeLevel = minEscalationFeeLevel.fee()) - { - FeeLevel64 const expectedMin{expectedMinFeeLevel}; - FeeLevel64 const expectedMed{expectedMedFeeLevel}; - auto const metrics = env.app().getTxQ().getMetrics(*env.current()); - using namespace std::string_literals; - - metrics.referenceFeeLevel == baseFeeLevel - ? pass() - : fail( - "reference: "s + - std::to_string(metrics.referenceFeeLevel.value()) + "/" + - std::to_string(baseFeeLevel.value()), - __FILE__, - line); - - metrics.txCount == expectedCount - ? pass() - : fail( - "txCount: "s + std::to_string(metrics.txCount) + "/" + - std::to_string(expectedCount), - __FILE__, - line); - - metrics.txQMaxSize == expectedMaxCount - ? pass() - : fail( - "txQMaxSize: "s + - std::to_string(metrics.txQMaxSize.value_or(0)) + "/" + - std::to_string(expectedMaxCount.value_or(0)), - __FILE__, - line); - - metrics.txInLedger == expectedInLedger - ? pass() - : fail( - "txInLedger: "s + std::to_string(metrics.txInLedger) + "/" + - std::to_string(expectedInLedger), - __FILE__, - line); - - metrics.txPerLedger == expectedPerLedger - ? pass() - : fail( - "txPerLedger: "s + std::to_string(metrics.txPerLedger) + "/" + - std::to_string(expectedPerLedger), - __FILE__, - line); - - metrics.minProcessingFeeLevel == expectedMin - ? pass() - : fail( - "minProcessingFeeLevel: "s + - std::to_string(metrics.minProcessingFeeLevel.value()) + - "/" + std::to_string(expectedMin.value()), - __FILE__, - line); - - metrics.medFeeLevel == expectedMed - ? pass() - : fail( - "medFeeLevel: "s + - std::to_string(metrics.medFeeLevel.value()) + "/" + - std::to_string(expectedMed.value()), - __FILE__, - line); - - auto const expectedCurFeeLevel = expectedInLedger > expectedPerLedger - ? expectedMed * expectedInLedger * expectedInLedger / - (expectedPerLedger * expectedPerLedger) - : metrics.referenceFeeLevel; - - metrics.openLedgerFeeLevel == expectedCurFeeLevel - ? pass() - : fail( - "openLedgerFeeLevel: "s + - std::to_string(metrics.openLedgerFeeLevel.value()) + "/" + - std::to_string(expectedCurFeeLevel.value()), - __FILE__, - line); - } - void fillQueue(jtx::Env& env, jtx::Account const& account) { @@ -244,7 +154,7 @@ class TxQPosNegFlows_test : public beast::unit_test::suite // transactions as though they are ordinary transactions. 
auto const flagPerLedger = 1 + ripple::detail::numUpVotedAmendments(); auto const flagMaxQueue = ledgersInQueue * flagPerLedger; - checkMetrics(__LINE__, env, 0, flagMaxQueue, 0, flagPerLedger); + checkMetrics(*this, env, 0, flagMaxQueue, 0, flagPerLedger); // Pad a couple of txs with normal fees so the median comes // back down to normal @@ -255,7 +165,7 @@ class TxQPosNegFlows_test : public beast::unit_test::suite // metrics to reset to defaults, EXCEPT the maxQueue size. using namespace std::chrono_literals; env.close(env.now() + 5s, 10000ms); - checkMetrics(__LINE__, env, 0, flagMaxQueue, 0, expectedPerLedger); + checkMetrics(*this, env, 0, flagMaxQueue, 0, expectedPerLedger); auto const fees = env.current()->fees(); BEAST_EXPECT(fees.base == XRPAmount{base}); BEAST_EXPECT(fees.reserve == XRPAmount{reserve}); @@ -287,37 +197,37 @@ public: auto queued = ter(terQUEUED); auto const baseFee = env.current()->fees().base.drops(); - checkMetrics(__LINE__, env, 0, std::nullopt, 0, 3); + checkMetrics(*this, env, 0, std::nullopt, 0, 3); // Create several accounts while the fee is cheap so they all apply. env.fund(XRP(50000), noripple(alice, bob, charlie, daria)); - checkMetrics(__LINE__, env, 0, std::nullopt, 4, 3); + checkMetrics(*this, env, 0, std::nullopt, 4, 3); // Alice - price starts exploding: held env(noop(alice), queued); - checkMetrics(__LINE__, env, 1, std::nullopt, 4, 3); + checkMetrics(*this, env, 1, std::nullopt, 4, 3); // Bob with really high fee - applies env(noop(bob), fee(openLedgerCost(env))); - checkMetrics(__LINE__, env, 1, std::nullopt, 5, 3); + checkMetrics(*this, env, 1, std::nullopt, 5, 3); // Daria with low fee: hold env(noop(daria), fee(baseFee * 100), queued); - checkMetrics(__LINE__, env, 2, std::nullopt, 5, 3); + checkMetrics(*this, env, 2, std::nullopt, 5, 3); env.close(); // Verify that the held transactions got applied - checkMetrics(__LINE__, env, 0, 10, 2, 5); + checkMetrics(*this, env, 0, 10, 2, 5); ////////////////////////////////////////////////////////////// // Make some more accounts. We'll need them later to abuse the queue. env.fund(XRP(50000), noripple(elmo, fred, gwen, hank)); - checkMetrics(__LINE__, env, 0, 10, 6, 5); + checkMetrics(*this, env, 0, 10, 6, 5); // Now get a bunch of transactions held. env(noop(alice), fee(baseFee * 1.2), queued); - checkMetrics(__LINE__, env, 1, 10, 6, 5); + checkMetrics(*this, env, 1, 10, 6, 5); env(noop(bob), fee(baseFee), queued); // won't clear the queue env(noop(charlie), fee(baseFee * 2), queued); @@ -326,11 +236,11 @@ public: env(noop(fred), fee(baseFee * 1.9), queued); env(noop(gwen), fee(baseFee * 1.6), queued); env(noop(hank), fee(baseFee * 1.8), queued); - checkMetrics(__LINE__, env, 8, 10, 6, 5); + checkMetrics(*this, env, 8, 10, 6, 5); env.close(); // Verify that the held transactions got applied - checkMetrics(__LINE__, env, 1, 12, 7, 6); + checkMetrics(*this, env, 1, 12, 7, 6); // Bob's transaction is still stuck in the queue. 
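For context on the mechanical change running through the rest of this file: the local checkMetrics helper deleted above took a __LINE__ argument, and every rewritten call site now passes *this instead. The shared replacement comes from the include added at the top of this file's diff (its target is not visible here), so the declaration below is only a sketch of its presumed shape, inferred from the deleted helper and the new call sites, not the actual header contents:

    // Sketch only (assumption): same expectations as the removed local helper,
    // with the test suite passed in for pass()/fail() reporting in place of a
    // captured line number.
    void
    checkMetrics(
        beast::unit_test::suite& suite,
        jtx::Env& env,
        std::size_t expectedCount,
        std::optional<std::size_t> expectedMaxCount,
        std::size_t expectedInLedger,
        std::size_t expectedPerLedger,
        std::uint64_t expectedMinFeeLevel = 256,          // baseFeeLevel
        std::uint64_t expectedMedFeeLevel = 256 * 500);   // minEscalationFeeLevel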
@@ -339,45 +249,45 @@ public: // Hank sends another txn env(noop(hank), fee(baseFee), queued); // But he's not going to leave it in the queue - checkMetrics(__LINE__, env, 2, 12, 7, 6); + checkMetrics(*this, env, 2, 12, 7, 6); // Hank sees his txn got held and bumps the fee, // but doesn't even bump it enough to requeue env(noop(hank), fee(baseFee * 1.1), ter(telCAN_NOT_QUEUE_FEE)); - checkMetrics(__LINE__, env, 2, 12, 7, 6); + checkMetrics(*this, env, 2, 12, 7, 6); // Hank sees his txn got held and bumps the fee, // enough to requeue, but doesn't bump it enough to // apply to the ledger env(noop(hank), fee(baseFee * 600), queued); // But he's not going to leave it in the queue - checkMetrics(__LINE__, env, 2, 12, 7, 6); + checkMetrics(*this, env, 2, 12, 7, 6); // Hank sees his txn got held and bumps the fee, // high enough to get into the open ledger, because // he doesn't want to wait. env(noop(hank), fee(openLedgerCost(env))); - checkMetrics(__LINE__, env, 1, 12, 8, 6); + checkMetrics(*this, env, 1, 12, 8, 6); // Hank then sends another, less important txn // (In addition to the metrics, this will verify that // the original txn got removed.) env(noop(hank), fee(baseFee * 2), queued); - checkMetrics(__LINE__, env, 2, 12, 8, 6); + checkMetrics(*this, env, 2, 12, 8, 6); env.close(); // Verify that bob and hank's txns were applied - checkMetrics(__LINE__, env, 0, 16, 2, 8); + checkMetrics(*this, env, 0, 16, 2, 8); // Close again with a simulated time leap to // reset the escalation limit down to minimum env.close(env.now() + 5s, 10000ms); - checkMetrics(__LINE__, env, 0, 16, 0, 3); + checkMetrics(*this, env, 0, 16, 0, 3); // Then close once more without the time leap // to reset the queue maxsize down to minimum env.close(); - checkMetrics(__LINE__, env, 0, 6, 0, 3); + checkMetrics(*this, env, 0, 6, 0, 3); ////////////////////////////////////////////////////////////// @@ -390,7 +300,7 @@ public: env(noop(gwen), fee(largeFee)); env(noop(fred), fee(largeFee)); env(noop(elmo), fee(largeFee)); - checkMetrics(__LINE__, env, 0, 6, 4, 3); + checkMetrics(*this, env, 0, 6, 4, 3); // Use explicit fees so we can control which txn // will get dropped @@ -406,7 +316,7 @@ public: // Queue is full now. // clang-format off - checkMetrics(__LINE__, env, 6, 6, 4, 3, txFeeLevelByAccount(env, daria) + 1); + checkMetrics(*this, env, 6, 6, 4, 3, txFeeLevelByAccount(env, daria) + 1); // clang-format on // Try to add another transaction with the default (low) fee, // it should fail because the queue is full. @@ -419,7 +329,7 @@ public: // Queue is still full, of course, but the min fee has gone up // clang-format off - checkMetrics(__LINE__, env, 6, 6, 4, 3, txFeeLevelByAccount(env, elmo) + 1); + checkMetrics(*this, env, 6, 6, 4, 3, txFeeLevelByAccount(env, elmo) + 1); // clang-format on // Close out the ledger, the transactions are accepted, the @@ -428,11 +338,11 @@ public: // is put back in. Neat. 
env.close(); // clang-format off - checkMetrics(__LINE__, env, 2, 8, 5, 4, baseFeeLevel.fee(), calcMedFeeLevel(FeeLevel64{baseFeeLevel.fee() * largeFeeMultiplier})); + checkMetrics(*this, env, 2, 8, 5, 4, baseFeeLevel.fee(), calcMedFeeLevel(FeeLevel64{baseFeeLevel.fee() * largeFeeMultiplier})); // clang-format on env.close(); - checkMetrics(__LINE__, env, 0, 10, 2, 5); + checkMetrics(*this, env, 0, 10, 2, 5); ////////////////////////////////////////////////////////////// @@ -446,10 +356,10 @@ public: env(noop(daria)); env(pay(alice, iris, XRP(1000)), queued); env(noop(iris), seq(1), fee(baseFee * 2), ter(terNO_ACCOUNT)); - checkMetrics(__LINE__, env, 1, 10, 6, 5); + checkMetrics(*this, env, 1, 10, 6, 5); env.close(); - checkMetrics(__LINE__, env, 0, 12, 1, 6); + checkMetrics(*this, env, 0, 12, 1, 6); env.require(balance(iris, XRP(1000))); BEAST_EXPECT(env.seq(iris) == 11); @@ -475,7 +385,7 @@ public: ++metrics.txCount; checkMetrics( - __LINE__, + *this, env, metrics.txCount, metrics.txQMaxSize, @@ -496,14 +406,14 @@ public: auto queued = ter(terQUEUED); auto const baseFee = env.current()->fees().base.drops(); - checkMetrics(__LINE__, env, 0, std::nullopt, 0, 3); + checkMetrics(*this, env, 0, std::nullopt, 0, 3); // Fund alice and then fill the ledger. env.fund(XRP(50000), noripple(alice)); env(noop(alice)); env(noop(alice)); env(noop(alice)); - checkMetrics(__LINE__, env, 0, std::nullopt, 4, 3); + checkMetrics(*this, env, 0, std::nullopt, 4, 3); ////////////////////////////////////////////////////////////////// @@ -515,11 +425,11 @@ public: env(noop(alice), ticket::use(tkt1 - 2), ter(tefNO_TICKET)); env(noop(alice), ticket::use(tkt1 - 1), ter(terPRE_TICKET)); env.require(owners(alice, 0), tickets(alice, 0)); - checkMetrics(__LINE__, env, 1, std::nullopt, 4, 3); + checkMetrics(*this, env, 1, std::nullopt, 4, 3); env.close(); env.require(owners(alice, 250), tickets(alice, 250)); - checkMetrics(__LINE__, env, 0, 8, 1, 4); + checkMetrics(*this, env, 0, 8, 1, 4); BEAST_EXPECT(env.seq(alice) == tkt1 + 250); ////////////////////////////////////////////////////////////////// @@ -547,7 +457,7 @@ public: ticket::use(tkt1 + 13), fee(baseFee * 2.3), ter(telCAN_NOT_QUEUE_FULL)); - checkMetrics(__LINE__, env, 8, 8, 5, 4, expectedMinFeeLevel); + checkMetrics(*this, env, 8, 8, 5, 4, expectedMinFeeLevel); // Check which of the queued transactions got into the ledger by // attempting to replace them. @@ -579,7 +489,7 @@ public: // the queue. env(noop(alice), ticket::use(tkt1 + 13), ter(telCAN_NOT_QUEUE_FEE)); - checkMetrics(__LINE__, env, 3, 10, 6, 5); + checkMetrics(*this, env, 3, 10, 6, 5); ////////////////////////////////////////////////////////////////// @@ -610,7 +520,7 @@ public: env(noop(alice), seq(nextSeq + 5), queued); env(noop(alice), seq(nextSeq + 6), queued); env(noop(alice), seq(nextSeq + 7), ter(telCAN_NOT_QUEUE_FULL)); - checkMetrics(__LINE__, env, 10, 10, 6, 5, 257); + checkMetrics(*this, env, 10, 10, 6, 5, 257); // Check which of the queued transactions got into the ledger by // attempting to replace them. 
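The expected open-ledger fee levels asserted in these hunks follow the escalation formula that was visible in the removed local helper (and is presumably unchanged in the shared one); a small worked example with illustrative numbers:

    // When txInLedger > txPerLedger:
    //   openLedgerFeeLevel = medFeeLevel * txInLedger^2 / txPerLedger^2
    // otherwise it stays at the reference fee level (256).
    // Example (illustrative values): medFeeLevel = 256 * 500 = 128'000,
    // txInLedger = 5, txPerLedger = 3:
    //   128'000 * 5 * 5 / (3 * 3) = 355'555 (integer division)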
@@ -638,7 +548,7 @@ public: env(noop(alice), seq(nextSeq + 6), ter(telCAN_NOT_QUEUE_FEE)); env(noop(alice), seq(nextSeq + 7), ter(telCAN_NOT_QUEUE_FEE)); - checkMetrics(__LINE__, env, 4, 12, 7, 6); + checkMetrics(*this, env, 4, 12, 7, 6); BEAST_EXPECT(env.seq(alice) == nextSeq + 4); ////////////////////////////////////////////////////////////////// @@ -669,7 +579,7 @@ public: fee(baseFee * 2.1), ter(telCAN_NOT_QUEUE_FULL)); - checkMetrics(__LINE__, env, 10, 12, 7, 6); + checkMetrics(*this, env, 10, 12, 7, 6); env.close(); env.require(owners(alice, 231), tickets(alice, 231)); @@ -700,7 +610,7 @@ public: env(noop(alice), seq(nextSeq + 7), ter(telCAN_NOT_QUEUE_FEE)); BEAST_EXPECT(env.seq(alice) == nextSeq + 6); - checkMetrics(__LINE__, env, 6, 14, 8, 7); + checkMetrics(*this, env, 6, 14, 8, 7); ////////////////////////////////////////////////////////////////// @@ -739,7 +649,7 @@ public: env(noop(alice), seq(nextSeq + 7), ter(tefPAST_SEQ)); BEAST_EXPECT(env.seq(alice) == nextSeq + 8); - checkMetrics(__LINE__, env, 0, 16, 6, 8); + checkMetrics(*this, env, 0, 16, 6, 8); } void @@ -754,28 +664,28 @@ public: auto gw = Account("gw"); auto USD = gw["USD"]; - checkMetrics(__LINE__, env, 0, std::nullopt, 0, 2); + checkMetrics(*this, env, 0, std::nullopt, 0, 2); // Create accounts env.fund(XRP(50000), noripple(alice, gw)); - checkMetrics(__LINE__, env, 0, std::nullopt, 2, 2); + checkMetrics(*this, env, 0, std::nullopt, 2, 2); env.close(); - checkMetrics(__LINE__, env, 0, 4, 0, 2); + checkMetrics(*this, env, 0, 4, 0, 2); // Alice creates an unfunded offer while the ledger is not full env(offer(alice, XRP(1000), USD(1000)), ter(tecUNFUNDED_OFFER)); - checkMetrics(__LINE__, env, 0, 4, 1, 2); + checkMetrics(*this, env, 0, 4, 1, 2); fillQueue(env, alice); - checkMetrics(__LINE__, env, 0, 4, 3, 2); + checkMetrics(*this, env, 0, 4, 3, 2); // Alice creates an unfunded offer that goes in the queue env(offer(alice, XRP(1000), USD(1000)), ter(terQUEUED)); - checkMetrics(__LINE__, env, 1, 4, 3, 2); + checkMetrics(*this, env, 1, 4, 3, 2); // The offer comes out of the queue env.close(); - checkMetrics(__LINE__, env, 0, 6, 1, 3); + checkMetrics(*this, env, 0, 6, 1, 3); } void @@ -794,44 +704,44 @@ public: auto queued = ter(terQUEUED); auto const baseFee = env.current()->fees().base.drops(); - checkMetrics(__LINE__, env, 0, std::nullopt, 0, 2); + checkMetrics(*this, env, 0, std::nullopt, 0, 2); // Create several accounts while the fee is cheap so they all apply. env.fund(XRP(50000), noripple(alice, bob, charlie)); - checkMetrics(__LINE__, env, 0, std::nullopt, 3, 2); + checkMetrics(*this, env, 0, std::nullopt, 3, 2); // Future transaction for Alice - fails env(noop(alice), fee(openLedgerCost(env)), seq(env.seq(alice) + 1), ter(terPRE_SEQ)); - checkMetrics(__LINE__, env, 0, std::nullopt, 3, 2); + checkMetrics(*this, env, 0, std::nullopt, 3, 2); // Current transaction for Alice: held env(noop(alice), queued); - checkMetrics(__LINE__, env, 1, std::nullopt, 3, 2); + checkMetrics(*this, env, 1, std::nullopt, 3, 2); // Alice - sequence is too far ahead, so won't queue. 
env(noop(alice), seq(env.seq(alice) + 2), ter(telCAN_NOT_QUEUE)); - checkMetrics(__LINE__, env, 1, std::nullopt, 3, 2); + checkMetrics(*this, env, 1, std::nullopt, 3, 2); // Bob with really high fee - applies env(noop(bob), fee(openLedgerCost(env))); - checkMetrics(__LINE__, env, 1, std::nullopt, 4, 2); + checkMetrics(*this, env, 1, std::nullopt, 4, 2); // Daria with low fee: hold env(noop(charlie), fee(baseFee * 100), queued); - checkMetrics(__LINE__, env, 2, std::nullopt, 4, 2); + checkMetrics(*this, env, 2, std::nullopt, 4, 2); // Alice with normal fee: hold env(noop(alice), seq(env.seq(alice) + 1), queued); - checkMetrics(__LINE__, env, 3, std::nullopt, 4, 2); + checkMetrics(*this, env, 3, std::nullopt, 4, 2); env.close(); // Verify that the held transactions got applied // Alice's bad transaction applied from the // Local Txs. - checkMetrics(__LINE__, env, 0, 8, 4, 4); + checkMetrics(*this, env, 0, 8, 4, 4); } void @@ -853,7 +763,7 @@ public: auto queued = ter(terQUEUED); auto const baseFee = env.current()->fees().base.drops(); - checkMetrics(__LINE__, env, 0, std::nullopt, 0, 2); + checkMetrics(*this, env, 0, std::nullopt, 0, 2); // Fund across several ledgers so the TxQ metrics stay restricted. env.fund(XRP(1000), noripple(alice, bob)); @@ -863,11 +773,11 @@ public: env.fund(XRP(1000), noripple(edgar, felicia)); env.close(env.now() + 5s, 10000ms); - checkMetrics(__LINE__, env, 0, std::nullopt, 0, 2); + checkMetrics(*this, env, 0, std::nullopt, 0, 2); env(noop(bob)); env(noop(charlie)); env(noop(daria)); - checkMetrics(__LINE__, env, 0, std::nullopt, 3, 2); + checkMetrics(*this, env, 0, std::nullopt, 3, 2); BEAST_EXPECT(env.current()->info().seq == 6); // Fail to queue an item with a low LastLedgerSeq @@ -886,7 +796,7 @@ public: env(noop(charlie), fee(largeFee), queued); env(noop(daria), fee(largeFee), queued); env(noop(edgar), fee(largeFee), queued); - checkMetrics(__LINE__, env, 5, std::nullopt, 3, 2); + checkMetrics(*this, env, 5, std::nullopt, 3, 2); { auto& txQ = env.app().getTxQ(); auto aliceStat = txQ.getAccountTxs(alice.id()); @@ -910,7 +820,7 @@ public: } env.close(); - checkMetrics(__LINE__, env, 1, 6, 4, 3); + checkMetrics(*this, env, 1, 6, 4, 3); // Keep alice's transaction waiting. env(noop(bob), fee(largeFee), queued); @@ -918,12 +828,12 @@ public: env(noop(daria), fee(largeFee), queued); env(noop(edgar), fee(largeFee), queued); env(noop(felicia), fee(largeFee - 1), queued); - checkMetrics(__LINE__, env, 6, 6, 4, 3, 257); + checkMetrics(*this, env, 6, 6, 4, 3, 257); env.close(); // alice's transaction is still hanging around // clang-format off - checkMetrics(__LINE__, env, 1, 8, 5, 4, baseFeeLevel.fee(), baseFeeLevel.fee() * largeFeeMultiplier); + checkMetrics(*this, env, 1, 8, 5, 4, baseFeeLevel.fee(), baseFeeLevel.fee() * largeFeeMultiplier); // clang-format on BEAST_EXPECT(env.seq(alice) == 3); @@ -938,7 +848,7 @@ public: env(noop(edgar), fee(anotherLargeFee), queued); env(noop(felicia), fee(anotherLargeFee - 1), queued); env(noop(felicia), fee(anotherLargeFee - 1), seq(env.seq(felicia) + 1), queued); - checkMetrics(__LINE__, env, 8, 8, 5, 4, baseFeeLevel.fee() + 1, baseFeeLevel.fee() * largeFeeMultiplier); + checkMetrics(*this, env, 8, 8, 5, 4, baseFeeLevel.fee() + 1, baseFeeLevel.fee() * largeFeeMultiplier); // clang-format on env.close(); @@ -946,7 +856,7 @@ public: // into the ledger, so her transaction is gone, // though one of felicia's is still in the queue. 
// clang-format off - checkMetrics(__LINE__, env, 1, 10, 6, 5, baseFeeLevel.fee(), baseFeeLevel.fee() * largeFeeMultiplier); + checkMetrics(*this, env, 1, 10, 6, 5, baseFeeLevel.fee(), baseFeeLevel.fee() * largeFeeMultiplier); // clang-format on BEAST_EXPECT(env.seq(alice) == 3); BEAST_EXPECT(env.seq(felicia) == 7); @@ -954,7 +864,7 @@ public: env.close(); // And now the queue is empty // clang-format off - checkMetrics(__LINE__, env, 0, 12, 1, 6, baseFeeLevel.fee(), baseFeeLevel.fee() * anotherLargeFeeMultiplier); + checkMetrics(*this, env, 0, 12, 1, 6, baseFeeLevel.fee(), baseFeeLevel.fee() * anotherLargeFeeMultiplier); // clang-format on BEAST_EXPECT(env.seq(alice) == 3); BEAST_EXPECT(env.seq(felicia) == 8); @@ -976,7 +886,7 @@ public: auto queued = ter(terQUEUED); auto const baseFee = env.current()->fees().base.drops(); - checkMetrics(__LINE__, env, 0, std::nullopt, 0, 2); + checkMetrics(*this, env, 0, std::nullopt, 0, 2); // Fund across several ledgers so the TxQ metrics stay restricted. env.fund(XRP(1000), noripple(alice, bob)); @@ -988,21 +898,21 @@ public: env(noop(alice)); env(noop(alice)); env(noop(alice)); - checkMetrics(__LINE__, env, 0, std::nullopt, 3, 2); + checkMetrics(*this, env, 0, std::nullopt, 3, 2); env(noop(bob), queued); - checkMetrics(__LINE__, env, 1, std::nullopt, 3, 2); + checkMetrics(*this, env, 1, std::nullopt, 3, 2); // Since Alice's queue is empty this blocker can go into her queue. env(regkey(alice, bob), fee(0), queued); - checkMetrics(__LINE__, env, 2, std::nullopt, 3, 2); + checkMetrics(*this, env, 2, std::nullopt, 3, 2); // Close out this ledger so we can get a maxsize env.close(); - checkMetrics(__LINE__, env, 0, 6, 2, 3); + checkMetrics(*this, env, 0, 6, 2, 3); fillQueue(env, alice); - checkMetrics(__LINE__, env, 0, 6, 4, 3); + checkMetrics(*this, env, 0, 6, 4, 3); constexpr auto aliceFeeMultiplier = 3; auto feeAlice = baseFee * aliceFeeMultiplier; @@ -1013,12 +923,12 @@ public: feeAlice = (feeAlice + 1) * 125 / 100; ++seqAlice; } - checkMetrics(__LINE__, env, 4, 6, 4, 3); + checkMetrics(*this, env, 4, 6, 4, 3); // Bob adds a zero fee blocker to his queue. auto const seqBob = env.seq(bob); env(regkey(bob, alice), fee(0), queued); - checkMetrics(__LINE__, env, 5, 6, 4, 3); + checkMetrics(*this, env, 5, 6, 4, 3); // Carol fills the queue. auto feeCarol = feeAlice; @@ -1030,7 +940,7 @@ public: ++seqCarol; } // clang-format off - checkMetrics( __LINE__, env, 6, 6, 4, 3, baseFeeLevel.fee() * aliceFeeMultiplier + 1); + checkMetrics(*this, env, 6, 6, 4, 3, baseFeeLevel.fee() * aliceFeeMultiplier + 1); // clang-format on // Carol submits high enough to beat Bob's average fee which kicks @@ -1042,20 +952,20 @@ public: env.close(); // Some of Alice's transactions stay in the queue. Bob's // transaction returns to the TxQ. - checkMetrics(__LINE__, env, 5, 8, 5, 4); + checkMetrics(*this, env, 5, 8, 5, 4); BEAST_EXPECT(env.seq(alice) == seqAlice - 4); BEAST_EXPECT(env.seq(bob) == seqBob); BEAST_EXPECT(env.seq(carol) == seqCarol + 1); env.close(); // The remaining queued transactions flush through to the ledger. 
- checkMetrics(__LINE__, env, 0, 10, 5, 5); + checkMetrics(*this, env, 0, 10, 5, 5); BEAST_EXPECT(env.seq(alice) == seqAlice); BEAST_EXPECT(env.seq(bob) == seqBob + 1); BEAST_EXPECT(env.seq(carol) == seqCarol + 1); env.close(); - checkMetrics(__LINE__, env, 0, 10, 0, 5); + checkMetrics(*this, env, 0, 10, 0, 5); BEAST_EXPECT(env.seq(alice) == seqAlice); BEAST_EXPECT(env.seq(bob) == seqBob + 1); BEAST_EXPECT(env.seq(carol) == seqCarol + 1); @@ -1101,19 +1011,19 @@ public: auto queued = ter(terQUEUED); - checkMetrics(__LINE__, env, 0, std::nullopt, 0, 2); + checkMetrics(*this, env, 0, std::nullopt, 0, 2); env.fund(XRP(1000), noripple(alice, bob)); - checkMetrics(__LINE__, env, 0, std::nullopt, 2, 2); + checkMetrics(*this, env, 0, std::nullopt, 2, 2); // Fill the ledger env(noop(alice)); - checkMetrics(__LINE__, env, 0, std::nullopt, 3, 2); + checkMetrics(*this, env, 0, std::nullopt, 3, 2); // Put a transaction in the queue env(noop(alice), queued); - checkMetrics(__LINE__, env, 1, std::nullopt, 3, 2); + checkMetrics(*this, env, 1, std::nullopt, 3, 2); // Now cheat, and bypass the queue. { @@ -1131,12 +1041,12 @@ public: }); env.postconditions(jt, parsed); } - checkMetrics(__LINE__, env, 1, std::nullopt, 4, 2); + checkMetrics(*this, env, 1, std::nullopt, 4, 2); env.close(); // Alice's queued transaction failed in TxQ::accept // with tefPAST_SEQ - checkMetrics(__LINE__, env, 0, 8, 0, 4); + checkMetrics(*this, env, 0, 8, 0, 4); } void @@ -1158,7 +1068,7 @@ public: auto queued = ter(terQUEUED); - checkMetrics(__LINE__, env, 0, std::nullopt, 0, 3); + checkMetrics(*this, env, 0, std::nullopt, 0, 3); // ledgers in queue is 2 because of makeConfig auto const initQueueMax = initFee(env, 3, 2, 10, 200, 50); @@ -1166,11 +1076,11 @@ public: // Create several accounts while the fee is cheap so they all apply. env.fund(drops(2000), noripple(alice)); env.fund(XRP(500000), noripple(bob, charlie, daria)); - checkMetrics(__LINE__, env, 0, initQueueMax, 4, 3); + checkMetrics(*this, env, 0, initQueueMax, 4, 3); // Alice - price starts exploding: held env(noop(alice), fee(11), queued); - checkMetrics(__LINE__, env, 1, initQueueMax, 4, 3); + checkMetrics(*this, env, 1, initQueueMax, 4, 3); auto aliceSeq = env.seq(alice); auto bobSeq = env.seq(bob); @@ -1178,28 +1088,28 @@ public: // Alice - try to queue a second transaction, but leave a gap env(noop(alice), seq(aliceSeq + 2), fee(100), ter(telCAN_NOT_QUEUE)); - checkMetrics(__LINE__, env, 1, initQueueMax, 4, 3); + checkMetrics(*this, env, 1, initQueueMax, 4, 3); // Alice - queue a second transaction. Yay! env(noop(alice), seq(aliceSeq + 1), fee(13), queued); - checkMetrics(__LINE__, env, 2, initQueueMax, 4, 3); + checkMetrics(*this, env, 2, initQueueMax, 4, 3); // Alice - queue a third transaction. Yay. 
env(noop(alice), seq(aliceSeq + 2), fee(17), queued); - checkMetrics(__LINE__, env, 3, initQueueMax, 4, 3); + checkMetrics(*this, env, 3, initQueueMax, 4, 3); // Bob - queue a transaction env(noop(bob), queued); - checkMetrics(__LINE__, env, 4, initQueueMax, 4, 3); + checkMetrics(*this, env, 4, initQueueMax, 4, 3); // Bob - queue a second transaction env(noop(bob), seq(bobSeq + 1), fee(50), queued); - checkMetrics(__LINE__, env, 5, initQueueMax, 4, 3); + checkMetrics(*this, env, 5, initQueueMax, 4, 3); // Charlie - queue a transaction, with a higher fee // than default env(noop(charlie), fee(15), queued); - checkMetrics(__LINE__, env, 6, initQueueMax, 4, 3); + checkMetrics(*this, env, 6, initQueueMax, 4, 3); BEAST_EXPECT(env.seq(alice) == aliceSeq); BEAST_EXPECT(env.seq(bob) == bobSeq); @@ -1208,7 +1118,7 @@ public: env.close(); // Verify that all of but one of the queued transactions // got applied. - checkMetrics(__LINE__, env, 1, 8, 5, 4); + checkMetrics(*this, env, 1, 8, 5, 4); // Verify that the stuck transaction is Bob's second. // Even though it had a higher fee than Alice's and @@ -1230,7 +1140,7 @@ public: queued); ++aliceSeq; } - checkMetrics(__LINE__, env, 8, 8, 5, 4, 513); + checkMetrics(*this, env, 8, 8, 5, 4, 513); { auto& txQ = env.app().getTxQ(); auto aliceStat = txQ.getAccountTxs(alice.id()); @@ -1261,24 +1171,24 @@ public: json(jss::LastLedgerSequence, lastLedgerSeq + 7), fee(aliceFee), ter(telCAN_NOT_QUEUE_FULL)); - checkMetrics(__LINE__, env, 8, 8, 5, 4, 513); + checkMetrics(*this, env, 8, 8, 5, 4, 513); // Charlie - try to add another item to the queue, // which fails because fee is lower than Alice's // queued average. env(noop(charlie), fee(19), ter(telCAN_NOT_QUEUE_FULL)); - checkMetrics(__LINE__, env, 8, 8, 5, 4, 513); + checkMetrics(*this, env, 8, 8, 5, 4, 513); // Charlie - add another item to the queue, which // causes Alice's last txn to drop env(noop(charlie), fee(30), queued); - checkMetrics(__LINE__, env, 8, 8, 5, 4, 538); + checkMetrics(*this, env, 8, 8, 5, 4, 538); // Alice - now attempt to add one more to the queue, // which fails because the last tx was dropped, so // there is no complete chain. env(noop(alice), seq(aliceSeq), fee(aliceFee), ter(telCAN_NOT_QUEUE)); - checkMetrics(__LINE__, env, 8, 8, 5, 4, 538); + checkMetrics(*this, env, 8, 8, 5, 4, 538); // Alice wants this tx more than the dropped tx, // so resubmits with higher fee, but the queue @@ -1287,7 +1197,7 @@ public: seq(aliceSeq - 1), fee(aliceFee), ter(telCAN_NOT_QUEUE_FULL)); - checkMetrics(__LINE__, env, 8, 8, 5, 4, 538); + checkMetrics(*this, env, 8, 8, 5, 4, 538); // Try to replace a middle item in the queue // without enough fee. @@ -1297,18 +1207,18 @@ public: seq(aliceSeq), fee(aliceFee), ter(telCAN_NOT_QUEUE_FEE)); - checkMetrics(__LINE__, env, 8, 8, 5, 4, 538); + checkMetrics(*this, env, 8, 8, 5, 4, 538); // Replace a middle item from the queue successfully ++aliceFee; env(noop(alice), seq(aliceSeq), fee(aliceFee), queued); - checkMetrics(__LINE__, env, 8, 8, 5, 4, 538); + checkMetrics(*this, env, 8, 8, 5, 4, 538); env.close(); // Alice's transactions processed, along with // Charlie's, and the lost one is replayed and // added back to the queue. 
- checkMetrics(__LINE__, env, 4, 10, 6, 5); + checkMetrics(*this, env, 4, 10, 6, 5); aliceSeq = env.seq(alice) + 1; @@ -1322,18 +1232,18 @@ public: seq(aliceSeq), fee(aliceFee), ter(telCAN_NOT_QUEUE_BALANCE)); - checkMetrics(__LINE__, env, 4, 10, 6, 5); + checkMetrics(*this, env, 4, 10, 6, 5); // Try to spend more than Alice can afford with all the other txs. aliceSeq += 2; env(noop(alice), seq(aliceSeq), fee(aliceFee), ter(terINSUF_FEE_B)); - checkMetrics(__LINE__, env, 4, 10, 6, 5); + checkMetrics(*this, env, 4, 10, 6, 5); // Replace the last queued item with a transaction that will // bankrupt Alice --aliceFee; env(noop(alice), seq(aliceSeq), fee(aliceFee), queued); - checkMetrics(__LINE__, env, 4, 10, 6, 5); + checkMetrics(*this, env, 4, 10, 6, 5); // Alice - Attempt to queue a last transaction, but it // fails because the fee in flight is too high, before @@ -1344,14 +1254,14 @@ public: seq(aliceSeq), fee(aliceFee), ter(telCAN_NOT_QUEUE_BALANCE)); - checkMetrics(__LINE__, env, 4, 10, 6, 5); + checkMetrics(*this, env, 4, 10, 6, 5); env.close(); // All of Alice's transactions applied. - checkMetrics(__LINE__, env, 0, 12, 4, 6); + checkMetrics(*this, env, 0, 12, 4, 6); env.close(); - checkMetrics(__LINE__, env, 0, 12, 0, 6); + checkMetrics(*this, env, 0, 12, 0, 6); // Alice is broke env.require(balance(alice, XRP(0))); @@ -1361,17 +1271,17 @@ public: // account limit (10) txs. fillQueue(env, bob); bobSeq = env.seq(bob); - checkMetrics(__LINE__, env, 0, 12, 7, 6); + checkMetrics(*this, env, 0, 12, 7, 6); for (int i = 0; i < 10; ++i) env(noop(bob), seq(bobSeq + i), queued); - checkMetrics(__LINE__, env, 10, 12, 7, 6); + checkMetrics(*this, env, 10, 12, 7, 6); // Bob hit the single account limit env(noop(bob), seq(bobSeq + 10), ter(telCAN_NOT_QUEUE_FULL)); - checkMetrics(__LINE__, env, 10, 12, 7, 6); + checkMetrics(*this, env, 10, 12, 7, 6); // Bob can replace one of the earlier txs regardless // of the limit env(noop(bob), seq(bobSeq + 5), fee(20), queued); - checkMetrics(__LINE__, env, 10, 12, 7, 6); + checkMetrics(*this, env, 10, 12, 7, 6); // Try to replace a middle item in the queue // with enough fee to bankrupt bob and make the @@ -1382,7 +1292,7 @@ public: seq(bobSeq + 5), fee(bobFee), ter(telCAN_NOT_QUEUE_BALANCE)); - checkMetrics(__LINE__, env, 10, 12, 7, 6); + checkMetrics(*this, env, 10, 12, 7, 6); // Attempt to replace a middle item in the queue with enough fee // to bankrupt bob, and also to use fee averaging to clear out the @@ -1396,14 +1306,14 @@ public: seq(bobSeq + 5), fee(bobFee), ter(telCAN_NOT_QUEUE_BALANCE)); - checkMetrics(__LINE__, env, 10, 12, 7, 6); + checkMetrics(*this, env, 10, 12, 7, 6); // Close the ledger and verify that the queued transactions succeed // and bob has the right ending balance. env.close(); - checkMetrics(__LINE__, env, 3, 14, 8, 7); + checkMetrics(*this, env, 3, 14, 8, 7); env.close(); - checkMetrics(__LINE__, env, 0, 16, 3, 8); + checkMetrics(*this, env, 0, 16, 3, 8); env.require(balance(bob, drops(499'999'999'750))); } @@ -1431,20 +1341,20 @@ public: BEAST_EXPECT(env.current()->fees().base == 10); - checkMetrics(__LINE__, env, 0, std::nullopt, 0, 4); + checkMetrics(*this, env, 0, std::nullopt, 0, 4); // Create several accounts while the fee is cheap so they all apply. 
env.fund(XRP(50000), noripple(alice, bob, charlie, daria)); - checkMetrics(__LINE__, env, 0, std::nullopt, 4, 4); + checkMetrics(*this, env, 0, std::nullopt, 4, 4); env.close(); - checkMetrics(__LINE__, env, 0, 8, 0, 4); + checkMetrics(*this, env, 0, 8, 0, 4); env.fund(XRP(50000), noripple(elmo, fred, gwen, hank)); - checkMetrics(__LINE__, env, 0, 8, 4, 4); + checkMetrics(*this, env, 0, 8, 4, 4); env.close(); - checkMetrics(__LINE__, env, 0, 8, 0, 4); + checkMetrics(*this, env, 0, 8, 0, 4); ////////////////////////////////////////////////////////////// @@ -1455,7 +1365,7 @@ public: env(noop(gwen)); env(noop(fred)); env(noop(elmo)); - checkMetrics(__LINE__, env, 0, 8, 5, 4); + checkMetrics(*this, env, 0, 8, 5, 4); auto aliceSeq = env.seq(alice); auto bobSeq = env.seq(bob); @@ -1482,7 +1392,7 @@ public: // Queue is full now. Minimum fee now reflects the // lowest fee in the queue. auto minFeeLevel = txFeeLevelByAccount(env, alice); - checkMetrics(__LINE__, env, 8, 8, 5, 4, minFeeLevel + 1); + checkMetrics(*this, env, 8, 8, 5, 4, minFeeLevel + 1); // Try to add another transaction with the default (low) fee, // it should fail because it can't replace the one already @@ -1495,13 +1405,13 @@ public: env(noop(charlie), fee(100), seq(charlieSeq + 1), queued); // Queue is still full. - checkMetrics(__LINE__, env, 8, 8, 5, 4, minFeeLevel + 1); + checkMetrics(*this, env, 8, 8, 5, 4, minFeeLevel + 1); // Six txs are processed out of the queue into the ledger, // leaving two txs. The dropped tx is retried from localTxs, and // put back into the queue. env.close(); - checkMetrics(__LINE__, env, 3, 10, 6, 5); + checkMetrics(*this, env, 3, 10, 6, 5); // This next test should remain unchanged regardless of // transaction ordering @@ -1587,7 +1497,7 @@ public: env(noop(gwen), seq(gwenSeq + qTxCount1[gwen.id()]++), fee(15), queued); minFeeLevel = txFeeLevelByAccount(env, gwen) + 1; - checkMetrics(__LINE__, env, 10, 10, 6, 5, minFeeLevel); + checkMetrics(*this, env, 10, 10, 6, 5, minFeeLevel); // Add another transaction, with a higher fee, // Not high enough to get into the ledger, but high @@ -1597,13 +1507,13 @@ public: seq(aliceSeq + qTxCount1[alice.id()]++), queued); - checkMetrics(__LINE__, env, 10, 10, 6, 5, minFeeLevel); + checkMetrics(*this, env, 10, 10, 6, 5, minFeeLevel); // Seven txs are processed out of the queue, leaving 3. One // dropped tx is retried from localTxs, and put back into the // queue. env.close(); - checkMetrics(__LINE__, env, 4, 12, 7, 6); + checkMetrics(*this, env, 4, 12, 7, 6); // Refresh the queue counts auto qTxCount2 = getTxsQueued(); @@ -1668,13 +1578,13 @@ public: auto alice = Account("alice"); - checkMetrics(__LINE__, env, 0, std::nullopt, 0, 1); + checkMetrics(*this, env, 0, std::nullopt, 0, 1); env.fund(XRP(50000), noripple(alice)); - checkMetrics(__LINE__, env, 0, std::nullopt, 1, 1); + checkMetrics(*this, env, 0, std::nullopt, 1, 1); env(fset(alice, asfAccountTxnID)); - checkMetrics(__LINE__, env, 0, std::nullopt, 2, 1); + checkMetrics(*this, env, 0, std::nullopt, 2, 1); // Immediately after the fset, the sfAccountTxnID field // is still uninitialized, so preflight succeeds here, @@ -1683,14 +1593,14 @@ public: json(R"({"AccountTxnID": "0"})"), ter(telCAN_NOT_QUEUE)); - checkMetrics(__LINE__, env, 0, std::nullopt, 2, 1); + checkMetrics(*this, env, 0, std::nullopt, 2, 1); env.close(); // The failed transaction is retried from LocalTx // and succeeds. 
- checkMetrics(__LINE__, env, 0, 4, 1, 2); + checkMetrics(*this, env, 0, 4, 1, 2); env(noop(alice)); - checkMetrics(__LINE__, env, 0, 4, 2, 2); + checkMetrics(*this, env, 0, 4, 2, 2); env(noop(alice), json(R"({"AccountTxnID": "0"})"), ter(tefWRONG_PRIOR)); } @@ -1714,10 +1624,10 @@ public: auto alice = Account("alice"); - checkMetrics(__LINE__, env, 0, std::nullopt, 0, 2); + checkMetrics(*this, env, 0, std::nullopt, 0, 2); env.fund(XRP(50000), noripple(alice)); - checkMetrics(__LINE__, env, 0, std::nullopt, 1, 2); + checkMetrics(*this, env, 0, std::nullopt, 1, 2); FeeLevel64 medFeeLevel; for (int i = 0; i < 10; ++i) @@ -1737,12 +1647,12 @@ public: env(noop(alice), fee(cost)); } - checkMetrics(__LINE__, env, 0, std::nullopt, 11, 2); + checkMetrics(*this, env, 0, std::nullopt, 11, 2); env.close(); // If not for the maximum, the per ledger would be 11. // clang-format off - checkMetrics(__LINE__, env, 0, 10, 0, 5, baseFeeLevel.fee(), calcMedFeeLevel(medFeeLevel)); + checkMetrics(*this, env, 0, 10, 0, 5, baseFeeLevel.fee(), calcMedFeeLevel(medFeeLevel)); // clang-format on } @@ -1831,22 +1741,22 @@ public: // ledgers in queue is 2 because of makeConfig auto const initQueueMax = initFee(env, 3, 2, 10, 200, 50); - checkMetrics(__LINE__, env, 0, initQueueMax, 0, 3); + checkMetrics(*this, env, 0, initQueueMax, 0, 3); env.fund(drops(5000), noripple(alice)); env.fund(XRP(50000), noripple(bob)); - checkMetrics(__LINE__, env, 0, initQueueMax, 2, 3); + checkMetrics(*this, env, 0, initQueueMax, 2, 3); auto USD = bob["USD"]; env(offer(alice, USD(5000), drops(5000)), require(owners(alice, 1))); - checkMetrics(__LINE__, env, 0, initQueueMax, 3, 3); + checkMetrics(*this, env, 0, initQueueMax, 3, 3); env.close(); - checkMetrics(__LINE__, env, 0, 6, 0, 3); + checkMetrics(*this, env, 0, 6, 0, 3); // Fill up the ledger fillQueue(env, alice); - checkMetrics(__LINE__, env, 0, 6, 4, 3); + checkMetrics(*this, env, 0, 6, 4, 3); // Queue up a couple of transactions, plus one // more expensive one. @@ -1855,7 +1765,7 @@ public: env(noop(alice), seq(aliceSeq++), queued); env(noop(alice), seq(aliceSeq++), queued); env(noop(alice), fee(drops(1000)), seq(aliceSeq), queued); - checkMetrics(__LINE__, env, 4, 6, 4, 3); + checkMetrics(*this, env, 4, 6, 4, 3); // This offer should take Alice's offer // up to Alice's reserve. @@ -1863,7 +1773,7 @@ public: fee(openLedgerCost(env)), require( balance(alice, drops(250)), owners(alice, 1), lines(alice, 1))); - checkMetrics(__LINE__, env, 4, 6, 5, 3); + checkMetrics(*this, env, 4, 6, 5, 3); // Try adding a new transaction. // Too many fees in flight. @@ -1871,12 +1781,12 @@ public: fee(drops(200)), seq(aliceSeq + 1), ter(telCAN_NOT_QUEUE_BALANCE)); - checkMetrics(__LINE__, env, 4, 6, 5, 3); + checkMetrics(*this, env, 4, 6, 5, 3); // Close the ledger. All of Alice's transactions // take a fee, except the last one. env.close(); - checkMetrics(__LINE__, env, 1, 10, 3, 5); + checkMetrics(*this, env, 1, 10, 3, 5); env.require(balance(alice, drops(250 - 30))); // Still can't add a new transaction for Alice, @@ -1885,7 +1795,7 @@ public: fee(drops(200)), seq(aliceSeq + 1), ter(telCAN_NOT_QUEUE_BALANCE)); - checkMetrics(__LINE__, env, 1, 10, 3, 5); + checkMetrics(*this, env, 1, 10, 3, 5); /* At this point, Alice's transaction is indefinitely stuck in the queue. 
Eventually it will either @@ -1897,13 +1807,13 @@ public: for (int i = 0; i < 9; ++i) { env.close(); - checkMetrics(__LINE__, env, 1, 10, 0, 5); + checkMetrics(*this, env, 1, 10, 0, 5); } // And Alice's transaction expires (via the retry limit, // not LastLedgerSequence). env.close(); - checkMetrics(__LINE__, env, 0, 10, 0, 5); + checkMetrics(*this, env, 0, 10, 0, 5); } void @@ -1922,11 +1832,11 @@ public: Env env(*this, makeConfig({{"minimum_txn_in_ledger_standalone", "3"}})); auto const baseFee = env.current()->fees().base.drops(); - checkMetrics(__LINE__, env, 0, std::nullopt, 0, 3); + checkMetrics(*this, env, 0, std::nullopt, 0, 3); env.fund(XRP(50000), noripple(alice, bob)); env.memoize(charlie); - checkMetrics(__LINE__, env, 0, std::nullopt, 2, 3); + checkMetrics(*this, env, 0, std::nullopt, 2, 3); { // Cannot put a blocker in an account's queue if that queue // already holds two or more (non-blocker) entries. @@ -1935,7 +1845,7 @@ public: env(noop(alice)); // Set a regular key just to clear the password spent flag env(regkey(alice, charlie)); - checkMetrics(__LINE__, env, 0, std::nullopt, 4, 3); + checkMetrics(*this, env, 0, std::nullopt, 4, 3); // Put two "normal" txs in the queue auto const aliceSeq = env.seq(alice); @@ -1961,11 +1871,11 @@ public: // Other accounts are not affected env(noop(bob), queued); - checkMetrics(__LINE__, env, 3, std::nullopt, 4, 3); + checkMetrics(*this, env, 3, std::nullopt, 4, 3); // Drain the queue. env.close(); - checkMetrics(__LINE__, env, 0, 8, 4, 4); + checkMetrics(*this, env, 0, 8, 4, 4); } { // Replace a lone non-blocking tx with a blocker. @@ -2006,7 +1916,7 @@ public: // Drain the queue. env.close(); - checkMetrics(__LINE__, env, 0, 10, 3, 5); + checkMetrics(*this, env, 0, 10, 3, 5); } { // Put a blocker in an empty queue. @@ -2034,7 +1944,7 @@ public: // Drain the queue. env.close(); - checkMetrics(__LINE__, env, 0, 12, 3, 6); + checkMetrics(*this, env, 0, 12, 3, 6); } } @@ -2054,12 +1964,12 @@ public: Env env(*this, makeConfig({{"minimum_txn_in_ledger_standalone", "3"}})); auto const baseFee = env.current()->fees().base.drops(); - checkMetrics(__LINE__, env, 0, std::nullopt, 0, 3); + checkMetrics(*this, env, 0, std::nullopt, 0, 3); env.fund(XRP(50000), noripple(alice, bob)); env.memoize(charlie); - checkMetrics(__LINE__, env, 0, std::nullopt, 2, 3); + checkMetrics(*this, env, 0, std::nullopt, 2, 3); std::uint32_t tkt{env.seq(alice) + 1}; { @@ -2070,7 +1980,7 @@ public: env(ticket::create(alice, 250), seq(tkt - 1)); // Set a regular key just to clear the password spent flag env(regkey(alice, charlie)); - checkMetrics(__LINE__, env, 0, std::nullopt, 4, 3); + checkMetrics(*this, env, 0, std::nullopt, 4, 3); // Put two "normal" txs in the queue auto const aliceSeq = env.seq(alice); @@ -2100,11 +2010,11 @@ public: // Other accounts are not affected env(noop(bob), queued); - checkMetrics(__LINE__, env, 3, std::nullopt, 4, 3); + checkMetrics(*this, env, 3, std::nullopt, 4, 3); // Drain the queue and local transactions. env.close(); - checkMetrics(__LINE__, env, 0, 8, 5, 4); + checkMetrics(*this, env, 0, 8, 5, 4); // Show that the local transactions have flushed through as well. BEAST_EXPECT(env.seq(alice) == aliceSeq + 1); @@ -2166,7 +2076,7 @@ public: // Drain the queue. env.close(); - checkMetrics(__LINE__, env, 0, 10, 4, 5); + checkMetrics(*this, env, 0, 10, 4, 5); // Show that the local transactions have flushed through as well. BEAST_EXPECT(env.seq(alice) == aliceSeq + 1); @@ -2200,7 +2110,7 @@ public: // Drain the queue. 
env.close(); - checkMetrics(__LINE__, env, 0, 12, 3, 6); + checkMetrics(*this, env, 0, 12, 3, 6); } } @@ -2232,10 +2142,10 @@ public: auto limit = 3; - checkMetrics(__LINE__, env, 0, initQueueMax, 0, limit); + checkMetrics(*this, env, 0, initQueueMax, 0, limit); env.fund(XRP(50000), noripple(alice, charlie), gw); - checkMetrics(__LINE__, env, 0, initQueueMax, limit + 1, limit); + checkMetrics(*this, env, 0, initQueueMax, limit + 1, limit); auto USD = gw["USD"]; auto BUX = gw["BUX"]; @@ -2250,16 +2160,16 @@ public: // If this offer crosses, all of alice's // XRP will be taken (except the reserve). env(offer(alice, BUX(5000), XRP(50000)), queued); - checkMetrics(__LINE__, env, 1, initQueueMax, limit + 1, limit); + checkMetrics(*this, env, 1, initQueueMax, limit + 1, limit); // But because the reserve is protected, another // transaction will be allowed to queue env(noop(alice), seq(aliceSeq + 1), queued); - checkMetrics(__LINE__, env, 2, initQueueMax, limit + 1, limit); + checkMetrics(*this, env, 2, initQueueMax, limit + 1, limit); env.close(); ++limit; - checkMetrics(__LINE__, env, 0, limit * 2, 2, limit); + checkMetrics(*this, env, 0, limit * 2, 2, limit); // But once we close the ledger, we find alice // has plenty of XRP, because the offer didn't @@ -2271,7 +2181,7 @@ public: ////////////////////////////////////////// // Offer with high XRP out and high total fee blocks later txs fillQueue(env, alice); - checkMetrics(__LINE__, env, 0, limit * 2, limit + 1, limit); + checkMetrics(*this, env, 0, limit * 2, limit + 1, limit); aliceSeq = env.seq(alice); aliceBal = env.balance(alice); @@ -2279,12 +2189,12 @@ public: // Alice creates an offer with a fee of half the reserve env(offer(alice, BUX(5000), XRP(50000)), fee(drops(100)), queued); - checkMetrics(__LINE__, env, 1, limit * 2, limit + 1, limit); + checkMetrics(*this, env, 1, limit * 2, limit + 1, limit); // Alice creates another offer with a fee // that brings the total to just shy of the reserve env(noop(alice), fee(drops(99)), seq(aliceSeq + 1), queued); - checkMetrics(__LINE__, env, 2, limit * 2, limit + 1, limit); + checkMetrics(*this, env, 2, limit * 2, limit + 1, limit); // So even a noop will look like alice // doesn't have the balance to pay the fee @@ -2292,11 +2202,11 @@ public: fee(drops(51)), seq(aliceSeq + 2), ter(terINSUF_FEE_B)); - checkMetrics(__LINE__, env, 2, limit * 2, limit + 1, limit); + checkMetrics(*this, env, 2, limit * 2, limit + 1, limit); env.close(); ++limit; - checkMetrics(__LINE__, env, 0, limit * 2, 3, limit); + checkMetrics(*this, env, 0, limit * 2, 3, limit); // But once we close the ledger, we find alice // has plenty of XRP, because the offer didn't @@ -2308,7 +2218,7 @@ public: ////////////////////////////////////////// // Offer with high XRP out and super high fee blocks later txs fillQueue(env, alice); - checkMetrics(__LINE__, env, 0, limit * 2, limit + 1, limit); + checkMetrics(*this, env, 0, limit * 2, limit + 1, limit); aliceSeq = env.seq(alice); aliceBal = env.balance(alice); @@ -2317,7 +2227,7 @@ public: // Alice creates an offer with a fee larger than the reserve // This one can queue because it's the first in the queue for alice env(offer(alice, BUX(5000), XRP(50000)), fee(drops(300)), queued); - checkMetrics(__LINE__, env, 1, limit * 2, limit + 1, limit); + checkMetrics(*this, env, 1, limit * 2, limit + 1, limit); // So even a noop will look like alice // doesn't have the balance to pay the fee @@ -2325,11 +2235,11 @@ public: fee(drops(51)), seq(aliceSeq + 1), ter(telCAN_NOT_QUEUE_BALANCE)); - 
checkMetrics(__LINE__, env, 1, limit * 2, limit + 1, limit); + checkMetrics(*this, env, 1, limit * 2, limit + 1, limit); env.close(); ++limit; - checkMetrics(__LINE__, env, 0, limit * 2, 2, limit); + checkMetrics(*this, env, 0, limit * 2, 2, limit); // But once we close the ledger, we find alice // has plenty of XRP, because the offer didn't @@ -2341,7 +2251,7 @@ public: ////////////////////////////////////////// // Offer with low XRP out allows later txs fillQueue(env, alice); - checkMetrics(__LINE__, env, 0, limit * 2, limit + 1, limit); + checkMetrics(*this, env, 0, limit * 2, limit + 1, limit); aliceSeq = env.seq(alice); aliceBal = env.balance(alice); @@ -2351,11 +2261,11 @@ public: // And later transactions are just fine env(noop(alice), seq(aliceSeq + 1), queued); - checkMetrics(__LINE__, env, 2, limit * 2, limit + 1, limit); + checkMetrics(*this, env, 2, limit * 2, limit + 1, limit); env.close(); ++limit; - checkMetrics(__LINE__, env, 0, limit * 2, 2, limit); + checkMetrics(*this, env, 0, limit * 2, 2, limit); // But once we close the ledger, we find alice // has plenty of XRP, because the offer didn't @@ -2367,7 +2277,7 @@ public: ////////////////////////////////////////// // Large XRP payment doesn't block later txs fillQueue(env, alice); - checkMetrics(__LINE__, env, 0, limit * 2, limit + 1, limit); + checkMetrics(*this, env, 0, limit * 2, limit + 1, limit); aliceSeq = env.seq(alice); aliceBal = env.balance(alice); @@ -2380,11 +2290,11 @@ public: // But because the reserve is protected, another // transaction will be allowed to queue env(noop(alice), seq(aliceSeq + 1), queued); - checkMetrics(__LINE__, env, 2, limit * 2, limit + 1, limit); + checkMetrics(*this, env, 2, limit * 2, limit + 1, limit); env.close(); ++limit; - checkMetrics(__LINE__, env, 0, limit * 2, 2, limit); + checkMetrics(*this, env, 0, limit * 2, 2, limit); // But once we close the ledger, we find alice // still has most of her balance, because the @@ -2394,7 +2304,7 @@ public: ////////////////////////////////////////// // Small XRP payment allows later txs fillQueue(env, alice); - checkMetrics(__LINE__, env, 0, limit * 2, limit + 1, limit); + checkMetrics(*this, env, 0, limit * 2, limit + 1, limit); aliceSeq = env.seq(alice); aliceBal = env.balance(alice); @@ -2405,11 +2315,11 @@ public: // And later transactions are just fine env(noop(alice), seq(aliceSeq + 1), queued); - checkMetrics(__LINE__, env, 2, limit * 2, limit + 1, limit); + checkMetrics(*this, env, 2, limit * 2, limit + 1, limit); env.close(); ++limit; - checkMetrics(__LINE__, env, 0, limit * 2, 2, limit); + checkMetrics(*this, env, 0, limit * 2, 2, limit); // The payment succeeds env.require( @@ -2420,19 +2330,19 @@ public: auto const amount = USD(500000); env(trust(alice, USD(50000000))); env(trust(charlie, USD(50000000))); - checkMetrics(__LINE__, env, 0, limit * 2, 4, limit); + checkMetrics(*this, env, 0, limit * 2, 4, limit); // Close so we don't have to deal // with tx ordering in consensus. env.close(); env(pay(gw, alice, amount)); - checkMetrics(__LINE__, env, 0, limit * 2, 1, limit); + checkMetrics(*this, env, 0, limit * 2, 1, limit); // Close so we don't have to deal // with tx ordering in consensus. 
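// Taken together, the cases above probe how the queue accounts for XRP that
// queued transactions could spend: an offer with a large XRP out is treated,
// for queuing purposes, as if it could take everything but the reserve, and
// once the fees already in flight for the account approach what remains,
// further entries are refused (telCAN_NOT_QUEUE_BALANCE or terINSUF_FEE_B in
// the cases above). After the ledger closes and the offers fail to cross,
// the balance is intact apart from the fees actually paid.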
env.close(); fillQueue(env, alice); - checkMetrics(__LINE__, env, 0, limit * 2, limit + 1, limit); + checkMetrics(*this, env, 0, limit * 2, limit + 1, limit); aliceSeq = env.seq(alice); aliceBal = env.balance(alice); @@ -2445,11 +2355,11 @@ public: // But that's fine, because it doesn't affect // alice's XRP balance (other than the fee, of course). env(noop(alice), seq(aliceSeq + 1), queued); - checkMetrics(__LINE__, env, 2, limit * 2, limit + 1, limit); + checkMetrics(*this, env, 2, limit * 2, limit + 1, limit); env.close(); ++limit; - checkMetrics(__LINE__, env, 0, limit * 2, 2, limit); + checkMetrics(*this, env, 0, limit * 2, 2, limit); // So once we close the ledger, alice has her // XRP balance, but her USD balance went to charlie. @@ -2469,7 +2379,7 @@ public: env.close(); fillQueue(env, charlie); - checkMetrics(__LINE__, env, 0, limit * 2, limit + 1, limit); + checkMetrics(*this, env, 0, limit * 2, limit + 1, limit); aliceSeq = env.seq(alice); aliceBal = env.balance(alice); @@ -2485,11 +2395,11 @@ public: // But because the reserve is protected, another // transaction will be allowed to queue env(noop(alice), seq(aliceSeq + 1), queued); - checkMetrics(__LINE__, env, 2, limit * 2, limit + 1, limit); + checkMetrics(*this, env, 2, limit * 2, limit + 1, limit); env.close(); ++limit; - checkMetrics(__LINE__, env, 0, limit * 2, 2, limit); + checkMetrics(*this, env, 0, limit * 2, 2, limit); // So once we close the ledger, alice sent a payment // to charlie using only a portion of her XRP balance @@ -2504,7 +2414,7 @@ public: // Small XRP to IOU payment allows later txs. fillQueue(env, charlie); - checkMetrics(__LINE__, env, 0, limit * 2, limit + 1, limit); + checkMetrics(*this, env, 0, limit * 2, limit + 1, limit); aliceSeq = env.seq(alice); aliceBal = env.balance(alice); @@ -2519,11 +2429,11 @@ public: // And later transactions are just fine env(noop(alice), seq(aliceSeq + 1), queued); - checkMetrics(__LINE__, env, 2, limit * 2, limit + 1, limit); + checkMetrics(*this, env, 2, limit * 2, limit + 1, limit); env.close(); ++limit; - checkMetrics(__LINE__, env, 0, limit * 2, 2, limit); + checkMetrics(*this, env, 0, limit * 2, 2, limit); // So once we close the ledger, alice sent a payment // to charlie using only a portion of her XRP balance @@ -2540,7 +2450,7 @@ public: env.close(); fillQueue(env, charlie); - checkMetrics(__LINE__, env, 0, limit * 2, limit + 1, limit); + checkMetrics(*this, env, 0, limit * 2, limit + 1, limit); aliceSeq = env.seq(alice); aliceBal = env.balance(alice); @@ -2550,11 +2460,11 @@ public: env(noop(alice), seq(aliceSeq + 1), ter(terINSUF_FEE_B)); BEAST_EXPECT(env.balance(alice) == drops(30)); - checkMetrics(__LINE__, env, 1, limit * 2, limit + 1, limit); + checkMetrics(*this, env, 1, limit * 2, limit + 1, limit); env.close(); ++limit; - checkMetrics(__LINE__, env, 0, limit * 2, 1, limit); + checkMetrics(*this, env, 0, limit * 2, 1, limit); BEAST_EXPECT(env.balance(alice) == drops(5)); } @@ -2639,27 +2549,27 @@ public: Env env(*this, makeConfig({{"minimum_txn_in_ledger_standalone", "3"}})); auto const baseFee = env.current()->fees().base.drops(); - checkMetrics(__LINE__, env, 0, std::nullopt, 0, 3); + checkMetrics(*this, env, 0, std::nullopt, 0, 3); // Fund accounts while the fee is cheap so they all apply. 
env.fund(XRP(50000), noripple(alice, bob, charlie)); - checkMetrics(__LINE__, env, 0, std::nullopt, 3, 3); + checkMetrics(*this, env, 0, std::nullopt, 3, 3); // Alice - no fee change yet env(noop(alice)); - checkMetrics(__LINE__, env, 0, std::nullopt, 4, 3); + checkMetrics(*this, env, 0, std::nullopt, 4, 3); // Bob with really high fee - applies env(noop(bob), fee(openLedgerCost(env))); - checkMetrics(__LINE__, env, 0, std::nullopt, 5, 3); + checkMetrics(*this, env, 0, std::nullopt, 5, 3); // Charlie with low fee: queued env(noop(charlie), fee(baseFee * 100), queued); - checkMetrics(__LINE__, env, 1, std::nullopt, 5, 3); + checkMetrics(*this, env, 1, std::nullopt, 5, 3); env.close(); // Verify that the queued transaction was applied - checkMetrics(__LINE__, env, 0, 10, 1, 5); + checkMetrics(*this, env, 0, 10, 1, 5); ///////////////////////////////////////////////////////////////// @@ -2670,7 +2580,7 @@ public: env(noop(bob), fee(baseFee * 100)); env(noop(bob), fee(baseFee * 100)); env(noop(bob), fee(baseFee * 100)); - checkMetrics(__LINE__, env, 0, 10, 6, 5); + checkMetrics(*this, env, 0, 10, 6, 5); // Use explicit fees so we can control which txn // will get dropped @@ -2695,7 +2605,7 @@ public: env(noop(alice), fee(baseFee * 2.1), seq(aliceSeq++), queued); // Queue is full now. - checkMetrics(__LINE__, env, 10, 10, 6, 5, expectedFeeLevel + 1); + checkMetrics(*this, env, 10, 10, 6, 5, expectedFeeLevel + 1); // Try to add another transaction with the default (low) fee, // it should fail because the queue is full. @@ -2825,7 +2735,7 @@ public: auto const bob = Account("bob"); env.fund(XRP(500000), noripple(alice, bob)); - checkMetrics(__LINE__, env, 0, std::nullopt, 2, 1); + checkMetrics(*this, env, 0, std::nullopt, 2, 1); auto const aliceSeq = env.seq(alice); BEAST_EXPECT(env.current()->info().seq == 3); @@ -2845,7 +2755,7 @@ public: seq(aliceSeq + 3), json(R"({"LastLedgerSequence":11})"), ter(terQUEUED)); - checkMetrics(__LINE__, env, 4, std::nullopt, 2, 1); + checkMetrics(*this, env, 4, std::nullopt, 2, 1); auto const bobSeq = env.seq(bob); // Ledger 4 gets 3, // Ledger 5 gets 4, @@ -2854,17 +2764,17 @@ public: { env(noop(bob), seq(bobSeq + i), fee(baseFee * 20), ter(terQUEUED)); } - checkMetrics(__LINE__, env, 4 + 3 + 4 + 5, std::nullopt, 2, 1); + checkMetrics(*this, env, 4 + 3 + 4 + 5, std::nullopt, 2, 1); // Close ledger 3 env.close(); - checkMetrics(__LINE__, env, 4 + 4 + 5, 20, 3, 2); + checkMetrics(*this, env, 4 + 4 + 5, 20, 3, 2); // Close ledger 4 env.close(); - checkMetrics(__LINE__, env, 4 + 5, 30, 4, 3); + checkMetrics(*this, env, 4 + 5, 30, 4, 3); // Close ledger 5 env.close(); // Alice's first two txs expired. - checkMetrics(__LINE__, env, 2, 40, 5, 4); + checkMetrics(*this, env, 2, 40, 5, 4); // Because aliceSeq is missing, aliceSeq + 1 fails env(noop(alice), seq(aliceSeq + 1), ter(terPRE_SEQ)); @@ -2873,27 +2783,27 @@ public: env(fset(alice, asfAccountTxnID), seq(aliceSeq), ter(telCAN_NOT_QUEUE_BLOCKS)); - checkMetrics(__LINE__, env, 2, 40, 5, 4); + checkMetrics(*this, env, 2, 40, 5, 4); // However we can fill the gap with a non-blocker. env(noop(alice), seq(aliceSeq), fee(baseFee * 2), ter(terQUEUED)); - checkMetrics(__LINE__, env, 3, 40, 5, 4); + checkMetrics(*this, env, 3, 40, 5, 4); // Attempt to queue up a new aliceSeq + 1 tx that's a blocker. env(fset(alice, asfAccountTxnID), seq(aliceSeq + 1), ter(telCAN_NOT_QUEUE_BLOCKS)); - checkMetrics(__LINE__, env, 3, 40, 5, 4); + checkMetrics(*this, env, 3, 40, 5, 4); // Queue up a non-blocker replacement for aliceSeq + 1. 
env(noop(alice), seq(aliceSeq + 1), fee(baseFee * 2), ter(terQUEUED)); - checkMetrics(__LINE__, env, 4, 40, 5, 4); + checkMetrics(*this, env, 4, 40, 5, 4); // Close ledger 6 env.close(); // We expect that all of alice's queued tx's got into // the open ledger. - checkMetrics(__LINE__, env, 0, 50, 4, 5); + checkMetrics(*this, env, 0, 50, 4, 5); BEAST_EXPECT(env.seq(alice) == aliceSeq + 4); } @@ -2927,7 +2837,7 @@ public: auto const bob = Account("bob"); env.fund(XRP(500000), noripple(alice, bob)); - checkMetrics(__LINE__, env, 0, std::nullopt, 2, 1); + checkMetrics(*this, env, 0, std::nullopt, 2, 1); auto const aliceSeq = env.seq(alice); BEAST_EXPECT(env.current()->info().seq == 3); @@ -2974,7 +2884,7 @@ public: seq(aliceSeq + 19), json(R"({"LastLedgerSequence":11})"), ter(terQUEUED)); - checkMetrics(__LINE__, env, 10, std::nullopt, 2, 1); + checkMetrics(*this, env, 10, std::nullopt, 2, 1); auto const bobSeq = env.seq(bob); // Ledger 4 gets 2 from bob and 1 from alice, @@ -2984,21 +2894,21 @@ public: { env(noop(bob), seq(bobSeq + i), fee(baseFee * 20), ter(terQUEUED)); } - checkMetrics(__LINE__, env, 10 + 2 + 4 + 5, std::nullopt, 2, 1); + checkMetrics(*this, env, 10 + 2 + 4 + 5, std::nullopt, 2, 1); // Close ledger 3 env.close(); - checkMetrics(__LINE__, env, 9 + 4 + 5, 20, 3, 2); + checkMetrics(*this, env, 9 + 4 + 5, 20, 3, 2); BEAST_EXPECT(env.seq(alice) == aliceSeq + 12); // Close ledger 4 env.close(); - checkMetrics(__LINE__, env, 9 + 5, 30, 4, 3); + checkMetrics(*this, env, 9 + 5, 30, 4, 3); BEAST_EXPECT(env.seq(alice) == aliceSeq + 12); // Close ledger 5 env.close(); // Three of Alice's txs expired. - checkMetrics(__LINE__, env, 6, 40, 5, 4); + checkMetrics(*this, env, 6, 40, 5, 4); BEAST_EXPECT(env.seq(alice) == aliceSeq + 12); // Top off Alice's queue again using Tickets so the sequence gap is @@ -3009,7 +2919,7 @@ public: env(noop(alice), ticket::use(aliceSeq + 4), ter(terQUEUED)); env(noop(alice), ticket::use(aliceSeq + 5), ter(terQUEUED)); env(noop(alice), ticket::use(aliceSeq + 6), ter(telCAN_NOT_QUEUE_FULL)); - checkMetrics(__LINE__, env, 11, 40, 5, 4); + checkMetrics(*this, env, 11, 40, 5, 4); // Even though alice's queue is full we can still slide in a couple // more transactions because she has a sequence gap. But we @@ -3040,7 +2950,7 @@ public: // Finally we can fill in the entire gap. env(noop(alice), seq(aliceSeq + 18), ter(terQUEUED)); - checkMetrics(__LINE__, env, 14, 40, 5, 4); + checkMetrics(*this, env, 14, 40, 5, 4); // Verify that nothing can be added now that the gap is filled. env(noop(alice), seq(aliceSeq + 20), ter(telCAN_NOT_QUEUE_FULL)); @@ -3049,18 +2959,18 @@ public: // but alice adds some more transaction(s) so expectedCount // may not reduce to 8. env.close(); - checkMetrics(__LINE__, env, 9, 50, 6, 5); + checkMetrics(*this, env, 9, 50, 6, 5); BEAST_EXPECT(env.seq(alice) == aliceSeq + 15); // Close ledger 7. That should remove 4 more of alice's transactions. 
env.close(); - checkMetrics(__LINE__, env, 2, 60, 7, 6); + checkMetrics(*this, env, 2, 60, 7, 6); BEAST_EXPECT(env.seq(alice) == aliceSeq + 19); // Close one last ledger to see all of alice's transactions moved // into the ledger, including the tickets env.close(); - checkMetrics(__LINE__, env, 0, 70, 2, 7); + checkMetrics(*this, env, 0, 70, 2, 7); BEAST_EXPECT(env.seq(alice) == aliceSeq + 21); } @@ -3079,7 +2989,7 @@ public: env.fund(XRP(100000), alice, bob); fillQueue(env, alice); - checkMetrics(__LINE__, env, 0, std::nullopt, 7, 6); + checkMetrics(*this, env, 0, std::nullopt, 7, 6); // Queue up several transactions for alice sign-and-submit auto const aliceSeq = env.seq(alice); @@ -3100,7 +3010,7 @@ public: noop(alice), fee(baseFee * 100), seq(none), ter(terQUEUED))( submitParams); } - checkMetrics(__LINE__, env, 5, std::nullopt, 7, 6); + checkMetrics(*this, env, 5, std::nullopt, 7, 6); { auto aliceStat = txQ.getAccountTxs(alice.id()); SeqProxy seq = SeqProxy::sequence(aliceSeq); @@ -3126,25 +3036,25 @@ public: // Give them a higher fee so they'll beat alice's. for (int i = 0; i < 8; ++i) envs(noop(bob), fee(baseFee * 200), seq(none), ter(terQUEUED))(); - checkMetrics(__LINE__, env, 13, std::nullopt, 7, 6); + checkMetrics(*this, env, 13, std::nullopt, 7, 6); env.close(); - checkMetrics(__LINE__, env, 5, 14, 8, 7); + checkMetrics(*this, env, 5, 14, 8, 7); // Put some more txs in the queue for bob. // Give them a higher fee so they'll beat alice's. fillQueue(env, bob); for (int i = 0; i < 9; ++i) envs(noop(bob), fee(baseFee * 200), seq(none), ter(terQUEUED))(); - checkMetrics(__LINE__, env, 14, 14, 8, 7, 25601); + checkMetrics(*this, env, 14, 14, 8, 7, 25601); env.close(); // Put some more txs in the queue for bob. // Give them a higher fee so they'll beat alice's. fillQueue(env, bob); for (int i = 0; i < 10; ++i) envs(noop(bob), fee(baseFee * 200), seq(none), ter(terQUEUED))(); - checkMetrics(__LINE__, env, 15, 16, 9, 8); + checkMetrics(*this, env, 15, 16, 9, 8); env.close(); - checkMetrics(__LINE__, env, 4, 18, 10, 9); + checkMetrics(*this, env, 4, 18, 10, 9); { // Bob has nothing left in the queue. auto bobStat = txQ.getAccountTxs(bob.id()); @@ -3172,7 +3082,7 @@ public: // Now, fill the gap. envs(noop(alice), fee(baseFee * 100), seq(none), ter(terQUEUED))( submitParams); - checkMetrics(__LINE__, env, 5, 18, 10, 9); + checkMetrics(*this, env, 5, 18, 10, 9); { auto aliceStat = txQ.getAccountTxs(alice.id()); auto seq = aliceSeq; @@ -3187,7 +3097,7 @@ public: } env.close(); - checkMetrics(__LINE__, env, 0, 20, 5, 10); + checkMetrics(*this, env, 0, 20, 5, 10); { // Bob's data has been cleaned up. auto bobStat = txQ.getAccountTxs(bob.id()); @@ -3246,10 +3156,10 @@ public: BEAST_EXPECT(!queue_data.isMember(jss::max_spend_drops_total)); BEAST_EXPECT(!queue_data.isMember(jss::transactions)); } - checkMetrics(__LINE__, env, 0, 6, 0, 3); + checkMetrics(*this, env, 0, 6, 0, 3); fillQueue(env, alice); - checkMetrics(__LINE__, env, 0, 6, 4, 3); + checkMetrics(*this, env, 0, 6, 4, 3); { auto const info = env.rpc("json", "account_info", withQueue); @@ -3278,7 +3188,7 @@ public: submitParams); envs(noop(alice), fee(baseFee * 10), seq(none), ter(terQUEUED))( submitParams); - checkMetrics(__LINE__, env, 4, 6, 4, 3); + checkMetrics(*this, env, 4, 6, 4, 3); { auto const info = env.rpc("json", "account_info", withQueue); @@ -3330,7 +3240,7 @@ public: // Drain the queue so we can queue up a blocker. 
env.close(); - checkMetrics(__LINE__, env, 0, 8, 4, 4); + checkMetrics(*this, env, 0, 8, 4, 4); // Fill the ledger and then queue up a blocker. envs(noop(alice), seq(none))(submitParams); @@ -3341,7 +3251,7 @@ public: seq(none), json(jss::LastLedgerSequence, 10), ter(terQUEUED))(submitParams); - checkMetrics(__LINE__, env, 1, 8, 5, 4); + checkMetrics(*this, env, 1, 8, 5, 4); { auto const info = env.rpc("json", "account_info", withQueue); @@ -3405,7 +3315,7 @@ public: fee(baseFee * 10), seq(none), ter(telCAN_NOT_QUEUE_BLOCKED))(submitParams); - checkMetrics(__LINE__, env, 1, 8, 5, 4); + checkMetrics(*this, env, 1, 8, 5, 4); { auto const info = env.rpc("json", "account_info", withQueue); @@ -3483,9 +3393,9 @@ public: } env.close(); - checkMetrics(__LINE__, env, 0, 10, 2, 5); + checkMetrics(*this, env, 0, 10, 2, 5); env.close(); - checkMetrics(__LINE__, env, 0, 10, 0, 5); + checkMetrics(*this, env, 0, 10, 0, 5); { auto const info = env.rpc("json", "account_info", withQueue); @@ -3555,10 +3465,10 @@ public: state[jss::load_factor_fee_reference] == 256); } - checkMetrics(__LINE__, env, 0, 6, 0, 3); + checkMetrics(*this, env, 0, 6, 0, 3); fillQueue(env, alice); - checkMetrics(__LINE__, env, 0, 6, 4, 3); + checkMetrics(*this, env, 0, 6, 4, 3); auto aliceSeq = env.seq(alice); auto submitParams = Json::Value(Json::objectValue); @@ -3568,7 +3478,7 @@ public: fee(baseFee * 10), seq(aliceSeq + i), ter(terQUEUED))(submitParams); - checkMetrics(__LINE__, env, 4, 6, 4, 3); + checkMetrics(*this, env, 4, 6, 4, 3); { auto const server_info = env.rpc("server_info"); @@ -3794,7 +3704,7 @@ public: // Fund the first few accounts at non escalated fee env.fund(XRP(50000), noripple(a, b, c, d)); - checkMetrics(__LINE__, env, 0, std::nullopt, 4, 3); + checkMetrics(*this, env, 0, std::nullopt, 4, 3); // First transaction establishes the messaging using namespace std::chrono_literals; @@ -3844,7 +3754,7 @@ public: jv[jss::load_factor_fee_reference] == 256; })); - checkMetrics(__LINE__, env, 0, 8, 0, 4); + checkMetrics(*this, env, 0, 8, 0, 4); // Fund then next few accounts at non escalated fee env.fund(XRP(50000), noripple(e, f, g, h, i)); @@ -3858,7 +3768,7 @@ public: env(noop(e), fee(baseFee), queued); env(noop(f), fee(baseFee), queued); env(noop(g), fee(baseFee), queued); - checkMetrics(__LINE__, env, 7, 8, 5, 4); + checkMetrics(*this, env, 7, 8, 5, 4); // Last transaction escalates the fee BEAST_EXPECT(wsc->findMsg(5s, [&](auto const& jv) { @@ -3928,7 +3838,7 @@ public: auto alice = Account("alice"); auto bob = Account("bob"); - checkMetrics(__LINE__, env, 0, std::nullopt, 0, 3); + checkMetrics(*this, env, 0, std::nullopt, 0, 3); env.fund(XRP(50000000), alice, bob); fillQueue(env, alice); @@ -3982,7 +3892,7 @@ public: seq(aliceSeq++), ter(terQUEUED)); - checkMetrics(__LINE__, env, 3, std::nullopt, 4, 3); + checkMetrics(*this, env, 3, std::nullopt, 4, 3); // Figure out how much it would cost to cover all the // queued txs + itself @@ -3994,7 +3904,7 @@ public: // the edge case test. env(noop(alice), fee(totalFee), seq(aliceSeq++), ter(terQUEUED)); - checkMetrics(__LINE__, env, 4, std::nullopt, 4, 3); + checkMetrics(*this, env, 4, std::nullopt, 4, 3); // Now repeat the process including the new tx // and avoiding the rounding error @@ -4004,7 +3914,7 @@ public: // Submit a transaction with that fee. It will succeed. 
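// (It succeeds because of the queue-clearing behavior these cases exercise:
// a replacement whose fee is large enough to cover every transaction ahead
// of it in the account's queue plus itself pushes that whole chain into the
// open ledger at once, which is what calcTotalFee is estimating here.)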
env(noop(alice), fee(totalFee), seq(aliceSeq++)); - checkMetrics(__LINE__, env, 0, std::nullopt, 9, 3); + checkMetrics(*this, env, 0, std::nullopt, 9, 3); } testcase("replace last tx with enough to clear queue"); @@ -4029,7 +3939,7 @@ public: seq(aliceSeq++), ter(terQUEUED)); - checkMetrics(__LINE__, env, 3, std::nullopt, 9, 3); + checkMetrics(*this, env, 3, std::nullopt, 9, 3); // Figure out how much it would cost to cover all the // queued txs + itself @@ -4041,10 +3951,10 @@ public: env(noop(alice), fee(totalFee), seq(aliceSeq++)); // The queue is clear - checkMetrics(__LINE__, env, 0, std::nullopt, 12, 3); + checkMetrics(*this, env, 0, std::nullopt, 12, 3); env.close(); - checkMetrics(__LINE__, env, 0, 24, 0, 12); + checkMetrics(*this, env, 0, 24, 0, 12); } testcase("replace middle tx with enough to clear queue"); @@ -4060,7 +3970,7 @@ public: ter(terQUEUED)); } - checkMetrics(__LINE__, env, 5, 24, 13, 12); + checkMetrics(*this, env, 5, 24, 13, 12); // Figure out how much it would cost to cover 3 txns uint64_t const totalFee = calcTotalFee(baseFee * 10 * 2, 3); @@ -4068,7 +3978,7 @@ public: aliceSeq -= 3; env(noop(alice), fee(totalFee), seq(aliceSeq++)); - checkMetrics(__LINE__, env, 2, 24, 16, 12); + checkMetrics(*this, env, 2, 24, 16, 12); auto const aliceQueue = env.app().getTxQ().getAccountTxs(alice.id()); BEAST_EXPECT(aliceQueue.size() == 2); @@ -4083,7 +3993,7 @@ public: // Close the ledger to clear the queue env.close(); - checkMetrics(__LINE__, env, 0, 32, 2, 16); + checkMetrics(*this, env, 0, 32, 2, 16); } testcase("clear queue failure (load)"); @@ -4109,7 +4019,7 @@ public: totalPaid += baseFee * 2.2; } - checkMetrics(__LINE__, env, 4, 32, 17, 16); + checkMetrics(*this, env, 4, 32, 17, 16); // Figure out how much it would cost to cover all the txns // + 1 @@ -4123,11 +4033,11 @@ public: env(noop(alice), fee(totalFee), seq(aliceSeq++), ter(terQUEUED)); // The original last transaction is still in the queue - checkMetrics(__LINE__, env, 5, 32, 17, 16); + checkMetrics(*this, env, 5, 32, 17, 16); // With high load, some of the txs stay in the queue env.close(); - checkMetrics(__LINE__, env, 3, 34, 2, 17); + checkMetrics(*this, env, 3, 34, 2, 17); // Load drops back down feeTrack.setRemoteFee(origFee); @@ -4135,14 +4045,14 @@ public: // Because of the earlier failure, alice can not clear the queue, // no matter how high the fee fillQueue(env, bob); - checkMetrics(__LINE__, env, 3, 34, 18, 17); + checkMetrics(*this, env, 3, 34, 18, 17); env(noop(alice), fee(XRP(1)), seq(aliceSeq++), ter(terQUEUED)); - checkMetrics(__LINE__, env, 4, 34, 18, 17); + checkMetrics(*this, env, 4, 34, 18, 17); // With normal load, those txs get into the ledger env.close(); - checkMetrics(__LINE__, env, 0, 36, 4, 18); + checkMetrics(*this, env, 0, 36, 4, 18); } } @@ -4164,77 +4074,77 @@ public: {"maximum_txn_per_account", "200"}})); auto alice = Account("alice"); - checkMetrics(__LINE__, env, 0, std::nullopt, 0, 3); + checkMetrics(*this, env, 0, std::nullopt, 0, 3); env.fund(XRP(50000000), alice); fillQueue(env, alice); - checkMetrics(__LINE__, env, 0, std::nullopt, 4, 3); + checkMetrics(*this, env, 0, std::nullopt, 4, 3); auto seqAlice = env.seq(alice); auto txCount = 140; for (int i = 0; i < txCount; ++i) env(noop(alice), seq(seqAlice++), ter(terQUEUED)); - checkMetrics(__LINE__, env, txCount, std::nullopt, 4, 3); + checkMetrics(*this, env, txCount, std::nullopt, 4, 3); // Close a few ledgers successfully, so the limit grows env.close(); // 4 + 25% = 5 txCount -= 6; - checkMetrics(__LINE__, env, txCount, 
10, 6, 5, 257); + checkMetrics(*this, env, txCount, 10, 6, 5, 257); env.close(); // 6 + 25% = 7 txCount -= 8; - checkMetrics(__LINE__, env, txCount, 14, 8, 7, 257); + checkMetrics(*this, env, txCount, 14, 8, 7, 257); env.close(); // 8 + 25% = 10 txCount -= 11; - checkMetrics(__LINE__, env, txCount, 20, 11, 10, 257); + checkMetrics(*this, env, txCount, 20, 11, 10, 257); env.close(); // 11 + 25% = 13 txCount -= 14; - checkMetrics(__LINE__, env, txCount, 26, 14, 13, 257); + checkMetrics(*this, env, txCount, 26, 14, 13, 257); env.close(); // 14 + 25% = 17 txCount -= 18; - checkMetrics(__LINE__, env, txCount, 34, 18, 17, 257); + checkMetrics(*this, env, txCount, 34, 18, 17, 257); env.close(); // 18 + 25% = 22 txCount -= 23; - checkMetrics(__LINE__, env, txCount, 44, 23, 22, 257); + checkMetrics(*this, env, txCount, 44, 23, 22, 257); env.close(); // 23 + 25% = 28 txCount -= 29; - checkMetrics(__LINE__, env, txCount, 56, 29, 28); + checkMetrics(*this, env, txCount, 56, 29, 28); // From 3 expected to 28 in 7 "fast" ledgers. // Close the ledger with a delay. env.close(env.now() + 5s, 10000ms); txCount -= 15; - checkMetrics(__LINE__, env, txCount, 56, 15, 14); + checkMetrics(*this, env, txCount, 56, 15, 14); // Close the ledger with a delay. env.close(env.now() + 5s, 10000ms); txCount -= 8; - checkMetrics(__LINE__, env, txCount, 56, 8, 7); + checkMetrics(*this, env, txCount, 56, 8, 7); // Close the ledger with a delay. env.close(env.now() + 5s, 10000ms); txCount -= 4; - checkMetrics(__LINE__, env, txCount, 56, 4, 3); + checkMetrics(*this, env, txCount, 56, 4, 3); // From 28 expected back down to 3 in 3 "slow" ledgers. // Confirm the minimum sticks env.close(env.now() + 5s, 10000ms); txCount -= 4; - checkMetrics(__LINE__, env, txCount, 56, 4, 3); + checkMetrics(*this, env, txCount, 56, 4, 3); BEAST_EXPECT(!txCount); } @@ -4250,35 +4160,35 @@ public: {"maximum_txn_per_account", "200"}})); auto alice = Account("alice"); - checkMetrics(__LINE__, env, 0, std::nullopt, 0, 3); + checkMetrics(*this, env, 0, std::nullopt, 0, 3); env.fund(XRP(50000000), alice); fillQueue(env, alice); - checkMetrics(__LINE__, env, 0, std::nullopt, 4, 3); + checkMetrics(*this, env, 0, std::nullopt, 4, 3); auto seqAlice = env.seq(alice); auto txCount = 43; for (int i = 0; i < txCount; ++i) env(noop(alice), seq(seqAlice++), ter(terQUEUED)); - checkMetrics(__LINE__, env, txCount, std::nullopt, 4, 3); + checkMetrics(*this, env, txCount, std::nullopt, 4, 3); // Close a few ledgers successfully, so the limit grows env.close(); // 4 + 150% = 10 txCount -= 11; - checkMetrics(__LINE__, env, txCount, 20, 11, 10, 257); + checkMetrics(*this, env, txCount, 20, 11, 10, 257); env.close(); // 11 + 150% = 27 txCount -= 28; - checkMetrics(__LINE__, env, txCount, 54, 28, 27); + checkMetrics(*this, env, txCount, 54, 28, 27); // From 3 expected to 28 in 7 "fast" ledgers. // Close the ledger with a delay. env.close(env.now() + 5s, 10000ms); txCount -= 4; - checkMetrics(__LINE__, env, txCount, 54, 4, 3); + checkMetrics(*this, env, txCount, 54, 4, 3); // From 28 expected back down to 3 in 3 "slow" ledgers. 
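The pattern running through these hunks, passing *this where __LINE__ used to go, works because the reworked checkMetrics helper (added to src/test/jtx/TestHelpers.h further down in this patch) takes a defaulted std::source_location parameter that is evaluated at each call site, so the helper can report failures against the caller's file and line by itself. A minimal standalone sketch of that mechanism, compiled as C++20; the function name and message are illustrative only:

    #include <iostream>
    #include <source_location>

    // The defaulted parameter is evaluated where reportFailure is called, so
    // the callee sees the caller's file and line without any __LINE__ macro.
    static void
    reportFailure(
        char const* what,
        std::source_location const location = std::source_location::current())
    {
        std::cerr << location.file_name() << ':' << location.line() << ": "
                  << what << '\n';
    }

    int
    main()
    {
        reportFailure("txCount mismatch");  // reported at this line
    }

On clang releases older than 15 the helper header falls back to std::experimental::source_location, which is why it aliases the type rather than spelling std::source_location directly. The helper also recomputes the expected open-ledger fee level from the queue's escalation curve, expectedMedFeeLevel * txInLedger^2 / txPerLedger^2, once the ledger is past its per-ledger target; with the default median level of 128000 (256 * 500), six transactions in the ledger and a target of five, that works out to 128000 * 36 / 25 = 184320.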
@@ -4306,19 +4216,19 @@ public: auto const queued = ter(terQUEUED); - checkMetrics(__LINE__, env, 0, std::nullopt, 0, 3); + checkMetrics(*this, env, 0, std::nullopt, 0, 3); // Create account env.fund(XRP(50000), noripple(alice)); - checkMetrics(__LINE__, env, 0, std::nullopt, 1, 3); + checkMetrics(*this, env, 0, std::nullopt, 1, 3); fillQueue(env, alice); - checkMetrics(__LINE__, env, 0, std::nullopt, 4, 3); + checkMetrics(*this, env, 0, std::nullopt, 4, 3); // Queue a transaction auto const aliceSeq = env.seq(alice); env(noop(alice), queued); - checkMetrics(__LINE__, env, 1, std::nullopt, 4, 3); + checkMetrics(*this, env, 1, std::nullopt, 4, 3); // Now, apply a (different) transaction directly // to the open ledger, bypassing the queue @@ -4334,23 +4244,23 @@ public: return result.applied; }); // the queued transaction is still there - checkMetrics(__LINE__, env, 1, std::nullopt, 5, 3); + checkMetrics(*this, env, 1, std::nullopt, 5, 3); // The next transaction should be able to go into the open // ledger, even though aliceSeq is queued. In earlier incarnations // of the TxQ this would cause an assert. env(noop(alice), seq(aliceSeq + 1), fee(openLedgerCost(env))); - checkMetrics(__LINE__, env, 1, std::nullopt, 6, 3); + checkMetrics(*this, env, 1, std::nullopt, 6, 3); // Now queue a couple more transactions to make sure // they succeed despite aliceSeq being queued env(noop(alice), seq(aliceSeq + 2), queued); env(noop(alice), seq(aliceSeq + 3), queued); - checkMetrics(__LINE__, env, 3, std::nullopt, 6, 3); + checkMetrics(*this, env, 3, std::nullopt, 6, 3); // Now close the ledger. One of the queued transactions // (aliceSeq) should be dropped. env.close(); - checkMetrics(__LINE__, env, 0, 12, 2, 6); + checkMetrics(*this, env, 0, 12, 2, 6); } void @@ -4371,11 +4281,11 @@ public: auto queued = ter(terQUEUED); - checkMetrics(__LINE__, env, 0, std::nullopt, 0, 3); + checkMetrics(*this, env, 0, std::nullopt, 0, 3); // Create account env.fund(XRP(50000), noripple(alice)); - checkMetrics(__LINE__, env, 0, std::nullopt, 1, 3); + checkMetrics(*this, env, 0, std::nullopt, 1, 3); // Create tickets std::uint32_t const tktSeq0{env.seq(alice) + 1}; @@ -4383,12 +4293,12 @@ public: // Fill the queue so the next transaction will be queued. fillQueue(env, alice); - checkMetrics(__LINE__, env, 0, std::nullopt, 4, 3); + checkMetrics(*this, env, 0, std::nullopt, 4, 3); // Queue a transaction with a ticket. Leave an unused ticket // on either side. env(noop(alice), ticket::use(tktSeq0 + 1), queued); - checkMetrics(__LINE__, env, 1, std::nullopt, 4, 3); + checkMetrics(*this, env, 1, std::nullopt, 4, 3); // Now, apply a (different) transaction directly // to the open ledger, bypassing the queue @@ -4406,25 +4316,25 @@ public: return result.applied; }); // the queued transaction is still there - checkMetrics(__LINE__, env, 1, std::nullopt, 5, 3); + checkMetrics(*this, env, 1, std::nullopt, 5, 3); // The next (sequence-based) transaction should be able to go into // the open ledger, even though tktSeq0 is queued. Note that this // sequence-based transaction goes in front of the queued // transaction, so the queued transaction is left in the queue. env(noop(alice), fee(openLedgerCost(env))); - checkMetrics(__LINE__, env, 1, std::nullopt, 6, 3); + checkMetrics(*this, env, 1, std::nullopt, 6, 3); // We should be able to do the same thing with a ticket that goes // if front of the queued transaction. This one too will leave // the queued transaction in place. 
env(noop(alice), ticket::use(tktSeq0 + 0), fee(openLedgerCost(env))); - checkMetrics(__LINE__, env, 1, std::nullopt, 7, 3); + checkMetrics(*this, env, 1, std::nullopt, 7, 3); // We have one ticketed transaction in the queue. We should able // to add another to the queue. env(noop(alice), ticket::use(tktSeq0 + 2), queued); - checkMetrics(__LINE__, env, 2, std::nullopt, 7, 3); + checkMetrics(*this, env, 2, std::nullopt, 7, 3); // Here we try to force the queued transactions into the ledger by // adding one more queued (ticketed) transaction that pays enough @@ -4440,12 +4350,12 @@ public: // transaction is equally capable of going into the ledger independent // of all other ticket- or sequence-based transactions. env(noop(alice), ticket::use(tktSeq0 + 3), fee(XRP(10))); - checkMetrics(__LINE__, env, 2, std::nullopt, 8, 3); + checkMetrics(*this, env, 2, std::nullopt, 8, 3); // Now close the ledger. One of the queued transactions // (the one with tktSeq0 + 1) should be dropped. env.close(); - checkMetrics(__LINE__, env, 0, 16, 1, 8); + checkMetrics(*this, env, 0, 16, 1, 8); } void @@ -4496,7 +4406,7 @@ public: env.close(); env.fund(XRP(10000), fiona); env.close(); - checkMetrics(__LINE__, env, 0, 10, 0, 2); + checkMetrics(*this, env, 0, 10, 0, 2); // Close ledgers until the amendments show up. int i = 0; @@ -4508,7 +4418,7 @@ public: } auto expectedPerLedger = ripple::detail::numUpVotedAmendments() + 1; checkMetrics( - __LINE__, env, 0, 5 * expectedPerLedger, 0, expectedPerLedger); + *this, env, 0, 5 * expectedPerLedger, 0, expectedPerLedger); // Now wait 2 weeks modulo 256 ledgers for the amendments to be // enabled. Speed the process by closing ledgers every 80 minutes, @@ -4524,7 +4434,7 @@ public: // We're very close to the flag ledger. Fill the ledger. fillQueue(env, alice); checkMetrics( - __LINE__, + *this, env, 0, 5 * expectedPerLedger, @@ -4575,7 +4485,7 @@ public: } std::size_t expectedInQueue = 60; checkMetrics( - __LINE__, + *this, env, expectedInQueue, 5 * expectedPerLedger, @@ -4602,7 +4512,7 @@ public: expectedInLedger -= expectedInQueue; ++expectedPerLedger; checkMetrics( - __LINE__, + *this, env, expectedInQueue, 5 * expectedPerLedger, @@ -4689,7 +4599,7 @@ public: // of their transactions expire out of the queue. To start out // alice fills the ledger. fillQueue(env, alice); - checkMetrics(__LINE__, env, 0, 50, 7, 6); + checkMetrics(*this, env, 0, 50, 7, 6); // Now put a few transactions into alice's queue, including one that // will expire out soon. @@ -4735,9 +4645,9 @@ public: env(noop(fiona), seq(seqFiona++), fee(--feeDrops), ter(terQUEUED)); } - checkMetrics(__LINE__, env, 34, 50, 7, 6); + checkMetrics(*this, env, 34, 50, 7, 6); env.close(); - checkMetrics(__LINE__, env, 26, 50, 8, 7); + checkMetrics(*this, env, 26, 50, 8, 7); // Re-fill the queue so alice and bob stay stuck. feeDrops = medFee; @@ -4748,9 +4658,9 @@ public: env(noop(ellie), seq(seqEllie++), fee(--feeDrops), ter(terQUEUED)); env(noop(fiona), seq(seqFiona++), fee(--feeDrops), ter(terQUEUED)); } - checkMetrics(__LINE__, env, 38, 50, 8, 7); + checkMetrics(*this, env, 38, 50, 8, 7); env.close(); - checkMetrics(__LINE__, env, 29, 50, 9, 8); + checkMetrics(*this, env, 29, 50, 9, 8); // One more time... 
feeDrops = medFee; @@ -4761,9 +4671,9 @@ public: env(noop(ellie), seq(seqEllie++), fee(--feeDrops), ter(terQUEUED)); env(noop(fiona), seq(seqFiona++), fee(--feeDrops), ter(terQUEUED)); } - checkMetrics(__LINE__, env, 41, 50, 9, 8); + checkMetrics(*this, env, 41, 50, 9, 8); env.close(); - checkMetrics(__LINE__, env, 29, 50, 10, 9); + checkMetrics(*this, env, 29, 50, 10, 9); // Finally the stage is set. alice's and bob's transactions expired // out of the queue which caused the dropPenalty flag to be set on @@ -4785,7 +4695,7 @@ public: env(noop(carol), seq(seqCarol++), fee(--feeDrops), ter(terQUEUED)); env(noop(daria), seq(seqDaria++), fee(--feeDrops), ter(terQUEUED)); env(noop(ellie), seq(seqEllie++), fee(--feeDrops), ter(terQUEUED)); - checkMetrics(__LINE__, env, 48, 50, 10, 9); + checkMetrics(*this, env, 48, 50, 10, 9); // Now induce a fee jump which should cause all the transactions // in the queue to fail with telINSUF_FEE_P. @@ -4802,7 +4712,7 @@ public: // o The _last_ transaction should be dropped from alice's queue. // o The first failing transaction should be dropped from bob's queue. env.close(); - checkMetrics(__LINE__, env, 46, 50, 0, 10); + checkMetrics(*this, env, 46, 50, 0, 10); // Run the local fee back down. while (env.app().getFeeTrack().lowerLocalFee()) @@ -4810,7 +4720,7 @@ public: // bob fills the ledger so it's easier to probe the TxQ. fillQueue(env, bob); - checkMetrics(__LINE__, env, 46, 50, 11, 10); + checkMetrics(*this, env, 46, 50, 11, 10); // Before the close() alice had two transactions in her queue. // We now expect her to have one. Here's the state of alice's queue. @@ -4928,7 +4838,7 @@ public: env.close(); - checkMetrics(__LINE__, env, 0, 50, 4, 6); + checkMetrics(*this, env, 0, 50, 4, 6); } { @@ -4989,7 +4899,7 @@ public: // The ticket transactions that didn't succeed or get queued succeed // this time because the tickets got consumed when the offers came // out of the queue - checkMetrics(__LINE__, env, 0, 50, 8, 7); + checkMetrics(*this, env, 0, 50, 8, 7); } } @@ -5010,7 +4920,7 @@ public: {"account_reserve", "0"}, {"owner_reserve", "0"}})); - checkMetrics(__LINE__, env, 0, std::nullopt, 0, 3); + checkMetrics(*this, env, 0, std::nullopt, 0, 3); // ledgers in queue is 2 because of makeConfig auto const initQueueMax = initFee(env, 3, 2, 0, 0, 0); @@ -5056,34 +4966,34 @@ public: } } - checkMetrics(__LINE__, env, 0, initQueueMax, 0, 3); + checkMetrics(*this, env, 0, initQueueMax, 0, 3); // The noripple is to reduce the number of transactions required to // fund the accounts. There is no rippling in this test. 
env.fund(XRP(100000), noripple(alice)); - checkMetrics(__LINE__, env, 0, initQueueMax, 1, 3); + checkMetrics(*this, env, 0, initQueueMax, 1, 3); env.close(); - checkMetrics(__LINE__, env, 0, 6, 0, 3); + checkMetrics(*this, env, 0, 6, 0, 3); fillQueue(env, alice); - checkMetrics(__LINE__, env, 0, 6, 4, 3); + checkMetrics(*this, env, 0, 6, 4, 3); env(noop(alice), fee(openLedgerCost(env))); - checkMetrics(__LINE__, env, 0, 6, 5, 3); + checkMetrics(*this, env, 0, 6, 5, 3); auto aliceSeq = env.seq(alice); env(noop(alice), queued); - checkMetrics(__LINE__, env, 1, 6, 5, 3); + checkMetrics(*this, env, 1, 6, 5, 3); env(noop(alice), seq(aliceSeq + 1), fee(10), queued); - checkMetrics(__LINE__, env, 2, 6, 5, 3); + checkMetrics(*this, env, 2, 6, 5, 3); { auto const fee = env.rpc("fee"); @@ -5126,7 +5036,7 @@ public: env.close(); - checkMetrics(__LINE__, env, 0, 10, 2, 5); + checkMetrics(*this, env, 0, 10, 2, 5); } void diff --git a/src/test/jtx.h b/src/test/jtx.h index 6b73ca63ec..2e4764a403 100644 --- a/src/test/jtx.h +++ b/src/test/jtx.h @@ -31,8 +31,10 @@ #include #include #include +#include #include #include +#include #include #include #include diff --git a/src/test/jtx/SignerUtils.h b/src/test/jtx/SignerUtils.h new file mode 100644 index 0000000000..7b1ae5007c --- /dev/null +++ b/src/test/jtx/SignerUtils.h @@ -0,0 +1,56 @@ +#ifndef RIPPLE_TEST_JTX_SIGNERUTILS_H_INCLUDED +#define RIPPLE_TEST_JTX_SIGNERUTILS_H_INCLUDED + +#include + +#include + +namespace ripple { +namespace test { +namespace jtx { + +struct Reg +{ + Account acct; + Account sig; + + Reg(Account const& masterSig) : acct(masterSig), sig(masterSig) + { + } + + Reg(Account const& acct_, Account const& regularSig) + : acct(acct_), sig(regularSig) + { + } + + Reg(char const* masterSig) : acct(masterSig), sig(masterSig) + { + } + + Reg(char const* acct_, char const* regularSig) + : acct(acct_), sig(regularSig) + { + } + + bool + operator<(Reg const& rhs) const + { + return acct < rhs.acct; + } +}; + +// Utility function to sort signers +inline void +sortSigners(std::vector& signers) +{ + std::sort( + signers.begin(), signers.end(), [](Reg const& lhs, Reg const& rhs) { + return lhs.acct < rhs.acct; + }); +} + +} // namespace jtx +} // namespace test +} // namespace ripple + +#endif diff --git a/src/test/jtx/TestHelpers.h b/src/test/jtx/TestHelpers.h index 534419494d..ae46ea4fe3 100644 --- a/src/test/jtx/TestHelpers.h +++ b/src/test/jtx/TestHelpers.h @@ -23,6 +23,7 @@ #include #include +#include #include #include #include @@ -31,6 +32,14 @@ #include +#if (defined(__clang_major__) && __clang_major__ < 15) +#include +using source_location = std::experimental::source_location; +#else +#include +using std::source_location; +#endif + namespace ripple { namespace test { namespace jtx { @@ -445,7 +454,6 @@ create(A const& account, A const& dest, STAmount const& sendMax) jv[sfSendMax.jsonName] = sendMax.getJson(JsonOptions::none); jv[sfDestination.jsonName] = to_string(dest); jv[sfTransactionType.jsonName] = jss::CheckCreate; - jv[sfFlags.jsonName] = tfUniversal; return jv; } // clang-format on @@ -461,6 +469,102 @@ create( } // namespace check +static constexpr FeeLevel64 baseFeeLevel{256}; +static constexpr FeeLevel64 minEscalationFeeLevel = baseFeeLevel * 500; + +template +void +checkMetrics( + Suite& test, + jtx::Env& env, + std::size_t expectedCount, + std::optional expectedMaxCount, + std::size_t expectedInLedger, + std::size_t expectedPerLedger, + std::uint64_t expectedMinFeeLevel = baseFeeLevel.fee(), + std::uint64_t expectedMedFeeLevel = 
minEscalationFeeLevel.fee(), + source_location const location = source_location::current()) +{ + int line = location.line(); + char const* file = location.file_name(); + FeeLevel64 const expectedMin{expectedMinFeeLevel}; + FeeLevel64 const expectedMed{expectedMedFeeLevel}; + auto const metrics = env.app().getTxQ().getMetrics(*env.current()); + using namespace std::string_literals; + + metrics.referenceFeeLevel == baseFeeLevel + ? test.pass() + : test.fail( + "reference: "s + + std::to_string(metrics.referenceFeeLevel.value()) + "/" + + std::to_string(baseFeeLevel.value()), + file, + line); + + metrics.txCount == expectedCount + ? test.pass() + : test.fail( + "txCount: "s + std::to_string(metrics.txCount) + "/" + + std::to_string(expectedCount), + file, + line); + + metrics.txQMaxSize == expectedMaxCount + ? test.pass() + : test.fail( + "txQMaxSize: "s + std::to_string(metrics.txQMaxSize.value_or(0)) + + "/" + std::to_string(expectedMaxCount.value_or(0)), + file, + line); + + metrics.txInLedger == expectedInLedger + ? test.pass() + : test.fail( + "txInLedger: "s + std::to_string(metrics.txInLedger) + "/" + + std::to_string(expectedInLedger), + file, + line); + + metrics.txPerLedger == expectedPerLedger + ? test.pass() + : test.fail( + "txPerLedger: "s + std::to_string(metrics.txPerLedger) + "/" + + std::to_string(expectedPerLedger), + file, + line); + + metrics.minProcessingFeeLevel == expectedMin + ? test.pass() + : test.fail( + "minProcessingFeeLevel: "s + + std::to_string(metrics.minProcessingFeeLevel.value()) + "/" + + std::to_string(expectedMin.value()), + file, + line); + + metrics.medFeeLevel == expectedMed + ? test.pass() + : test.fail( + "medFeeLevel: "s + std::to_string(metrics.medFeeLevel.value()) + + "/" + std::to_string(expectedMed.value()), + file, + line); + + auto const expectedCurFeeLevel = expectedInLedger > expectedPerLedger + ? expectedMed * expectedInLedger * expectedInLedger / + (expectedPerLedger * expectedPerLedger) + : metrics.referenceFeeLevel; + + metrics.openLedgerFeeLevel == expectedCurFeeLevel + ? test.pass() + : test.fail( + "openLedgerFeeLevel: "s + + std::to_string(metrics.openLedgerFeeLevel.value()) + "/" + + std::to_string(expectedCurFeeLevel.value()), + file, + line); +} + } // namespace jtx } // namespace test } // namespace ripple diff --git a/src/test/jtx/acctdelete.h b/src/test/jtx/acctdelete.h index 98a23c6de2..21d00cb727 100644 --- a/src/test/jtx/acctdelete.h +++ b/src/test/jtx/acctdelete.h @@ -23,6 +23,8 @@ #include #include +#include + namespace ripple { namespace test { namespace jtx { @@ -31,6 +33,15 @@ namespace jtx { Json::Value acctdelete(Account const& account, Account const& dest); +// Close the ledger until the ledger sequence is large enough to close +// the account. If margin is specified, close the ledger so `margin` +// more closes are needed +void +incLgrSeqForAccDel( + jtx::Env& env, + jtx::Account const& acc, + std::uint32_t margin = 0); + } // namespace jtx } // namespace test diff --git a/src/test/jtx/batch.h b/src/test/jtx/batch.h new file mode 100644 index 0000000000..ab235c293f --- /dev/null +++ b/src/test/jtx/batch.h @@ -0,0 +1,169 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2024 Ripple Labs Inc. 
+ + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_TEST_JTX_BATCH_H_INCLUDED +#define RIPPLE_TEST_JTX_BATCH_H_INCLUDED + +#include +#include +#include +#include +#include + +#include + +#include "test/jtx/SignerUtils.h" + +#include +#include +#include + +namespace ripple { +namespace test { +namespace jtx { + +/** Batch operations */ +namespace batch { + +/** Calculate Batch Fee. */ +XRPAmount +calcBatchFee( + jtx::Env const& env, + uint32_t const& numSigners, + uint32_t const& txns = 0); + +/** Batch. */ +Json::Value +outer( + jtx::Account const& account, + uint32_t seq, + STAmount const& fee, + std::uint32_t flags); + +/** Adds a new Batch Txn on a JTx and autofills. */ +class inner +{ +private: + Json::Value txn_; + std::uint32_t seq_; + std::optional ticket_; + +public: + inner( + Json::Value const& txn, + std::uint32_t const& sequence, + std::optional const& ticket = std::nullopt, + std::optional const& fee = std::nullopt) + : txn_(txn), seq_(sequence), ticket_(ticket) + { + txn_[jss::SigningPubKey] = ""; + txn_[jss::Sequence] = seq_; + txn_[jss::Fee] = "0"; + txn_[jss::Flags] = txn_[jss::Flags].asUInt() | tfInnerBatchTxn; + + // Optionally set ticket sequence + if (ticket_.has_value()) + { + txn_[jss::Sequence] = 0; + txn_[sfTicketSequence.jsonName] = *ticket_; + } + } + + void + operator()(Env&, JTx& jtx) const; + + Json::Value& + operator[](Json::StaticString const& key) + { + return txn_[key]; + } + + void + removeMember(Json::StaticString const& key) + { + txn_.removeMember(key); + } + + Json::Value const& + getTxn() const + { + return txn_; + } +}; + +/** Set a batch signature on a JTx. */ +class sig +{ +public: + std::vector signers; + + sig(std::vector signers_) : signers(std::move(signers_)) + { + sortSigners(signers); + } + + template + requires std::convertible_to + explicit sig(AccountType&& a0, Accounts&&... aN) + : signers{std::forward(a0), std::forward(aN)...} + { + sortSigners(signers); + } + + void + operator()(Env&, JTx& jt) const; +}; + +/** Set a batch nested multi-signature on a JTx. */ +class msig +{ +public: + Account master; + std::vector signers; + + msig(Account const& masterAccount, std::vector signers_) + : master(masterAccount), signers(std::move(signers_)) + { + sortSigners(signers); + } + + template + requires std::convertible_to + explicit msig( + Account const& masterAccount, + AccountType&& a0, + Accounts&&... 
aN) + : master(masterAccount) + , signers{std::forward(a0), std::forward(aN)...} + { + sortSigners(signers); + } + + void + operator()(Env&, JTx& jt) const; +}; + +} // namespace batch + +} // namespace jtx + +} // namespace test +} // namespace ripple + +#endif diff --git a/src/test/jtx/impl/AMM.cpp b/src/test/jtx/impl/AMM.cpp index 3482e7e867..6345253584 100644 --- a/src/test/jtx/impl/AMM.cpp +++ b/src/test/jtx/impl/AMM.cpp @@ -821,7 +821,6 @@ pay(Account const& account, AccountID const& to, STAmount const& amount) jv[jss::Amount] = amount.getJson(JsonOptions::none); jv[jss::Destination] = to_string(to); jv[jss::TransactionType] = jss::Payment; - jv[jss::Flags] = tfUniversal; return jv; } diff --git a/src/test/jtx/impl/TestHelpers.cpp b/src/test/jtx/impl/TestHelpers.cpp index e5b136e9c0..cb8141b9f3 100644 --- a/src/test/jtx/impl/TestHelpers.cpp +++ b/src/test/jtx/impl/TestHelpers.cpp @@ -219,7 +219,6 @@ escrow(AccountID const& account, AccountID const& to, STAmount const& amount) { Json::Value jv; jv[jss::TransactionType] = jss::EscrowCreate; - jv[jss::Flags] = tfUniversal; jv[jss::Account] = to_string(account); jv[jss::Destination] = to_string(to); jv[jss::Amount] = amount.getJson(JsonOptions::none); @@ -231,7 +230,6 @@ finish(AccountID const& account, AccountID const& from, std::uint32_t seq) { Json::Value jv; jv[jss::TransactionType] = jss::EscrowFinish; - jv[jss::Flags] = tfUniversal; jv[jss::Account] = to_string(account); jv[sfOwner.jsonName] = to_string(from); jv[sfOfferSequence.jsonName] = seq; @@ -243,7 +241,6 @@ cancel(AccountID const& account, Account const& from, std::uint32_t seq) { Json::Value jv; jv[jss::TransactionType] = jss::EscrowCancel; - jv[jss::Flags] = tfUniversal; jv[jss::Account] = to_string(account); jv[sfOwner.jsonName] = from.human(); jv[sfOfferSequence.jsonName] = seq; @@ -264,7 +261,6 @@ create( { Json::Value jv; jv[jss::TransactionType] = jss::PaymentChannelCreate; - jv[jss::Flags] = tfUniversal; jv[jss::Account] = to_string(account); jv[jss::Destination] = to_string(to); jv[jss::Amount] = amount.getJson(JsonOptions::none); @@ -286,7 +282,6 @@ fund( { Json::Value jv; jv[jss::TransactionType] = jss::PaymentChannelFund; - jv[jss::Flags] = tfUniversal; jv[jss::Account] = to_string(account); jv[sfChannel.fieldName] = to_string(channel); jv[jss::Amount] = amount.getJson(JsonOptions::none); @@ -306,7 +301,6 @@ claim( { Json::Value jv; jv[jss::TransactionType] = jss::PaymentChannelClaim; - jv[jss::Flags] = tfUniversal; jv[jss::Account] = to_string(account); jv["Channel"] = to_string(channel); if (amount) diff --git a/src/test/jtx/impl/acctdelete.cpp b/src/test/jtx/impl/acctdelete.cpp index 842eea7fc2..acce912d46 100644 --- a/src/test/jtx/impl/acctdelete.cpp +++ b/src/test/jtx/impl/acctdelete.cpp @@ -17,6 +17,7 @@ */ //============================================================================== +#include #include #include @@ -36,6 +37,28 @@ acctdelete(jtx::Account const& account, jtx::Account const& dest) return jv; } +// Close the ledger until the ledger sequence is large enough to close +// the account. 
If margin is specified, close the ledger so `margin` +// more closes are needed +void +incLgrSeqForAccDel(jtx::Env& env, jtx::Account const& acc, std::uint32_t margin) +{ + using namespace jtx; + auto openLedgerSeq = [](jtx::Env& env) -> std::uint32_t { + return env.current()->seq(); + }; + + int const delta = [&]() -> int { + if (env.seq(acc) + 255 > openLedgerSeq(env)) + return env.seq(acc) - openLedgerSeq(env) + 255 - margin; + return 0; + }(); + env.test.BEAST_EXPECT(margin == 0 || delta >= 0); + for (int i = 0; i < delta; ++i) + env.close(); + env.test.BEAST_EXPECT(openLedgerSeq(env) == env.seq(acc) + 255 - margin); +} + } // namespace jtx } // namespace test } // namespace ripple diff --git a/src/test/jtx/impl/batch.cpp b/src/test/jtx/impl/batch.cpp new file mode 100644 index 0000000000..055ed3fb55 --- /dev/null +++ b/src/test/jtx/impl/batch.cpp @@ -0,0 +1,154 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2024 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include + +#include +#include +#include +#include + +#include +#include + +namespace ripple { +namespace test { +namespace jtx { + +namespace batch { + +XRPAmount +calcBatchFee( + test::jtx::Env const& env, + uint32_t const& numSigners, + uint32_t const& txns) +{ + XRPAmount const feeDrops = env.current()->fees().base; + return ((numSigners + 2) * feeDrops) + feeDrops * txns; +} + +// Batch. 
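The fee helper just above is plain arithmetic: with the default 10-drop base fee, no additional batch signers and two inner transactions, calcBatchFee returns (0 + 2) * 10 + 10 * 2 = 40 drops. A rough sketch of how these helpers are meant to compose in a test follows; the flag name tfAllOrNothing, the way the Batch amendment gets enabled, and the inner-transaction sequence numbering are illustrative assumptions, not taken from this patch:

    // Assumes a jtx::Env whose config has the Batch amendment active.
    Env env(*this);
    Account const alice{"alice"};
    Account const bob{"bob"};
    env.fund(XRP(10000), alice, bob);
    env.close();

    auto const seq = env.seq(alice);
    // No extra batch signers, two inner transactions.
    auto const batchFee = batch::calcBatchFee(env, 0, 2);

    // outer() builds the Batch transaction itself; each inner() appends one
    // entry to its RawTransactions array; batch::sig()/msig() would add
    // BatchSigners entries when inner transactions belong to other accounts.
    env(batch::outer(alice, seq, batchFee, tfAllOrNothing /* assumed flag */),
        batch::inner(pay(alice, bob, XRP(1)), seq + 1),
        batch::inner(pay(alice, bob, XRP(2)), seq + 2));
    env.close();

batch::sig and batch::msig reuse the Reg/sortSigners helpers split out into SignerUtils.h, so batch signers are applied in sorted account order, the same requirement the old jtx::msig constructor enforced.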
+Json::Value +outer( + jtx::Account const& account, + uint32_t seq, + STAmount const& fee, + std::uint32_t flags) +{ + Json::Value jv; + jv[jss::TransactionType] = jss::Batch; + jv[jss::Account] = account.human(); + jv[jss::RawTransactions] = Json::Value{Json::arrayValue}; + jv[jss::Sequence] = seq; + jv[jss::Flags] = flags; + jv[jss::Fee] = to_string(fee); + return jv; +} + +void +inner::operator()(Env& env, JTx& jt) const +{ + auto const index = jt.jv[jss::RawTransactions].size(); + Json::Value& batchTransaction = jt.jv[jss::RawTransactions][index]; + + // Initialize the batch transaction + batchTransaction = Json::Value{}; + batchTransaction[jss::RawTransaction] = txn_; +} + +void +sig::operator()(Env& env, JTx& jt) const +{ + auto const mySigners = signers; + std::optional st; + try + { + // required to cast the STObject to STTx + jt.jv[jss::SigningPubKey] = ""; + st = parse(jt.jv); + } + catch (parse_error const&) + { + env.test.log << pretty(jt.jv) << std::endl; + Rethrow(); + } + STTx const& stx = STTx{std::move(*st)}; + auto& js = jt[sfBatchSigners.getJsonName()]; + for (std::size_t i = 0; i < mySigners.size(); ++i) + { + auto const& e = mySigners[i]; + auto& jo = js[i][sfBatchSigner.getJsonName()]; + jo[jss::Account] = e.acct.human(); + jo[jss::SigningPubKey] = strHex(e.sig.pk().slice()); + + Serializer msg; + serializeBatch(msg, stx.getFlags(), stx.getBatchTransactionIDs()); + auto const sig = ripple::sign( + *publicKeyType(e.sig.pk().slice()), e.sig.sk(), msg.slice()); + jo[sfTxnSignature.getJsonName()] = + strHex(Slice{sig.data(), sig.size()}); + } +} + +void +msig::operator()(Env& env, JTx& jt) const +{ + auto const mySigners = signers; + std::optional st; + try + { + // required to cast the STObject to STTx + jt.jv[jss::SigningPubKey] = ""; + st = parse(jt.jv); + } + catch (parse_error const&) + { + env.test.log << pretty(jt.jv) << std::endl; + Rethrow(); + } + STTx const& stx = STTx{std::move(*st)}; + auto& bs = jt[sfBatchSigners.getJsonName()]; + auto const index = jt[sfBatchSigners.jsonName].size(); + auto& bso = bs[index][sfBatchSigner.getJsonName()]; + bso[jss::Account] = master.human(); + bso[jss::SigningPubKey] = ""; + auto& is = bso[sfSigners.getJsonName()]; + for (std::size_t i = 0; i < mySigners.size(); ++i) + { + auto const& e = mySigners[i]; + auto& iso = is[i][sfSigner.getJsonName()]; + iso[jss::Account] = e.acct.human(); + iso[jss::SigningPubKey] = strHex(e.sig.pk().slice()); + + Serializer msg; + serializeBatch(msg, stx.getFlags(), stx.getBatchTransactionIDs()); + finishMultiSigningData(e.acct.id(), msg); + auto const sig = ripple::sign( + *publicKeyType(e.sig.pk().slice()), e.sig.sk(), msg.slice()); + iso[sfTxnSignature.getJsonName()] = + strHex(Slice{sig.data(), sig.size()}); + } +} + +} // namespace batch + +} // namespace jtx +} // namespace test +} // namespace ripple diff --git a/src/test/jtx/impl/check.cpp b/src/test/jtx/impl/check.cpp index f5aa76658c..831bc900e7 100644 --- a/src/test/jtx/impl/check.cpp +++ b/src/test/jtx/impl/check.cpp @@ -37,7 +37,6 @@ cash(jtx::Account const& dest, uint256 const& checkId, STAmount const& amount) jv[sfAmount.jsonName] = amount.getJson(JsonOptions::none); jv[sfCheckID.jsonName] = to_string(checkId); jv[sfTransactionType.jsonName] = jss::CheckCash; - jv[sfFlags.jsonName] = tfUniversal; return jv; } @@ -53,7 +52,6 @@ cash( jv[sfDeliverMin.jsonName] = atLeast.value.getJson(JsonOptions::none); jv[sfCheckID.jsonName] = to_string(checkId); jv[sfTransactionType.jsonName] = jss::CheckCash; - jv[sfFlags.jsonName] = tfUniversal; 
return jv; } @@ -65,7 +63,6 @@ cancel(jtx::Account const& dest, uint256 const& checkId) jv[sfAccount.jsonName] = dest.human(); jv[sfCheckID.jsonName] = to_string(checkId); jv[sfTransactionType.jsonName] = jss::CheckCancel; - jv[sfFlags.jsonName] = tfUniversal; return jv; } diff --git a/src/test/jtx/impl/creds.cpp b/src/test/jtx/impl/creds.cpp index f29bc45e20..eae3b9501b 100644 --- a/src/test/jtx/impl/creds.cpp +++ b/src/test/jtx/impl/creds.cpp @@ -39,8 +39,6 @@ create( jv[jss::Account] = issuer.human(); jv[jss::Subject] = subject.human(); - - jv[jss::Flags] = tfUniversal; jv[sfCredentialType.jsonName] = strHex(credType); return jv; @@ -57,8 +55,6 @@ accept( jv[jss::Account] = subject.human(); jv[jss::Issuer] = issuer.human(); jv[sfCredentialType.jsonName] = strHex(credType); - jv[jss::Flags] = tfUniversal; - return jv; } @@ -75,7 +71,6 @@ deleteCred( jv[jss::Subject] = subject.human(); jv[jss::Issuer] = issuer.human(); jv[sfCredentialType.jsonName] = strHex(credType); - jv[jss::Flags] = tfUniversal; return jv; } diff --git a/src/test/jtx/impl/dids.cpp b/src/test/jtx/impl/dids.cpp index 67a523403c..1b443a5d9d 100644 --- a/src/test/jtx/impl/dids.cpp +++ b/src/test/jtx/impl/dids.cpp @@ -35,7 +35,6 @@ set(jtx::Account const& account) Json::Value jv; jv[jss::TransactionType] = jss::DIDSet; jv[jss::Account] = to_string(account.id()); - jv[jss::Flags] = tfUniversal; return jv; } @@ -45,7 +44,6 @@ setValid(jtx::Account const& account) Json::Value jv; jv[jss::TransactionType] = jss::DIDSet; jv[jss::Account] = to_string(account.id()); - jv[jss::Flags] = tfUniversal; jv[sfURI.jsonName] = strHex(std::string{"uri"}); return jv; } @@ -56,7 +54,6 @@ del(jtx::Account const& account) Json::Value jv; jv[jss::TransactionType] = jss::DIDDelete; jv[jss::Account] = to_string(account.id()); - jv[jss::Flags] = tfUniversal; return jv; } diff --git a/src/test/jtx/impl/ledgerStateFixes.cpp b/src/test/jtx/impl/ledgerStateFixes.cpp index 8c78069191..b7df78dd11 100644 --- a/src/test/jtx/impl/ledgerStateFixes.cpp +++ b/src/test/jtx/impl/ledgerStateFixes.cpp @@ -39,7 +39,6 @@ nftPageLinks(jtx::Account const& acct, jtx::Account const& owner) jv[sfLedgerFixType.jsonName] = LedgerStateFix::nfTokenPageLink; jv[sfOwner.jsonName] = owner.human(); jv[sfTransactionType.jsonName] = jss::LedgerStateFix; - jv[sfFlags.jsonName] = tfUniversal; return jv; } diff --git a/src/test/jtx/impl/multisign.cpp b/src/test/jtx/impl/multisign.cpp index a802528247..6ed6df6804 100644 --- a/src/test/jtx/impl/multisign.cpp +++ b/src/test/jtx/impl/multisign.cpp @@ -65,17 +65,6 @@ signers(Account const& account, none_t) //------------------------------------------------------------------------------ -msig::msig(std::vector signers_) : signers(std::move(signers_)) -{ - // Signatures must be applied in sorted order. 
- std::sort( - signers.begin(), - signers.end(), - [](msig::Reg const& lhs, msig::Reg const& rhs) { - return lhs.acct.id() < rhs.acct.id(); - }); -} - void msig::operator()(Env& env, JTx& jt) const { diff --git a/src/test/jtx/impl/pay.cpp b/src/test/jtx/impl/pay.cpp index 82fe910e9b..d1d994059e 100644 --- a/src/test/jtx/impl/pay.cpp +++ b/src/test/jtx/impl/pay.cpp @@ -35,7 +35,7 @@ pay(AccountID const& account, AccountID const& to, AnyAmount amount) jv[jss::Amount] = amount.value.getJson(JsonOptions::none); jv[jss::Destination] = to_string(to); jv[jss::TransactionType] = jss::Payment; - jv[jss::Flags] = tfUniversal; + jv[jss::Flags] = tfFullyCanonicalSig; return jv; } Json::Value diff --git a/src/test/jtx/impl/txflags.cpp b/src/test/jtx/impl/txflags.cpp index 77c46f35b3..12c9cfeb83 100644 --- a/src/test/jtx/impl/txflags.cpp +++ b/src/test/jtx/impl/txflags.cpp @@ -28,7 +28,7 @@ namespace jtx { void txflags::operator()(Env&, JTx& jt) const { - jt[jss::Flags] = v_ /*| tfUniversal*/; + jt[jss::Flags] = v_ /*| tfFullyCanonicalSig*/; } } // namespace jtx diff --git a/src/test/jtx/impl/xchain_bridge.cpp b/src/test/jtx/impl/xchain_bridge.cpp index c63734ee8f..86e9deda7c 100644 --- a/src/test/jtx/impl/xchain_bridge.cpp +++ b/src/test/jtx/impl/xchain_bridge.cpp @@ -84,7 +84,6 @@ bridge_create( minAccountCreate->getJson(JsonOptions::none); jv[jss::TransactionType] = jss::XChainCreateBridge; - jv[jss::Flags] = tfUniversal; return jv; } @@ -107,7 +106,6 @@ bridge_modify( minAccountCreate->getJson(JsonOptions::none); jv[jss::TransactionType] = jss::XChainModifyBridge; - jv[jss::Flags] = tfUniversal; return jv; } @@ -126,7 +124,6 @@ xchain_create_claim_id( jv[sfOtherChainSource.getJsonName()] = otherChainSource.human(); jv[jss::TransactionType] = jss::XChainCreateClaimID; - jv[jss::Flags] = tfUniversal; return jv; } @@ -148,7 +145,6 @@ xchain_commit( jv[sfOtherChainDestination.getJsonName()] = dst->human(); jv[jss::TransactionType] = jss::XChainCommit; - jv[jss::Flags] = tfUniversal; return jv; } @@ -169,7 +165,6 @@ xchain_claim( jv[sfAmount.getJsonName()] = amt.value.getJson(JsonOptions::none); jv[jss::TransactionType] = jss::XChainClaim; - jv[jss::Flags] = tfUniversal; return jv; } @@ -191,7 +186,6 @@ sidechain_xchain_account_create( reward.value.getJson(JsonOptions::none); jv[jss::TransactionType] = jss::XChainAccountCreateCommit; - jv[jss::Flags] = tfUniversal; return jv; } @@ -242,7 +236,6 @@ claim_attestation( result[sfDestination.getJsonName()] = toBase58(*dst); result[jss::TransactionType] = jss::XChainAddClaimAttestation; - result[jss::Flags] = tfUniversal; return result; } @@ -297,7 +290,6 @@ create_account_attestation( rewardAmount.value.getJson(JsonOptions::none); result[jss::TransactionType] = jss::XChainAddAccountCreateAttestation; - result[jss::Flags] = tfUniversal; return result; } diff --git a/src/test/jtx/multisign.h b/src/test/jtx/multisign.h index 6bcb1a671c..1fed895c6d 100644 --- a/src/test/jtx/multisign.h +++ b/src/test/jtx/multisign.h @@ -21,6 +21,7 @@ #define RIPPLE_TEST_JTX_MULTISIGN_H_INCLUDED #include +#include #include #include #include @@ -65,48 +66,19 @@ signers(Account const& account, none_t); class msig { public: - struct Reg - { - Account acct; - Account sig; - - Reg(Account const& masterSig) : acct(masterSig), sig(masterSig) - { - } - - Reg(Account const& acct_, Account const& regularSig) - : acct(acct_), sig(regularSig) - { - } - - Reg(char const* masterSig) : acct(masterSig), sig(masterSig) - { - } - - Reg(char const* acct_, char const* regularSig) - : acct(acct_), 
sig(regularSig) - { - } - - bool - operator<(Reg const& rhs) const - { - return acct < rhs.acct; - } - }; - std::vector signers; -public: - msig(std::vector signers_); + msig(std::vector signers_) : signers(std::move(signers_)) + { + sortSigners(signers); + } template requires std::convertible_to explicit msig(AccountType&& a0, Accounts&&... aN) - : msig{std::vector{ - std::forward(a0), - std::forward(aN)...}} + : signers{std::forward(a0), std::forward(aN)...} { + sortSigners(signers); } void diff --git a/src/test/rpc/AccountLines_test.cpp b/src/test/rpc/AccountLines_test.cpp index 6e6f0def19..42acea4111 100644 --- a/src/test/rpc/AccountLines_test.cpp +++ b/src/test/rpc/AccountLines_test.cpp @@ -580,7 +580,6 @@ public: STAmount const& amount) { Json::Value jv; jv[jss::TransactionType] = jss::EscrowCreate; - jv[jss::Flags] = tfUniversal; jv[jss::Account] = account.human(); jv[jss::Destination] = to.human(); jv[jss::Amount] = amount.getJson(JsonOptions::none); @@ -596,7 +595,6 @@ public: PublicKey const& pk) { Json::Value jv; jv[jss::TransactionType] = jss::PaymentChannelCreate; - jv[jss::Flags] = tfUniversal; jv[jss::Account] = account.human(); jv[jss::Destination] = to.human(); jv[jss::Amount] = amount.getJson(JsonOptions::none); diff --git a/src/test/rpc/AccountObjects_test.cpp b/src/test/rpc/AccountObjects_test.cpp index b723095aeb..7a48db73bd 100644 --- a/src/test/rpc/AccountObjects_test.cpp +++ b/src/test/rpc/AccountObjects_test.cpp @@ -698,7 +698,6 @@ public: // gw creates an escrow that we can look for in the ledger. Json::Value jvEscrow; jvEscrow[jss::TransactionType] = jss::EscrowCreate; - jvEscrow[jss::Flags] = tfUniversal; jvEscrow[jss::Account] = gw.human(); jvEscrow[jss::Destination] = gw.human(); jvEscrow[jss::Amount] = XRP(100).value().getJson(JsonOptions::none); @@ -912,7 +911,6 @@ public: // for. Json::Value jvPayChan; jvPayChan[jss::TransactionType] = jss::PaymentChannelCreate; - jvPayChan[jss::Flags] = tfUniversal; jvPayChan[jss::Account] = gw.human(); jvPayChan[jss::Destination] = alice.human(); jvPayChan[jss::Amount] = @@ -938,7 +936,6 @@ public: // gw creates a DID that we can look for in the ledger. 
Json::Value jvDID; jvDID[jss::TransactionType] = jss::DIDSet; - jvDID[jss::Flags] = tfUniversal; jvDID[jss::Account] = gw.human(); jvDID[sfURI.jsonName] = strHex(std::string{"uri"}); env(jvDID); diff --git a/src/test/rpc/AccountTx_test.cpp b/src/test/rpc/AccountTx_test.cpp index 9af3fdcb61..6e25c26e58 100644 --- a/src/test/rpc/AccountTx_test.cpp +++ b/src/test/rpc/AccountTx_test.cpp @@ -458,7 +458,6 @@ class AccountTx_test : public beast::unit_test::suite STAmount const& amount) { Json::Value escro; escro[jss::TransactionType] = jss::EscrowCreate; - escro[jss::Flags] = tfUniversal; escro[jss::Account] = account.human(); escro[jss::Destination] = to.human(); escro[jss::Amount] = amount.getJson(JsonOptions::none); @@ -487,7 +486,6 @@ class AccountTx_test : public beast::unit_test::suite { Json::Value escrowFinish; escrowFinish[jss::TransactionType] = jss::EscrowFinish; - escrowFinish[jss::Flags] = tfUniversal; escrowFinish[jss::Account] = alice.human(); escrowFinish[sfOwner.jsonName] = alice.human(); escrowFinish[sfOfferSequence.jsonName] = escrowFinishSeq; @@ -496,7 +494,6 @@ class AccountTx_test : public beast::unit_test::suite { Json::Value escrowCancel; escrowCancel[jss::TransactionType] = jss::EscrowCancel; - escrowCancel[jss::Flags] = tfUniversal; escrowCancel[jss::Account] = alice.human(); escrowCancel[sfOwner.jsonName] = alice.human(); escrowCancel[sfOfferSequence.jsonName] = escrowCancelSeq; @@ -510,7 +507,6 @@ class AccountTx_test : public beast::unit_test::suite std::uint32_t payChanSeq{env.seq(alice)}; Json::Value payChanCreate; payChanCreate[jss::TransactionType] = jss::PaymentChannelCreate; - payChanCreate[jss::Flags] = tfUniversal; payChanCreate[jss::Account] = alice.human(); payChanCreate[jss::Destination] = gw.human(); payChanCreate[jss::Amount] = @@ -527,7 +523,6 @@ class AccountTx_test : public beast::unit_test::suite { Json::Value payChanFund; payChanFund[jss::TransactionType] = jss::PaymentChannelFund; - payChanFund[jss::Flags] = tfUniversal; payChanFund[jss::Account] = alice.human(); payChanFund[sfChannel.jsonName] = payChanIndex; payChanFund[jss::Amount] = diff --git a/src/test/rpc/JSONRPC_test.cpp b/src/test/rpc/JSONRPC_test.cpp index cd26758c1f..22c7dfd1dc 100644 --- a/src/test/rpc/JSONRPC_test.cpp +++ b/src/test/rpc/JSONRPC_test.cpp @@ -2132,6 +2132,127 @@ public: result[jss::result][jss::request][jss::command] == "bad_command"); } + void + testAutoFillFails() + { + testcase("autofill fails"); + using namespace test::jtx; + + // test batch raw transactions max size + { + Env env(*this); + auto ledger = env.current(); + auto const& feeTrack = env.app().getFeeTrack(); + Json::Value req; + Account const alice("alice"); + Account const bob("bob"); + env.fund(XRP(100000), alice); + env.close(); + + auto const batchFee = batch::calcBatchFee(env, 0, 2); + auto const seq = env.seq(alice); + auto jt = env.jtnofill( + batch::outer(alice, env.seq(alice), batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1)), seq + 1), + batch::inner(pay(alice, bob, XRP(2)), seq + 2), + batch::inner(pay(alice, bob, XRP(3)), seq + 3), + batch::inner(pay(alice, bob, XRP(4)), seq + 4), + batch::inner(pay(alice, bob, XRP(5)), seq + 5), + batch::inner(pay(alice, bob, XRP(6)), seq + 6), + batch::inner(pay(alice, bob, XRP(7)), seq + 7), + batch::inner(pay(alice, bob, XRP(8)), seq + 8), + batch::inner(pay(alice, bob, XRP(9)), seq + 9)); + + jt.jv.removeMember(jss::Fee); + jt.jv.removeMember(jss::TxnSignature); + req[jss::tx_json] = jt.jv; + Json::Value result = checkFee( + req, + 
Role::ADMIN, + true, + env.app().config(), + feeTrack, + env.app().getTxQ(), + env.app()); + BEAST_EXPECT(result.size() == 0); + BEAST_EXPECT( + req[jss::tx_json].isMember(jss::Fee) && + req[jss::tx_json][jss::Fee] == + env.current()->fees().base.jsonClipped()); + } + + // test signers max size + { + Env env(*this); + auto ledger = env.current(); + auto const& feeTrack = env.app().getFeeTrack(); + Json::Value req; + Account const alice("alice"); + Account const bob("bob"); + env.fund(XRP(100000), alice, bob); + env.close(); + + auto jt = env.jtnofill( + noop(alice), + msig( + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice, + alice)); + + req[jss::tx_json] = jt.jv; + Json::Value result = checkFee( + req, + Role::ADMIN, + true, + env.app().config(), + feeTrack, + env.app().getTxQ(), + env.app()); + BEAST_EXPECT(result.size() == 0); + BEAST_EXPECT( + req[jss::tx_json].isMember(jss::Fee) && + req[jss::tx_json][jss::Fee] == + env.current()->fees().base.jsonClipped()); + } + } + void testAutoFillFees() { @@ -2785,6 +2906,7 @@ public: run() override { testBadRpcCommand(); + testAutoFillFails(); testAutoFillFees(); testAutoFillEscalatedFees(); testAutoFillNetworkID(); diff --git a/src/test/rpc/LedgerData_test.cpp b/src/test/rpc/LedgerData_test.cpp index b56cb241dd..c2b22efc00 100644 --- a/src/test/rpc/LedgerData_test.cpp +++ b/src/test/rpc/LedgerData_test.cpp @@ -369,7 +369,6 @@ public: { Json::Value jv; jv[jss::TransactionType] = jss::EscrowCreate; - jv[jss::Flags] = tfUniversal; jv[jss::Account] = Account{"bob5"}.human(); jv[jss::Destination] = Account{"bob6"}.human(); jv[jss::Amount] = XRP(50).value().getJson(JsonOptions::none); @@ -383,7 +382,6 @@ public: { Json::Value jv; jv[jss::TransactionType] = jss::PaymentChannelCreate; - jv[jss::Flags] = tfUniversal; jv[jss::Account] = Account{"bob6"}.human(); jv[jss::Destination] = Account{"bob7"}.human(); jv[jss::Amount] = XRP(100).value().getJson(JsonOptions::none); diff --git a/src/test/rpc/LedgerEntry_test.cpp b/src/test/rpc/LedgerEntry_test.cpp index cb6f6d45e2..83232f79c8 100644 --- a/src/test/rpc/LedgerEntry_test.cpp +++ b/src/test/rpc/LedgerEntry_test.cpp @@ -1259,7 +1259,6 @@ class LedgerEntry_test : public beast::unit_test::suite NetClock::time_point const& cancelAfter) { Json::Value jv; jv[jss::TransactionType] = jss::EscrowCreate; - jv[jss::Flags] = tfUniversal; jv[jss::Account] = account.human(); jv[jss::Destination] = to.human(); jv[jss::Amount] = amount.getJson(JsonOptions::none); diff --git a/src/test/rpc/Simulate_test.cpp b/src/test/rpc/Simulate_test.cpp index f27f0c2915..a4360ccc8b 100644 --- a/src/test/rpc/Simulate_test.cpp +++ b/src/test/rpc/Simulate_test.cpp @@ -465,6 +465,36 @@ class Simulate_test : public beast::unit_test::suite } } + void + testInvalidTransactionType() + { + testcase("Invalid transaction type"); + + using namespace jtx; + + Env env(*this); + + Account const alice{"alice"}; + Account const bob{"bob"}; + env.fund(XRP(1000000), alice, bob); + env.close(); + + auto const batchFee = batch::calcBatchFee(env, 0, 2); + auto const seq = env.seq(alice); + auto jt = env.jtnofill( + batch::outer(alice, env.seq(alice), batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), seq + 1), + batch::inner(pay(alice, bob, 
XRP(10)), seq + 1)); + + jt.jv.removeMember(jss::TxnSignature); + Json::Value params; + params[jss::tx_json] = jt.jv; + auto const resp = env.rpc("json", "simulate", to_string(params)); + BEAST_EXPECT(resp[jss::result][jss::error] == "notImpl"); + BEAST_EXPECT( + resp[jss::result][jss::error_message] == "Not implemented."); + } + void testSuccessfulTransaction() { @@ -1081,6 +1111,7 @@ public: { testParamErrors(); testFeeError(); + testInvalidTransactionType(); testSuccessfulTransaction(); testTransactionNonTecFailure(); testTransactionTecFailure(); diff --git a/src/xrpld/app/ledger/detail/BuildLedger.cpp b/src/xrpld/app/ledger/detail/BuildLedger.cpp index 954507a006..4305426753 100644 --- a/src/xrpld/app/ledger/detail/BuildLedger.cpp +++ b/src/xrpld/app/ledger/detail/BuildLedger.cpp @@ -208,11 +208,17 @@ buildLedger( applyTransactions(app, built, txns, failedTxns, accum, j); if (!txns.empty() || !failedTxns.empty()) - JLOG(j.debug()) << "Applied " << applied << " transactions; " - << failedTxns.size() << " failed and " - << txns.size() << " will be retried."; + JLOG(j.debug()) + << "Applied " << applied << " transactions; " + << failedTxns.size() << " failed and " << txns.size() + << " will be retried. " + << "Total transactions in ledger (including Inner Batch): " + << accum.txCount(); else - JLOG(j.debug()) << "Applied " << applied << " transactions."; + JLOG(j.debug()) + << "Applied " << applied << " transactions. " + << "Total transactions in ledger (including Inner Batch): " + << accum.txCount(); }); } diff --git a/src/xrpld/app/ledger/detail/OpenLedger.cpp b/src/xrpld/app/ledger/detail/OpenLedger.cpp index 86a3b4b840..2c98caaa6d 100644 --- a/src/xrpld/app/ledger/detail/OpenLedger.cpp +++ b/src/xrpld/app/ledger/detail/OpenLedger.cpp @@ -26,6 +26,8 @@ #include #include +#include + #include namespace ripple { @@ -120,6 +122,18 @@ OpenLedger::accept( { auto const& tx = txpair.first; auto const txId = tx->getTransactionID(); + + // skip batch txns + // LCOV_EXCL_START + if (tx->isFlag(tfInnerBatchTxn) && rules.enabled(featureBatch)) + { + XRPL_ASSERT( + txpair.second && txpair.second->isFieldPresent(sfParentBatchID), + "Inner Batch transaction missing sfParentBatchID"); + continue; + } + // LCOV_EXCL_STOP + if (auto const toSkip = app.getHashRouter().shouldRelay(txId)) { JLOG(j_.debug()) << "Relaying recovered tx " << txId; diff --git a/src/xrpld/app/misc/NetworkOPs.cpp b/src/xrpld/app/misc/NetworkOPs.cpp index d87dea3c52..c8197b2219 100644 --- a/src/xrpld/app/misc/NetworkOPs.cpp +++ b/src/xrpld/app/misc/NetworkOPs.cpp @@ -64,6 +64,7 @@ #include #include #include +#include #include #include #include @@ -1190,6 +1191,15 @@ NetworkOPsImp::submitTransaction(std::shared_ptr const& iTrans) return; } + // Enforce Network bar for batch txn + if (iTrans->isFlag(tfInnerBatchTxn) && + m_ledgerMaster.getValidatedRules().enabled(featureBatch)) + { + JLOG(m_journal.error()) + << "Submitted transaction invalid: tfInnerBatchTxn flag present."; + return; + } + // this is an asynchronous interface auto const trans = sterilize(*iTrans); @@ -1249,15 +1259,25 @@ NetworkOPsImp::preProcessTransaction(std::shared_ptr& transaction) return false; } + auto const view = m_ledgerMaster.getCurrentLedger(); + + // This function is called by several different parts of the codebase + // under no circumstances will we ever accept an inner txn within a batch + // txn from the network. 
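+    // Such a transaction is rejected here: it is marked INVALID with
+    // temINVALID_FLAG, flagged SF_BAD in the HashRouter, and dropped so it
+    // is not processed or relayed further.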
+ auto const sttx = *transaction->getSTransaction(); + if (sttx.isFlag(tfInnerBatchTxn) && view->rules().enabled(featureBatch)) + { + transaction->setStatus(INVALID); + transaction->setResult(temINVALID_FLAG); + app_.getHashRouter().setFlags(transaction->getID(), SF_BAD); + return false; + } + // NOTE eahennis - I think this check is redundant, // but I'm not 100% sure yet. // If so, only cost is looking up HashRouter flags. - auto const view = m_ledgerMaster.getCurrentLedger(); - auto const [validity, reason] = checkValidity( - app_.getHashRouter(), - *transaction->getSTransaction(), - view->rules(), - app_.config()); + auto const [validity, reason] = + checkValidity(app_.getHashRouter(), sttx, view->rules(), app_.config()); XRPL_ASSERT( validity == Validity::Valid, "ripple::NetworkOPsImp::processTransaction : valid validity"); @@ -1659,13 +1679,17 @@ NetworkOPsImp::apply(std::unique_lock& batchLock) { auto const toSkip = app_.getHashRouter().shouldRelay(e.transaction->getID()); - - if (toSkip) + if (auto const sttx = *(e.transaction->getSTransaction()); + toSkip && + // Skip relaying if it's an inner batch txn and batch + // feature is enabled + !(sttx.isFlag(tfInnerBatchTxn) && + newOL->rules().enabled(featureBatch))) { protocol::TMTransaction tx; Serializer s; - e.transaction->getSTransaction()->add(s); + sttx.add(s); tx.set_rawtransaction(s.data(), s.size()); tx.set_status(protocol::tsCURRENT); tx.set_receivetimestamp( @@ -1677,7 +1701,7 @@ NetworkOPsImp::apply(std::unique_lock& batchLock) } } - if (validatedLedgerIndex) + if (!isTemMalformed(e.result) && validatedLedgerIndex) { auto [fee, accountSeq, availableSeq] = app_.getTxQ().getTxRequiredFeeAndSeq( @@ -3020,6 +3044,11 @@ NetworkOPsImp::pubProposedTransaction( std::shared_ptr const& transaction, TER result) { + // never publish an inner txn inside a batch txn + if (transaction->isFlag(tfInnerBatchTxn) && + ledger->rules().enabled(featureBatch)) + return; + MultiApiJson jvObj = transJson(transaction, result, false, ledger, std::nullopt); diff --git a/src/xrpld/app/misc/detail/TxQ.cpp b/src/xrpld/app/misc/detail/TxQ.cpp index adf96d0e14..6924dae6c8 100644 --- a/src/xrpld/app/misc/detail/TxQ.cpp +++ b/src/xrpld/app/misc/detail/TxQ.cpp @@ -737,6 +737,13 @@ TxQ::apply( STAmountSO stAmountSO{view.rules().enabled(fixSTAmountCanonicalize)}; NumberSO stNumberSO{view.rules().enabled(fixUniversalNumber)}; + // See if the transaction is valid, properly formed, + // etc. before doing potentially expensive queue + // replace and multi-transaction operations. + auto const pfresult = preflight(app, view.rules(), *tx, flags, j); + if (pfresult.ter != tesSUCCESS) + return {pfresult.ter, false}; + // See if the transaction paid a high enough fee that it can go straight // into the ledger. if (auto directApplied = tryDirectApply(app, view, tx, flags, j)) @@ -749,13 +756,6 @@ TxQ::apply( // o The transaction paid a high enough fee that fee averaging will apply. // o The transaction will be queued. - // See if the transaction is valid, properly formed, - // etc. before doing potentially expensive queue - // replace and multi-transaction operations. - auto const pfresult = preflight(app, view.rules(), *tx, flags, j); - if (pfresult.ter != tesSUCCESS) - return {pfresult.ter, false}; - // If the account is not currently in the ledger, don't queue its tx. 
auto const account = (*tx)[sfAccount]; Keylet const accountKey{keylet::account(account)}; diff --git a/src/xrpld/app/tx/applySteps.h b/src/xrpld/app/tx/applySteps.h index 2a5557ff4b..ec7180e263 100644 --- a/src/xrpld/app/tx/applySteps.h +++ b/src/xrpld/app/tx/applySteps.h @@ -165,6 +165,8 @@ struct PreflightResult public: /// From the input - the transaction STTx const& tx; + /// From the input - the batch identifier, if part of a batch + std::optional const parentBatchId; /// From the input - the rules Rules const rules; /// Consequences of the transaction @@ -183,6 +185,7 @@ public: Context const& ctx_, std::pair const& result) : tx(ctx_.tx) + , parentBatchId(ctx_.parentBatchId) , rules(ctx_.rules) , consequences(result.second) , flags(ctx_.flags) @@ -210,6 +213,8 @@ public: ReadView const& view; /// From the input - the transaction STTx const& tx; + /// From the input - the batch identifier, if part of a batch + std::optional const parentBatchId; /// From the input - the flags ApplyFlags const flags; /// From the input - the journal @@ -217,6 +222,7 @@ public: /// Intermediate transaction result TER const ter; + /// Success flag - whether the transaction is likely to /// claim a fee bool const likelyToClaimFee; @@ -226,6 +232,7 @@ public: PreclaimResult(Context const& ctx_, TER ter_) : view(ctx_.view) , tx(ctx_.tx) + , parentBatchId(ctx_.parentBatchId) , flags(ctx_.flags) , j(ctx_.j) , ter(ter_) @@ -255,6 +262,7 @@ public: @return A `PreflightResult` object containing, among other things, the `TER` code. */ +/** @{ */ PreflightResult preflight( Application& app, @@ -263,6 +271,16 @@ preflight( ApplyFlags flags, beast::Journal j); +PreflightResult +preflight( + Application& app, + Rules const& rules, + uint256 const& parentBatchId, + STTx const& tx, + ApplyFlags flags, + beast::Journal j); +/** @} */ + /** Gate a transaction based on static ledger information. 
The transaction is checked against all possible diff --git a/src/xrpld/app/tx/detail/ApplyContext.cpp b/src/xrpld/app/tx/detail/ApplyContext.cpp index 71fe246f15..79cbb7f40d 100644 --- a/src/xrpld/app/tx/detail/ApplyContext.cpp +++ b/src/xrpld/app/tx/detail/ApplyContext.cpp @@ -29,6 +29,7 @@ namespace ripple { ApplyContext::ApplyContext( Application& app_, OpenView& base, + std::optional const& parentBatchId, STTx const& tx_, TER preclaimResult_, XRPAmount baseFee_, @@ -41,7 +42,11 @@ ApplyContext::ApplyContext( , journal(journal_) , base_(base) , flags_(flags) + , parentBatchId_(parentBatchId) { + XRPL_ASSERT( + parentBatchId.has_value() == ((flags_ & tapBATCH) == tapBATCH), + "Parent Batch ID should be set if batch apply flag is set"); view_.emplace(&base_, flags_); } @@ -54,7 +59,8 @@ ApplyContext::discard() std::optional ApplyContext::apply(TER ter) { - return view_->apply(base_, tx, ter, flags_ & tapDRY_RUN, journal); + return view_->apply( + base_, tx, ter, parentBatchId_, flags_ & tapDRY_RUN, journal); } std::size_t diff --git a/src/xrpld/app/tx/detail/ApplyContext.h b/src/xrpld/app/tx/detail/ApplyContext.h index 715d4ea471..720d0aeea3 100644 --- a/src/xrpld/app/tx/detail/ApplyContext.h +++ b/src/xrpld/app/tx/detail/ApplyContext.h @@ -39,11 +39,34 @@ public: explicit ApplyContext( Application& app, OpenView& base, + std::optional const& parentBatchId, STTx const& tx, TER preclaimResult, XRPAmount baseFee, ApplyFlags flags, - beast::Journal = beast::Journal{beast::Journal::getNullSink()}); + beast::Journal journal = beast::Journal{beast::Journal::getNullSink()}); + + explicit ApplyContext( + Application& app, + OpenView& base, + STTx const& tx, + TER preclaimResult, + XRPAmount baseFee, + ApplyFlags flags, + beast::Journal journal = beast::Journal{beast::Journal::getNullSink()}) + : ApplyContext( + app, + base, + std::nullopt, + tx, + preclaimResult, + baseFee, + flags, + journal) + { + XRPL_ASSERT( + (flags & tapBATCH) == 0, "Batch apply flag should not be set"); + } Application& app; STTx const& tx; @@ -131,6 +154,9 @@ private: OpenView& base_; ApplyFlags flags_; std::optional view_; + + // The ID of the batch transaction we are executing under, if seated. + std::optional parentBatchId_; }; } // namespace ripple diff --git a/src/xrpld/app/tx/detail/Batch.cpp b/src/xrpld/app/tx/detail/Batch.cpp new file mode 100644 index 0000000000..dcac889a5a --- /dev/null +++ b/src/xrpld/app/tx/detail/Batch.cpp @@ -0,0 +1,482 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2024 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace ripple { + +/** + * @brief Calculates the total base fee for a batch transaction. + * + * This function computes the required base fee for a batch transaction, + * including the base fee for the batch itself, the sum of base fees for + * all inner transactions, and additional fees for each batch signer. + * It performs overflow checks and validates the structure of the batch + * and its signers. + * + * @param view The ledger view providing fee and state information. + * @param tx The batch transaction to calculate the fee for. + * @return XRPAmount The total base fee required for the batch transaction. + * + * @throws std::overflow_error If any fee calculation would overflow the + * XRPAmount type. + * @throws std::length_error If the number of inner transactions or signers + * exceeds the allowed maximum. + * @throws std::invalid_argument If an inner transaction is itself a batch + * transaction. + */ +XRPAmount +Batch::calculateBaseFee(ReadView const& view, STTx const& tx) +{ + XRPAmount const maxAmount{ + std::numeric_limits::max()}; + + // batchBase: view.fees().base for batch processing + default base fee + XRPAmount const baseFee = Transactor::calculateBaseFee(view, tx); + + // LCOV_EXCL_START + if (baseFee > maxAmount - view.fees().base) + throw std::overflow_error("XRPAmount overflow"); + // LCOV_EXCL_STOP + + XRPAmount const batchBase = view.fees().base + baseFee; + + // Calculate the Inner Txn Fees + XRPAmount txnFees{0}; + if (tx.isFieldPresent(sfRawTransactions)) + { + auto const& txns = tx.getFieldArray(sfRawTransactions); + + XRPL_ASSERT( + txns.size() <= maxBatchTxCount, + "Raw Transactions array exceeds max entries."); + + // LCOV_EXCL_START + if (txns.size() > maxBatchTxCount) + throw std::length_error( + "Raw Transactions array exceeds max entries"); + // LCOV_EXCL_STOP + + for (STObject txn : txns) + { + STTx const stx = STTx{std::move(txn)}; + + XRPL_ASSERT( + stx.getTxnType() != ttBATCH, "Inner Batch transaction found."); + + // LCOV_EXCL_START + if (stx.getTxnType() == ttBATCH) + throw std::invalid_argument("Inner Batch transaction found"); + // LCOV_EXCL_STOP + + auto const fee = ripple::calculateBaseFee(view, stx); + // LCOV_EXCL_START + if (txnFees > maxAmount - fee) + throw std::overflow_error("XRPAmount overflow"); + // LCOV_EXCL_STOP + txnFees += fee; + } + } + + // Calculate the Signers/BatchSigners Fees + std::int32_t signerCount = 0; + if (tx.isFieldPresent(sfBatchSigners)) + { + auto const& signers = tx.getFieldArray(sfBatchSigners); + XRPL_ASSERT( + signers.size() <= maxBatchTxCount, + "Batch Signers array exceeds max entries."); + + // LCOV_EXCL_START + if (signers.size() > maxBatchTxCount) + throw std::length_error("Batch Signers array exceeds max entries"); + // LCOV_EXCL_STOP + + for (STObject const& signer : signers) + { + if (signer.isFieldPresent(sfTxnSignature)) + signerCount += 1; + else if (signer.isFieldPresent(sfSigners)) + signerCount += signer.getFieldArray(sfSigners).size(); + } + } + + // LCOV_EXCL_START + if (signerCount > 0 && view.fees().base > maxAmount / signerCount) + throw std::overflow_error("XRPAmount overflow"); + // LCOV_EXCL_STOP + + XRPAmount signerFees = signerCount * view.fees().base; + + // LCOV_EXCL_START + if (signerFees > maxAmount - txnFees) + throw std::overflow_error("XRPAmount overflow"); + if (txnFees + signerFees > maxAmount - 
batchBase) + throw std::overflow_error("XRPAmount overflow"); + // LCOV_EXCL_STOP + + // 10 drops per batch signature + sum of inner tx fees + batchBase + return signerFees + txnFees + batchBase; +} + +/** + * @brief Performs preflight validation checks for a Batch transaction. + * + * This function validates the structure and contents of a Batch transaction + * before it is processed. It ensures that the Batch feature is enabled, + * checks for valid flags, validates the number and uniqueness of inner + * transactions, and enforces correct signing and fee requirements. + * + * The following validations are performed: + * - The Batch feature must be enabled in the current rules. + * - Only one of the mutually exclusive batch flags must be set. + * - The batch must contain at least two and no more than the maximum allowed + * inner transactions. + * - Each inner transaction must: + * - Be unique within the batch. + * - Not itself be a Batch transaction. + * - Have the tfInnerBatchTxn flag set. + * - Not include a TxnSignature or Signers field. + * - Have an empty SigningPubKey. + * - Pass its own preflight checks. + * - Have a fee of zero. + * - Have either Sequence or TicketSequence set, but not both or neither. + * - Not duplicate Sequence or TicketSequence values for the same account (for + * certain flags). + * - Validates that all required inner transaction accounts are present in the + * batch signers array, and that all batch signers are unique and not the outer + * account. + * - Verifies the batch signature if batch signers are present. + * + * @param ctx The PreflightContext containing the transaction and environment. + * @return NotTEC Returns tesSUCCESS if all checks pass, or an appropriate error + * code otherwise. + */ +NotTEC +Batch::preflight(PreflightContext const& ctx) +{ + if (!ctx.rules.enabled(featureBatch)) + return temDISABLED; + + if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) + return ret; + + auto const parentBatchId = ctx.tx.getTransactionID(); + auto const outerAccount = ctx.tx.getAccountID(sfAccount); + auto const flags = ctx.tx.getFlags(); + + if (flags & tfBatchMask) + { + JLOG(ctx.j.debug()) << "BatchTrace[" << parentBatchId << "]:" + << "invalid flags."; + return temINVALID_FLAG; + } + + if (std::popcount( + flags & + (tfAllOrNothing | tfOnlyOne | tfUntilFailure | tfIndependent)) != 1) + { + JLOG(ctx.j.debug()) << "BatchTrace[" << parentBatchId << "]:" + << "too many flags."; + return temINVALID_FLAG; + } + + auto const& rawTxns = ctx.tx.getFieldArray(sfRawTransactions); + if (rawTxns.size() <= 1) + { + JLOG(ctx.j.debug()) << "BatchTrace[" << parentBatchId << "]:" + << "txns array must have at least 2 entries."; + return temARRAY_EMPTY; + } + + if (rawTxns.size() > maxBatchTxCount) + { + JLOG(ctx.j.debug()) << "BatchTrace[" << parentBatchId << "]:" + << "txns array exceeds 8 entries."; + return temARRAY_TOO_LARGE; + } + + // Validation Inner Batch Txns + std::unordered_set requiredSigners; + std::unordered_set uniqueHashes; + std::unordered_map> + accountSeqTicket; + for (STObject rb : rawTxns) + { + STTx const stx = STTx{std::move(rb)}; + auto const hash = stx.getTransactionID(); + if (!uniqueHashes.emplace(hash).second) + { + JLOG(ctx.j.debug()) << "BatchTrace[" << parentBatchId << "]: " + << "duplicate Txn found. " + << "txID: " << hash; + return temREDUNDANT; + } + + if (stx.getFieldU16(sfTransactionType) == ttBATCH) + { + JLOG(ctx.j.debug()) << "BatchTrace[" << parentBatchId << "]: " + << "batch cannot have an inner batch txn. 
" + << "txID: " << hash; + return temINVALID; + } + + if (!(stx.getFlags() & tfInnerBatchTxn)) + { + JLOG(ctx.j.debug()) + << "BatchTrace[" << parentBatchId << "]: " + << "inner txn must have the tfInnerBatchTxn flag. " + << "txID: " << hash; + return temINVALID_FLAG; + } + + if (stx.isFieldPresent(sfTxnSignature)) + { + JLOG(ctx.j.debug()) << "BatchTrace[" << parentBatchId << "]: " + << "inner txn cannot include TxnSignature. " + << "txID: " << hash; + return temBAD_SIGNATURE; + } + + if (stx.isFieldPresent(sfSigners)) + { + JLOG(ctx.j.debug()) << "BatchTrace[" << parentBatchId << "]: " + << "inner txn cannot include Signers. " + << "txID: " << hash; + return temBAD_SIGNER; + } + + if (!stx.getSigningPubKey().empty()) + { + JLOG(ctx.j.debug()) << "BatchTrace[" << parentBatchId << "]: " + << "inner txn SigningPubKey must be empty. " + << "txID: " << hash; + return temBAD_REGKEY; + } + + auto const innerAccount = stx.getAccountID(sfAccount); + if (auto const preflightResult = ripple::preflight( + ctx.app, ctx.rules, parentBatchId, stx, tapBATCH, ctx.j); + preflightResult.ter != tesSUCCESS) + { + JLOG(ctx.j.debug()) << "BatchTrace[" << parentBatchId << "]: " + << "inner txn preflight failed: " + << transHuman(preflightResult.ter) << " " + << "txID: " << hash; + return temINVALID_INNER_BATCH; + } + + // Check that the fee is zero + if (auto const fee = stx.getFieldAmount(sfFee); + !fee.native() || fee.xrp() != beast::zero) + { + JLOG(ctx.j.debug()) << "BatchTrace[" << parentBatchId << "]: " + << "inner txn must have a fee of 0. " + << "txID: " << hash; + return temBAD_FEE; + } + + // Check that Sequence and TicketSequence are not both present + if (stx.isFieldPresent(sfTicketSequence) && + stx.getFieldU32(sfSequence) != 0) + { + JLOG(ctx.j.debug()) + << "BatchTrace[" << parentBatchId << "]: " + << "inner txn must have exactly one of Sequence and " + "TicketSequence. " + << "txID: " << hash; + return temSEQ_AND_TICKET; + } + + // Verify that either Sequence or TicketSequence is present + if (!stx.isFieldPresent(sfTicketSequence) && + stx.getFieldU32(sfSequence) == 0) + { + JLOG(ctx.j.debug()) << "BatchTrace[" << parentBatchId << "]: " + << "inner txn must have either Sequence or " + "TicketSequence. " + << "txID: " << hash; + return temSEQ_AND_TICKET; + } + + // Duplicate sequence and ticket checks + if (flags & (tfAllOrNothing | tfUntilFailure)) + { + if (auto const seq = stx.getFieldU32(sfSequence); seq != 0) + { + if (!accountSeqTicket[innerAccount].insert(seq).second) + { + JLOG(ctx.j.debug()) + << "BatchTrace[" << parentBatchId << "]: " + << "duplicate sequence found: " + << "txID: " << hash; + return temREDUNDANT; + } + } + + if (stx.isFieldPresent(sfTicketSequence)) + { + if (auto const ticket = stx.getFieldU32(sfTicketSequence); + !accountSeqTicket[innerAccount].insert(ticket).second) + { + JLOG(ctx.j.debug()) + << "BatchTrace[" << parentBatchId << "]: " + << "duplicate ticket found: " + << "txID: " << hash; + return temREDUNDANT; + } + } + } + + // If the inner account is the same as the outer account, do not add the + // inner account to the required signers set. 
+ if (innerAccount != outerAccount) + requiredSigners.insert(innerAccount); + } + + // LCOV_EXCL_START + if (auto const ret = preflight2(ctx); !isTesSuccess(ret)) + return ret; + // LCOV_EXCL_STOP + + // Validation Batch Signers + std::unordered_set batchSigners; + if (ctx.tx.isFieldPresent(sfBatchSigners)) + { + STArray const& signers = ctx.tx.getFieldArray(sfBatchSigners); + + // Check that the batch signers array is not too large. + if (signers.size() > maxBatchTxCount) + { + JLOG(ctx.j.debug()) << "BatchTrace[" << parentBatchId << "]: " + << "signers array exceeds 8 entries."; + return temARRAY_TOO_LARGE; + } + + // Add batch signers to the set to ensure all signer accounts are + // unique. Meanwhile, remove signer accounts from the set of inner + // transaction accounts (`requiredSigners`). By the end of the loop, + // `requiredSigners` should be empty, indicating that all inner + // accounts are matched with signers. + for (auto const& signer : signers) + { + AccountID const signerAccount = signer.getAccountID(sfAccount); + if (signerAccount == outerAccount) + { + JLOG(ctx.j.debug()) + << "BatchTrace[" << parentBatchId << "]: " + << "signer cannot be the outer account: " << signerAccount; + return temBAD_SIGNER; + } + + if (!batchSigners.insert(signerAccount).second) + { + JLOG(ctx.j.debug()) + << "BatchTrace[" << parentBatchId << "]: " + << "duplicate signer found: " << signerAccount; + return temREDUNDANT; + } + + // Check that the batch signer is in the required signers set. + // Remove it if it does, as it can be crossed off the list. + if (requiredSigners.erase(signerAccount) == 0) + { + JLOG(ctx.j.debug()) << "BatchTrace[" << parentBatchId << "]: " + << "no account signature for inner txn."; + return temBAD_SIGNER; + } + } + + // Check the batch signers signatures. + auto const sigResult = ctx.tx.checkBatchSign( + STTx::RequireFullyCanonicalSig::yes, ctx.rules); + + if (!sigResult) + { + JLOG(ctx.j.debug()) + << "BatchTrace[" << parentBatchId << "]: " + << "invalid batch txn signature: " << sigResult.error(); + return temBAD_SIGNATURE; + } + } + + if (!requiredSigners.empty()) + { + JLOG(ctx.j.debug()) << "BatchTrace[" << parentBatchId << "]: " + << "invalid batch signers."; + return temBAD_SIGNER; + } + return tesSUCCESS; +} + +/** + * @brief Checks the validity of signatures for a batch transaction. + * + * This method first verifies the standard transaction signature by calling + * Transactor::checkSign. If the signature is not valid it returns the + * corresponding error code. + * + * Next, it verifies the batch-specific signature requirements by calling + * Transactor::checkBatchSign. If this check fails, it also returns the + * corresponding error code. + * + * If both checks succeed, the function returns tesSUCCESS. + * + * @param ctx The PreclaimContext containing transaction and environment data. + * @return NotTEC Returns tesSUCCESS if all signature checks pass, or an error + * code otherwise. + */ +NotTEC +Batch::checkSign(PreclaimContext const& ctx) +{ + if (auto ret = Transactor::checkSign(ctx); !isTesSuccess(ret)) + return ret; + + if (auto ret = Transactor::checkBatchSign(ctx); !isTesSuccess(ret)) + return ret; + + return tesSUCCESS; +} + +/** + * @brief Applies the outer batch transaction. + * + * This method is responsible for applying the outer batch transaction. + * The inner transactions within the batch are applied separately in the + * `applyBatchTransactions` method after the outer transaction is processed. 
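+ * Each inner transaction is applied there in its own sandbox view, and its
+ * changes are folded into the batch-wide view only if it is actually
+ * applied (a tes or tec claim result); the batch flags (tfAllOrNothing,
+ * tfOnlyOne, tfUntilFailure, tfIndependent) determine whether the
+ * remaining inner transactions are attempted after a failure or success.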
+ * + * @return TER Returns tesSUCCESS to indicate successful application of the + * outer batch transaction. + */ +TER +Batch::doApply() +{ + return tesSUCCESS; +} + +} // namespace ripple diff --git a/src/xrpld/app/tx/detail/Batch.h b/src/xrpld/app/tx/detail/Batch.h new file mode 100644 index 0000000000..211bce0589 --- /dev/null +++ b/src/xrpld/app/tx/detail/Batch.h @@ -0,0 +1,55 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2024 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_TX_BATCH_H_INCLUDED +#define RIPPLE_TX_BATCH_H_INCLUDED + +#include +#include + +#include +#include + +namespace ripple { + +class Batch : public Transactor +{ +public: + static constexpr ConsequencesFactoryType ConsequencesFactory{Normal}; + + explicit Batch(ApplyContext& ctx) : Transactor(ctx) + { + } + + static XRPAmount + calculateBaseFee(ReadView const& view, STTx const& tx); + + static NotTEC + preflight(PreflightContext const& ctx); + + static NotTEC + checkSign(PreclaimContext const& ctx); + + TER + doApply() override; +}; + +} // namespace ripple + +#endif diff --git a/src/xrpld/app/tx/detail/SetAccount.cpp b/src/xrpld/app/tx/detail/SetAccount.cpp index 599819151a..0c16182ed8 100644 --- a/src/xrpld/app/tx/detail/SetAccount.cpp +++ b/src/xrpld/app/tx/detail/SetAccount.cpp @@ -214,7 +214,7 @@ SetAccount::checkPermission(ReadView const& view, STTx const& tx) // AccountSet transaction. If any delegated account is trying to // update the flag on behalf of another account, it is not // authorized. 
- if (uSetFlag != 0 || uClearFlag != 0 || uTxFlags != tfFullyCanonicalSig) + if (uSetFlag != 0 || uClearFlag != 0 || uTxFlags & tfUniversalMask) return tecNO_PERMISSION; if (tx.isFieldPresent(sfEmailHash) && diff --git a/src/xrpld/app/tx/detail/Transactor.cpp b/src/xrpld/app/tx/detail/Transactor.cpp index baba7d131e..cc82f7c3ca 100644 --- a/src/xrpld/app/tx/detail/Transactor.cpp +++ b/src/xrpld/app/tx/detail/Transactor.cpp @@ -34,6 +34,7 @@ #include #include #include +#include #include namespace ripple { @@ -42,6 +43,13 @@ namespace ripple { NotTEC preflight0(PreflightContext const& ctx) { + if (isPseudoTx(ctx.tx) && ctx.tx.isFlag(tfInnerBatchTxn)) + { + JLOG(ctx.j.warn()) << "Pseudo transactions cannot contain the " + "tfInnerBatchTxn flag."; + return temINVALID_FLAG; + } + if (!isPseudoTx(ctx.tx) || ctx.tx.isFieldPresent(sfNetworkID)) { uint32_t nodeNID = ctx.app.config().NETWORK_ID; @@ -136,6 +144,14 @@ preflight1(PreflightContext const& ctx) ctx.tx.isFieldPresent(sfAccountTxnID)) return temINVALID; + if (ctx.tx.isFlag(tfInnerBatchTxn) && !ctx.rules.enabled(featureBatch)) + return temINVALID_FLAG; + + XRPL_ASSERT( + ctx.tx.isFlag(tfInnerBatchTxn) == ctx.parentBatchId.has_value() || + !ctx.rules.enabled(featureBatch), + "Inner batch transaction must have a parent batch ID."); + return tesSUCCESS; } @@ -176,25 +192,13 @@ preflight2(PreflightContext const& ctx) if (sigValid.first == Validity::SigBad) { JLOG(ctx.j.debug()) << "preflight2: bad signature. " << sigValid.second; - return temINVALID; + return temINVALID; // LCOV_EXCL_LINE } return tesSUCCESS; } //------------------------------------------------------------------------------ -PreflightContext::PreflightContext( - Application& app_, - STTx const& tx_, - Rules const& rules_, - ApplyFlags flags_, - beast::Journal j_) - : app(app_), tx(tx_), rules(rules_), flags(flags_), j(j_) -{ -} - -//------------------------------------------------------------------------------ - Transactor::Transactor(ApplyContext& ctx) : ctx_(ctx), j_(ctx.journal), account_(ctx.tx.getAccountID(sfAccount)) { @@ -251,6 +255,16 @@ Transactor::checkFee(PreclaimContext const& ctx, XRPAmount baseFee) return temBAD_FEE; auto const feePaid = ctx.tx[sfFee].xrp(); + + if (ctx.flags & tapBATCH) + { + if (feePaid == beast::zero) + return tesSUCCESS; + + JLOG(ctx.j.trace()) << "Batch: Fee must be zero."; + return temBAD_FEE; // LCOV_EXCL_LINE + } + if (!isLegalAmount(feePaid) || feePaid < beast::zero) return temBAD_FEE; @@ -557,51 +571,113 @@ Transactor::apply() NotTEC Transactor::checkSign(PreclaimContext const& ctx) { - if (ctx.flags & tapDRY_RUN) + // Ignore signature check on batch inner transactions + if (ctx.tx.isFlag(tfInnerBatchTxn) && + ctx.view.rules().enabled(featureBatch)) { - // This code must be different for `simulate` - // Since the public key may be empty even for single signing - if (ctx.tx.isFieldPresent(sfSigners)) - return checkMultiSign(ctx); - return checkSingleSign(ctx); + // Defensive Check: These values are also checked in Batch::preflight + if (ctx.tx.isFieldPresent(sfTxnSignature) || + !ctx.tx.getSigningPubKey().empty() || + ctx.tx.isFieldPresent(sfSigners)) + { + return temINVALID_FLAG; // LCOV_EXCL_LINE + } + return tesSUCCESS; } - // If the pk is empty, then we must be multi-signing. 
- if (ctx.tx.getSigningPubKey().empty()) - return checkMultiSign(ctx); - return checkSingleSign(ctx); -} + auto const idAccount = ctx.tx[~sfDelegate].value_or(ctx.tx[sfAccount]); -NotTEC -Transactor::checkSingleSign(PreclaimContext const& ctx) -{ - // Check that the value in the signing key slot is a public key. + // If the pk is empty and not simulate or simulate and signers, + // then we must be multi-signing. + if ((ctx.flags & tapDRY_RUN && ctx.tx.isFieldPresent(sfSigners)) || + (!(ctx.flags & tapDRY_RUN) && ctx.tx.getSigningPubKey().empty())) + { + STArray const& txSigners(ctx.tx.getFieldArray(sfSigners)); + return checkMultiSign(ctx.view, idAccount, txSigners, ctx.flags, ctx.j); + } + + // Check Single Sign auto const pkSigner = ctx.tx.getSigningPubKey(); + // This ternary is only needed to handle `simulate` + XRPL_ASSERT( + (ctx.flags & tapDRY_RUN) || !pkSigner.empty(), + "ripple::Transactor::checkSingleSign : non-empty signer or simulation"); + if (!(ctx.flags & tapDRY_RUN) && !publicKeyType(makeSlice(pkSigner))) { JLOG(ctx.j.trace()) << "checkSingleSign: signing public key type is unknown"; return tefBAD_AUTH; // FIXME: should be better error! } - - // Look up the account. - auto const idAccount = ctx.tx.isFieldPresent(sfDelegate) - ? ctx.tx.getAccountID(sfDelegate) - : ctx.tx.getAccountID(sfAccount); + auto const idSigner = pkSigner.empty() + ? idAccount + : calcAccountID(PublicKey(makeSlice(pkSigner))); auto const sleAccount = ctx.view.read(keylet::account(idAccount)); if (!sleAccount) return terNO_ACCOUNT; - // This ternary is only needed to handle `simulate` - XRPL_ASSERT( - (ctx.flags & tapDRY_RUN) || !pkSigner.empty(), - "ripple::Transactor::checkSingleSign : non-empty signer or simulation"); - auto const idSigner = pkSigner.empty() - ? idAccount - : calcAccountID(PublicKey(makeSlice(pkSigner))); + return checkSingleSign( + idSigner, idAccount, sleAccount, ctx.view.rules(), ctx.j); +} + +NotTEC +Transactor::checkBatchSign(PreclaimContext const& ctx) +{ + NotTEC ret = tesSUCCESS; + STArray const& signers{ctx.tx.getFieldArray(sfBatchSigners)}; + for (auto const& signer : signers) + { + auto const idAccount = signer.getAccountID(sfAccount); + + Blob const& pkSigner = signer.getFieldVL(sfSigningPubKey); + if (pkSigner.empty()) + { + STArray const& txSigners(signer.getFieldArray(sfSigners)); + if (ret = checkMultiSign( + ctx.view, idAccount, txSigners, ctx.flags, ctx.j); + !isTesSuccess(ret)) + return ret; + } + else + { + // LCOV_EXCL_START + if (!publicKeyType(makeSlice(pkSigner))) + return tefBAD_AUTH; + // LCOV_EXCL_STOP + + auto const idSigner = calcAccountID(PublicKey(makeSlice(pkSigner))); + auto const sleAccount = ctx.view.read(keylet::account(idAccount)); + + // A batch can include transactions from an un-created account ONLY + // when the account master key is the signer + if (!sleAccount) + { + if (idAccount != idSigner) + return tefBAD_AUTH; + + return tesSUCCESS; + } + + if (ret = checkSingleSign( + idSigner, idAccount, sleAccount, ctx.view.rules(), ctx.j); + !isTesSuccess(ret)) + return ret; + } + } + return ret; +} + +NotTEC +Transactor::checkSingleSign( + AccountID const& idSigner, + AccountID const& idAccount, + std::shared_ptr sleAccount, + Rules const& rules, + beast::Journal j) +{ bool const isMasterDisabled = sleAccount->isFlag(lsfDisableMaster); - if (ctx.view.rules().enabled(fixMasterKeyAsRegularKey)) + if (rules.enabled(fixMasterKeyAsRegularKey)) { // Signed with regular key. 
if ((*sleAccount)[~sfRegularKey] == idSigner) @@ -638,16 +714,14 @@ Transactor::checkSingleSign(PreclaimContext const& ctx) else if (sleAccount->isFieldPresent(sfRegularKey)) { // Signing key does not match master or regular key. - JLOG(ctx.j.trace()) - << "checkSingleSign: Not authorized to use account."; + JLOG(j.trace()) << "checkSingleSign: Not authorized to use account."; return tefBAD_AUTH; } else { // No regular key on account and signing key does not match master key. // FIXME: Why differentiate this case from tefBAD_AUTH? - JLOG(ctx.j.trace()) - << "checkSingleSign: Not authorized to use account."; + JLOG(j.trace()) << "checkSingleSign: Not authorized to use account."; return tefBAD_AUTH_MASTER; } @@ -655,18 +729,20 @@ Transactor::checkSingleSign(PreclaimContext const& ctx) } NotTEC -Transactor::checkMultiSign(PreclaimContext const& ctx) +Transactor::checkMultiSign( + ReadView const& view, + AccountID const& id, + STArray const& txSigners, + ApplyFlags const& flags, + beast::Journal j) { - auto const id = ctx.tx.isFieldPresent(sfDelegate) - ? ctx.tx.getAccountID(sfDelegate) - : ctx.tx.getAccountID(sfAccount); // Get mTxnAccountID's SignerList and Quorum. std::shared_ptr sleAccountSigners = - ctx.view.read(keylet::signers(id)); + view.read(keylet::signers(id)); // If the signer list doesn't exist the account is not multi-signing. if (!sleAccountSigners) { - JLOG(ctx.j.trace()) + JLOG(j.trace()) << "applyTransaction: Invalid: Not a multi-signing account."; return tefNOT_MULTI_SIGNING; } @@ -681,12 +757,11 @@ Transactor::checkMultiSign(PreclaimContext const& ctx) "ripple::Transactor::checkMultiSign : signer list ID is 0"); auto accountSigners = - SignerEntries::deserialize(*sleAccountSigners, ctx.j, "ledger"); + SignerEntries::deserialize(*sleAccountSigners, j, "ledger"); if (!accountSigners) return accountSigners.error(); // Get the array of transaction signers. - STArray const& txSigners(ctx.tx.getFieldArray(sfSigners)); // Walk the accountSigners performing a variety of checks and see if // the quorum is met. @@ -705,7 +780,7 @@ Transactor::checkMultiSign(PreclaimContext const& ctx) { if (++iter == accountSigners->end()) { - JLOG(ctx.j.trace()) + JLOG(j.trace()) << "applyTransaction: Invalid SigningAccount.Account."; return tefBAD_SIGNATURE; } @@ -713,7 +788,7 @@ Transactor::checkMultiSign(PreclaimContext const& ctx) if (iter->account != txSignerAcctID) { // The SigningAccount is not in the SignerEntries. - JLOG(ctx.j.trace()) + JLOG(j.trace()) << "applyTransaction: Invalid SigningAccount.Account."; return tefBAD_SIGNATURE; } @@ -723,16 +798,16 @@ Transactor::checkMultiSign(PreclaimContext const& ctx) // public key. auto const spk = txSigner.getFieldVL(sfSigningPubKey); - if (!(ctx.flags & tapDRY_RUN) && !publicKeyType(makeSlice(spk))) + if (!(flags & tapDRY_RUN) && !publicKeyType(makeSlice(spk))) { - JLOG(ctx.j.trace()) + JLOG(j.trace()) << "checkMultiSign: signing public key type is unknown"; return tefBAD_SIGNATURE; } // This ternary is only needed to handle `simulate` XRPL_ASSERT( - (ctx.flags & tapDRY_RUN) || !spk.empty(), + (flags & tapDRY_RUN) || !spk.empty(), "ripple::Transactor::checkMultiSign : non-empty signer or " "simulation"); AccountID const signingAcctIDFromPubKey = spk.empty() @@ -764,7 +839,7 @@ Transactor::checkMultiSign(PreclaimContext const& ctx) // In any of these cases we need to know whether the account is in // the ledger. Determine that now. 
- auto sleTxSignerRoot = ctx.view.read(keylet::account(txSignerAcctID)); + auto const sleTxSignerRoot = view.read(keylet::account(txSignerAcctID)); if (signingAcctIDFromPubKey == txSignerAcctID) { @@ -777,7 +852,7 @@ Transactor::checkMultiSign(PreclaimContext const& ctx) if (signerAccountFlags & lsfDisableMaster) { - JLOG(ctx.j.trace()) + JLOG(j.trace()) << "applyTransaction: Signer:Account lsfDisableMaster."; return tefMASTER_DISABLED; } @@ -789,21 +864,21 @@ Transactor::checkMultiSign(PreclaimContext const& ctx) // Public key must hash to the account's regular key. if (!sleTxSignerRoot) { - JLOG(ctx.j.trace()) << "applyTransaction: Non-phantom signer " - "lacks account root."; + JLOG(j.trace()) << "applyTransaction: Non-phantom signer " + "lacks account root."; return tefBAD_SIGNATURE; } if (!sleTxSignerRoot->isFieldPresent(sfRegularKey)) { - JLOG(ctx.j.trace()) + JLOG(j.trace()) << "applyTransaction: Account lacks RegularKey."; return tefBAD_SIGNATURE; } if (signingAcctIDFromPubKey != sleTxSignerRoot->getAccountID(sfRegularKey)) { - JLOG(ctx.j.trace()) + JLOG(j.trace()) << "applyTransaction: Account doesn't match RegularKey."; return tefBAD_SIGNATURE; } @@ -815,8 +890,7 @@ Transactor::checkMultiSign(PreclaimContext const& ctx) // Cannot perform transaction if quorum is not met. if (weightSum < sleAccountSigners->getFieldU32(sfSignerQuorum)) { - JLOG(ctx.j.trace()) - << "applyTransaction: Signers failed to meet quorum."; + JLOG(j.trace()) << "applyTransaction: Signers failed to meet quorum."; return tefBAD_QUORUM; } @@ -904,7 +978,11 @@ removeDeletedTrustLines( } } -/** Reset the context, discarding any changes made and adjust the fee */ +/** Reset the context, discarding any changes made and adjust the fee. + + @param fee The transaction fee to be charged. + @return A pair containing the transaction result and the actual fee charged. + */ std::pair Transactor::reset(XRPAmount fee) { @@ -912,9 +990,10 @@ Transactor::reset(XRPAmount fee) auto const txnAcct = view().peek(keylet::account(ctx_.tx.getAccountID(sfAccount))); + + // The account should never be missing from the ledger. But if it + // is missing then we can't very well charge it a fee, can we? if (!txnAcct) - // The account should never be missing from the ledger. But if it - // is missing then we can't very well charge it a fee, can we? 
return {tefINTERNAL, beast::zero}; auto const payerSle = ctx_.tx.isFieldPresent(sfDelegate) @@ -1024,7 +1103,6 @@ Transactor::operator()() { // If the tapFAIL_HARD flag is set, a tec result // must not do anything - ctx_.discard(); applied = false; } diff --git a/src/xrpld/app/tx/detail/Transactor.h b/src/xrpld/app/tx/detail/Transactor.h index 4956f021df..42d4861a63 100644 --- a/src/xrpld/app/tx/detail/Transactor.h +++ b/src/xrpld/app/tx/detail/Transactor.h @@ -37,14 +37,38 @@ public: STTx const& tx; Rules const rules; ApplyFlags flags; + std::optional parentBatchId; beast::Journal const j; + PreflightContext( + Application& app_, + STTx const& tx_, + uint256 parentBatchId_, + Rules const& rules_, + ApplyFlags flags_, + beast::Journal j_ = beast::Journal{beast::Journal::getNullSink()}) + : app(app_) + , tx(tx_) + , rules(rules_) + , flags(flags_) + , parentBatchId(parentBatchId_) + , j(j_) + { + XRPL_ASSERT( + (flags_ & tapBATCH) == tapBATCH, "Batch apply flag should be set"); + } + PreflightContext( Application& app_, STTx const& tx_, Rules const& rules_, ApplyFlags flags_, - beast::Journal j_); + beast::Journal j_ = beast::Journal{beast::Journal::getNullSink()}) + : app(app_), tx(tx_), rules(rules_), flags(flags_), j(j_) + { + XRPL_ASSERT( + (flags_ & tapBATCH) == 0, "Batch apply flag should not be set"); + } PreflightContext& operator=(PreflightContext const&) = delete; @@ -57,8 +81,9 @@ public: Application& app; ReadView const& view; TER preflightResult; - STTx const& tx; ApplyFlags flags; + STTx const& tx; + std::optional const parentBatchId; beast::Journal const j; PreclaimContext( @@ -67,14 +92,39 @@ public: TER preflightResult_, STTx const& tx_, ApplyFlags flags_, + std::optional parentBatchId_, beast::Journal j_ = beast::Journal{beast::Journal::getNullSink()}) : app(app_) , view(view_) , preflightResult(preflightResult_) - , tx(tx_) , flags(flags_) + , tx(tx_) + , parentBatchId(parentBatchId_) , j(j_) { + XRPL_ASSERT( + parentBatchId.has_value() == ((flags_ & tapBATCH) == tapBATCH), + "Parent Batch ID should be set if batch apply flag is set"); + } + + PreclaimContext( + Application& app_, + ReadView const& view_, + TER preflightResult_, + STTx const& tx_, + ApplyFlags flags_, + beast::Journal j_ = beast::Journal{beast::Journal::getNullSink()}) + : PreclaimContext( + app_, + view_, + preflightResult_, + tx_, + flags_, + std::nullopt, + j_) + { + XRPL_ASSERT( + (flags_ & tapBATCH) == 0, "Batch apply flag should not be set"); } PreclaimContext& @@ -139,6 +189,9 @@ public: static NotTEC checkSign(PreclaimContext const& ctx); + static NotTEC + checkBatchSign(PreclaimContext const& ctx); + // Returns the fee in fee units, not scaled for load. 
static XRPAmount calculateBaseFee(ReadView const& view, STTx const& tx); @@ -200,9 +253,19 @@ private: TER payFee(); static NotTEC - checkSingleSign(PreclaimContext const& ctx); + checkSingleSign( + AccountID const& idSigner, + AccountID const& idAccount, + std::shared_ptr sleAccount, + Rules const& rules, + beast::Journal j); static NotTEC - checkMultiSign(PreclaimContext const& ctx); + checkMultiSign( + ReadView const& view, + AccountID const& idAccount, + STArray const& txSigners, + ApplyFlags const& flags, + beast::Journal j); void trapTransaction(uint256) const; }; diff --git a/src/xrpld/app/tx/detail/apply.cpp b/src/xrpld/app/tx/detail/apply.cpp index 615fd6a92d..889a520032 100644 --- a/src/xrpld/app/tx/detail/apply.cpp +++ b/src/xrpld/app/tx/detail/apply.cpp @@ -23,6 +23,7 @@ #include #include +#include namespace ripple { @@ -43,6 +44,28 @@ checkValidity( { auto const id = tx.getTransactionID(); auto const flags = router.getFlags(id); + + // Ignore signature check on batch inner transactions + if (tx.isFlag(tfInnerBatchTxn) && rules.enabled(featureBatch)) + { + // Defensive Check: These values are also checked in Batch::preflight + if (tx.isFieldPresent(sfTxnSignature) || + !tx.getSigningPubKey().empty() || tx.isFieldPresent(sfSigners)) + return { + Validity::SigBad, + "Malformed: Invalid inner batch transaction."}; + + std::string reason; + if (!passesLocalChecks(tx, reason)) + { + router.setFlags(id, SF_LOCALBAD); + return {Validity::SigGoodOnly, reason}; + } + + router.setFlags(id, SF_SIGGOOD); + return {Validity::Valid, ""}; + } + if (flags & SF_SIGBAD) // Signature is known bad return {Validity::SigBad, "Transaction has bad signature."}; @@ -106,6 +129,16 @@ forceValidity(HashRouter& router, uint256 const& txid, Validity validity) router.setFlags(txid, flags); } +template +ApplyResult +apply(Application& app, OpenView& view, PreflightChecks&& preflightChecks) +{ + STAmountSO stAmountSO{view.rules().enabled(fixSTAmountCanonicalize)}; + NumberSO stNumberSO{view.rules().enabled(fixUniversalNumber)}; + + return doApply(preclaim(preflightChecks(), app, view), app, view); +} + ApplyResult apply( Application& app, @@ -114,12 +147,89 @@ apply( ApplyFlags flags, beast::Journal j) { - STAmountSO stAmountSO{view.rules().enabled(fixSTAmountCanonicalize)}; - NumberSO stNumberSO{view.rules().enabled(fixUniversalNumber)}; + return apply(app, view, [&]() mutable { + return preflight(app, view.rules(), tx, flags, j); + }); +} - auto pfresult = preflight(app, view.rules(), tx, flags, j); - auto pcresult = preclaim(pfresult, app, view); - return doApply(pcresult, app, view); +ApplyResult +apply( + Application& app, + OpenView& view, + uint256 const& parentBatchId, + STTx const& tx, + ApplyFlags flags, + beast::Journal j) +{ + return apply(app, view, [&]() mutable { + return preflight(app, view.rules(), parentBatchId, tx, flags, j); + }); +} + +static bool +applyBatchTransactions( + Application& app, + OpenView& batchView, + STTx const& batchTxn, + beast::Journal j) +{ + XRPL_ASSERT( + batchTxn.getTxnType() == ttBATCH && + batchTxn.getFieldArray(sfRawTransactions).size() != 0, + "Batch transaction missing sfRawTransactions"); + + auto const parentBatchId = batchTxn.getTransactionID(); + auto const mode = batchTxn.getFlags(); + + auto applyOneTransaction = + [&app, &j, &parentBatchId, &batchView](STTx&& tx) { + OpenView perTxBatchView(batch_view, batchView); + + auto const ret = + apply(app, perTxBatchView, parentBatchId, tx, tapBATCH, j); + XRPL_ASSERT( + ret.applied == (isTesSuccess(ret.ter) || 
isTecClaim(ret.ter)), + "Inner transaction should not be applied"); + + JLOG(j.debug()) << "BatchTrace[" << parentBatchId + << "]: " << tx.getTransactionID() << " " + << (ret.applied ? "applied" : "failure") << ": " + << transToken(ret.ter); + + // If the transaction should be applied push its changes to the + // whole-batch view. + if (ret.applied && (isTesSuccess(ret.ter) || isTecClaim(ret.ter))) + perTxBatchView.apply(batchView); + + return ret; + }; + + int applied = 0; + + for (STObject rb : batchTxn.getFieldArray(sfRawTransactions)) + { + auto const result = applyOneTransaction(STTx{std::move(rb)}); + XRPL_ASSERT( + result.applied == + (isTesSuccess(result.ter) || isTecClaim(result.ter)), + "Outer Batch failure, inner transaction should not be applied"); + + if (result.applied) + ++applied; + + if (!isTesSuccess(result.ter)) + { + if (mode & tfAllOrNothing) + return false; + + if (mode & tfUntilFailure) + break; + } + else if (mode & tfOnlyOne) + break; + } + + return applied != 0; } ApplyTransactionResult @@ -141,10 +251,22 @@ applyTransaction( try { auto const result = apply(app, view, txn, flags, j); + if (result.applied) { JLOG(j.debug()) - << "Transaction applied: " << transHuman(result.ter); + << "Transaction applied: " << transToken(result.ter); + + // The batch transaction was just applied; now we need to apply + // its inner transactions as necessary. + if (isTesSuccess(result.ter) && txn.getTxnType() == ttBATCH) + { + OpenView wholeBatchView(batch_view, view); + + if (applyBatchTransactions(app, wholeBatchView, txn, j)) + wholeBatchView.apply(view); + } + return ApplyTransactionResult::Success; } diff --git a/src/xrpld/app/tx/detail/applySteps.cpp b/src/xrpld/app/tx/detail/applySteps.cpp index 5e8c125e83..34259ebef0 100644 --- a/src/xrpld/app/tx/detail/applySteps.cpp +++ b/src/xrpld/app/tx/detail/applySteps.cpp @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -104,7 +105,6 @@ with_txn_type(TxType txnType, F&& f) #undef TRANSACTION #pragma pop_macro("TRANSACTION") - default: throw UnknownTxnType(txnType); } @@ -227,6 +227,22 @@ invoke_preclaim(PreclaimContext const& ctx) } } +/** + * @brief Calculates the base fee for a given transaction. + * + * This function determines the base fee required for the specified transaction + * by invoking the appropriate fee calculation logic based on the transaction + * type. It uses a type-dispatch mechanism to select the correct calculation + * method. + * + * @param view The ledger view to use for fee calculation. + * @param tx The transaction for which the base fee is to be calculated. + * @return The calculated base fee as an XRPAmount. + * + * @throws std::exception If an error occurs during fee calculation, including + * but not limited to unknown transaction types or internal errors, the function + * logs an error and returns an XRPAmount of zero. 
+ */ static XRPAmount invoke_calculateBaseFee(ReadView const& view, STTx const& tx) { @@ -320,7 +336,28 @@ preflight( } catch (std::exception const& e) { - JLOG(j.fatal()) << "apply: " << e.what(); + JLOG(j.fatal()) << "apply (preflight): " << e.what(); + return {pfctx, {tefEXCEPTION, TxConsequences{tx}}}; + } +} + +PreflightResult +preflight( + Application& app, + Rules const& rules, + uint256 const& parentBatchId, + STTx const& tx, + ApplyFlags flags, + beast::Journal j) +{ + PreflightContext const pfctx(app, tx, parentBatchId, rules, flags, j); + try + { + return {pfctx, invoke_preflight(pfctx)}; + } + catch (std::exception const& e) + { + JLOG(j.fatal()) << "apply (preflight): " << e.what(); return {pfctx, {tefEXCEPTION, TxConsequences{tx}}}; } } @@ -334,18 +371,31 @@ preclaim( std::optional ctx; if (preflightResult.rules != view.rules()) { - auto secondFlight = preflight( - app, - view.rules(), - preflightResult.tx, - preflightResult.flags, - preflightResult.j); + auto secondFlight = [&]() { + if (preflightResult.parentBatchId) + return preflight( + app, + view.rules(), + preflightResult.parentBatchId.value(), + preflightResult.tx, + preflightResult.flags, + preflightResult.j); + + return preflight( + app, + view.rules(), + preflightResult.tx, + preflightResult.flags, + preflightResult.j); + }(); + ctx.emplace( app, view, secondFlight.ter, secondFlight.tx, secondFlight.flags, + secondFlight.parentBatchId, secondFlight.j); } else @@ -356,8 +406,10 @@ preclaim( preflightResult.ter, preflightResult.tx, preflightResult.flags, + preflightResult.parentBatchId, preflightResult.j); } + try { if (ctx->preflightResult != tesSUCCESS) @@ -366,7 +418,7 @@ preclaim( } catch (std::exception const& e) { - JLOG(ctx->j.fatal()) << "apply: " << e.what(); + JLOG(ctx->j.fatal()) << "apply (preclaim): " << e.what(); return {*ctx, tefEXCEPTION}; } } @@ -399,6 +451,7 @@ doApply(PreclaimResult const& preclaimResult, Application& app, OpenView& view) ApplyContext ctx( app, view, + preclaimResult.parentBatchId, preclaimResult.tx, preclaimResult.ter, calculateBaseFee(view, preclaimResult.tx), diff --git a/src/xrpld/ledger/ApplyView.h b/src/xrpld/ledger/ApplyView.h index 1abff33be0..1e4a5a112a 100644 --- a/src/xrpld/ledger/ApplyView.h +++ b/src/xrpld/ledger/ApplyView.h @@ -42,6 +42,9 @@ enum ApplyFlags : std::uint32_t { // Transaction came from a privileged source tapUNLIMITED = 0x400, + // Transaction is executing as part of a batch + tapBATCH = 0x800, + // Transaction shouldn't be applied // Signatures shouldn't be checked tapDRY_RUN = 0x1000 diff --git a/src/xrpld/ledger/ApplyViewImpl.h b/src/xrpld/ledger/ApplyViewImpl.h index 1c282565b1..d170cf71ff 100644 --- a/src/xrpld/ledger/ApplyViewImpl.h +++ b/src/xrpld/ledger/ApplyViewImpl.h @@ -58,6 +58,7 @@ public: OpenView& to, STTx const& tx, TER ter, + std::optional parentBatchId, bool isDryRun, beast::Journal j); diff --git a/src/xrpld/ledger/OpenView.h b/src/xrpld/ledger/OpenView.h index ecc618e185..a1fa195a69 100644 --- a/src/xrpld/ledger/OpenView.h +++ b/src/xrpld/ledger/OpenView.h @@ -24,6 +24,7 @@ #include #include +#include #include #include @@ -39,13 +40,21 @@ namespace ripple { Views constructed with this tag will have the rules of open ledgers applied during transaction processing. -*/ -struct open_ledger_t + */ +inline constexpr struct open_ledger_t { - explicit open_ledger_t() = default; -}; + explicit constexpr open_ledger_t() = default; +} open_ledger{}; -extern open_ledger_t const open_ledger; +/** Batch view construction tag. 
+ + Views constructed with this tag are part of a stack of views + used during batch transaction applied. + */ +inline constexpr struct batch_view_t +{ + explicit constexpr batch_view_t() = default; +} batch_view{}; //------------------------------------------------------------------------------ @@ -97,6 +106,10 @@ private: ReadView const* base_; detail::RawStateTable items_; std::shared_ptr hold_; + + /// In batch mode, the number of transactions already executed. + std::size_t baseTxCount_ = 0; + bool open_ = true; public: @@ -142,7 +155,6 @@ public: The tx list starts empty and will contain all newly inserted tx. */ - /** @{ */ OpenView( open_ledger_t, ReadView const* base, @@ -156,7 +168,11 @@ public: : OpenView(open_ledger, &*base, rules, base) { } - /** @} */ + + OpenView(batch_view_t, OpenView& base) : OpenView(std::addressof(base)) + { + baseTxCount_ = base.txCount(); + } /** Construct a new last closed ledger. diff --git a/src/xrpld/ledger/detail/ApplyStateTable.cpp b/src/xrpld/ledger/detail/ApplyStateTable.cpp index c11a72d782..2a740093d9 100644 --- a/src/xrpld/ledger/detail/ApplyStateTable.cpp +++ b/src/xrpld/ledger/detail/ApplyStateTable.cpp @@ -116,6 +116,7 @@ ApplyStateTable::apply( STTx const& tx, TER ter, std::optional const& deliver, + std::optional const& parentBatchId, bool isDryRun, beast::Journal j) { @@ -126,9 +127,11 @@ ApplyStateTable::apply( std::optional metadata; if (!to.open() || isDryRun) { - TxMeta meta(tx.getTransactionID(), to.seq()); + TxMeta meta(tx.getTransactionID(), to.seq(), parentBatchId); + if (deliver) meta.setDeliveredAmount(*deliver); + Mods newMod; for (auto& item : items_) { diff --git a/src/xrpld/ledger/detail/ApplyStateTable.h b/src/xrpld/ledger/detail/ApplyStateTable.h index b1bac733fc..5a2e0bcf54 100644 --- a/src/xrpld/ledger/detail/ApplyStateTable.h +++ b/src/xrpld/ledger/detail/ApplyStateTable.h @@ -72,6 +72,7 @@ public: STTx const& tx, TER ter, std::optional const& deliver, + std::optional const& parentBatchId, bool isDryRun, beast::Journal j); diff --git a/src/xrpld/ledger/detail/ApplyViewImpl.cpp b/src/xrpld/ledger/detail/ApplyViewImpl.cpp index 74b71c8324..3fd9478b54 100644 --- a/src/xrpld/ledger/detail/ApplyViewImpl.cpp +++ b/src/xrpld/ledger/detail/ApplyViewImpl.cpp @@ -31,10 +31,11 @@ ApplyViewImpl::apply( OpenView& to, STTx const& tx, TER ter, + std::optional parentBatchId, bool isDryRun, beast::Journal j) { - return items_.apply(to, tx, ter, deliver_, isDryRun, j); + return items_.apply(to, tx, ter, deliver_, parentBatchId, isDryRun, j); } std::size_t diff --git a/src/xrpld/ledger/detail/OpenView.cpp b/src/xrpld/ledger/detail/OpenView.cpp index 5c62d8cef8..73e502a5e2 100644 --- a/src/xrpld/ledger/detail/OpenView.cpp +++ b/src/xrpld/ledger/detail/OpenView.cpp @@ -23,8 +23,6 @@ namespace ripple { -open_ledger_t const open_ledger{}; - class OpenView::txs_iter_impl : public txs_type::iter_base { private: @@ -124,7 +122,7 @@ OpenView::OpenView(ReadView const* base, std::shared_ptr hold) std::size_t OpenView::txCount() const { - return txs_.size(); + return baseTxCount_ + txs_.size(); } void @@ -269,7 +267,7 @@ OpenView::rawTxInsert( std::forward_as_tuple(key), std::forward_as_tuple(txn, metaData)); if (!result.second) - LogicError("rawTxInsert: duplicate TX id" + to_string(key)); + LogicError("rawTxInsert: duplicate TX id: " + to_string(key)); } } // namespace ripple diff --git a/src/xrpld/overlay/detail/PeerImp.cpp b/src/xrpld/overlay/detail/PeerImp.cpp index bca2cfd8c7..cb3a7a69f5 100644 --- a/src/xrpld/overlay/detail/PeerImp.cpp +++ 
b/src/xrpld/overlay/detail/PeerImp.cpp @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -1282,6 +1283,18 @@ PeerImp::handleTransaction( auto stx = std::make_shared(sit); uint256 txID = stx->getTransactionID(); + // Charge strongly for attempting to relay a txn with tfInnerBatchTxn + // LCOV_EXCL_START + if (stx->isFlag(tfInnerBatchTxn) && + getCurrentTransactionRules()->enabled(featureBatch)) + { + JLOG(p_journal_.warn()) << "Ignoring Network relayed Tx containing " + "tfInnerBatchTxn (handleTransaction)."; + fee_.update(Resource::feeModerateBurdenPeer, "inner batch txn"); + return; + } + // LCOV_EXCL_STOP + int flags; constexpr std::chrono::seconds tx_interval = 10s; @@ -2838,6 +2851,18 @@ PeerImp::checkTransaction( // VFALCO TODO Rewrite to not use exceptions try { + // charge strongly for relaying batch txns + // LCOV_EXCL_START + if (stx->isFlag(tfInnerBatchTxn) && + getCurrentTransactionRules()->enabled(featureBatch)) + { + JLOG(p_journal_.warn()) << "Ignoring Network relayed Tx containing " + "tfInnerBatchTxn (checkSignature)."; + charge(Resource::feeModerateBurdenPeer, "inner batch txn"); + return; + } + // LCOV_EXCL_STOP + // Expired? if (stx->isFieldPresent(sfLastLedgerSequence) && (stx->getFieldU32(sfLastLedgerSequence) < diff --git a/src/xrpld/rpc/detail/TransactionSign.cpp b/src/xrpld/rpc/detail/TransactionSign.cpp index 9387aba505..a4454c6e8a 100644 --- a/src/xrpld/rpc/detail/TransactionSign.cpp +++ b/src/xrpld/rpc/detail/TransactionSign.cpp @@ -464,9 +464,6 @@ transactionPreProcessImpl( hasTicketSeq ? 0 : app.getTxQ().nextQueuableSeq(sle).value(); } - if (!tx_json.isMember(jss::Flags)) - tx_json[jss::Flags] = tfFullyCanonicalSig; - if (!tx_json.isMember(jss::NetworkID)) { auto const networkId = app.config().NETWORK_ID; @@ -749,6 +746,7 @@ transactionFormatResultImpl(Transaction::pointer tpTrans, unsigned apiVersion) [[nodiscard]] static XRPAmount getTxFee(Application const& app, Config const& config, Json::Value tx) { + auto const& ledger = app.openLedger().current(); // autofilling only needed in this function so that the `STParsedJSONObject` // parsing works properly it should not be modifying the actual `tx` object if (!tx.isMember(jss::Fee)) @@ -776,6 +774,9 @@ getTxFee(Application const& app, Config const& config, Json::Value tx) if (!tx[jss::Signers].isArray()) return config.FEES.reference_fee; + if (tx[jss::Signers].size() > STTx::maxMultiSigners(&ledger->rules())) + return config.FEES.reference_fee; + // check multi-signed signers for (auto& signer : tx[jss::Signers]) { @@ -804,6 +805,10 @@ getTxFee(Application const& app, Config const& config, Json::Value tx) try { STTx const& stTx = STTx(std::move(parsed.object.value())); + std::string reason; + if (!passesLocalChecks(stTx, reason)) + return config.FEES.reference_fee; + return calculateBaseFee(*app.openLedger().current(), stTx); } catch (std::exception& e) diff --git a/src/xrpld/rpc/handlers/Simulate.cpp b/src/xrpld/rpc/handlers/Simulate.cpp index 5f69c203ff..3c175883c5 100644 --- a/src/xrpld/rpc/handlers/Simulate.cpp +++ b/src/xrpld/rpc/handlers/Simulate.cpp @@ -342,6 +342,11 @@ doSimulate(RPC::JsonContext& context) return jvResult; } + if (stTx->getTxnType() == ttBATCH) + { + return RPC::make_error(rpcNOT_IMPL); + } + std::string reason; auto transaction = std::make_shared(stTx, reason, context.app); // Actually run the transaction through the transaction processor From 8b9e21e3f5735b22ba106e3033c7f4c10fea3606 Mon Sep 17 00:00:00 2001 From: Valentin Balaschenko 
<13349202+vlntb@users.noreply.github.com> Date: Tue, 27 May 2025 19:32:25 +0100 Subject: [PATCH 033/244] docs: Update build instructions for Ubuntu 22.04+ (#5292) --- docs/build/environment.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/build/environment.md b/docs/build/environment.md index 7fe89ffb49..760be144d8 100644 --- a/docs/build/environment.md +++ b/docs/build/environment.md @@ -23,7 +23,7 @@ direction. ``` apt update -apt install --yes curl git libssl-dev python3.10-dev python3-pip make g++-11 libprotobuf-dev protobuf-compiler +apt install --yes curl git libssl-dev pipx python3.10-dev python3-pip make g++-11 libprotobuf-dev protobuf-compiler curl --location --remote-name \ "https://github.com/Kitware/CMake/releases/download/v3.25.1/cmake-3.25.1.tar.gz" @@ -35,7 +35,8 @@ make --jobs $(nproc) make install cd .. -pip3 install 'conan<2' +pipx install 'conan<2' +pipx ensurepath ``` [1]: https://github.com/thejohnfreeman/rippled-docker/blob/master/ubuntu-22.04/install.sh From cd777f79efd7e57224cd740e7c893b7fc26b9015 Mon Sep 17 00:00:00 2001 From: "Elliot." Date: Tue, 27 May 2025 12:11:13 -0700 Subject: [PATCH 034/244] docs: add -j $(nproc) to BUILD.md (#5288) This improves build times. --- BUILD.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/BUILD.md b/BUILD.md index fd985dce81..1ba767cd88 100644 --- a/BUILD.md +++ b/BUILD.md @@ -288,7 +288,7 @@ It fixes some source files to add missing `#include`s. Single-config generators: ``` - cmake --build . + cmake --build . -j $(nproc) ``` Multi-config generators: From cae5294b4e38e3c01c92fa5f32a59e6f15ad16f9 Mon Sep 17 00:00:00 2001 From: Bart Date: Tue, 27 May 2025 16:03:23 -0400 Subject: [PATCH 035/244] chore: Rename docs job (#5398) --- .github/workflows/doxygen.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/doxygen.yml b/.github/workflows/doxygen.yml index 0693308ef0..01e04a3f5a 100644 --- a/.github/workflows/doxygen.yml +++ b/.github/workflows/doxygen.yml @@ -10,7 +10,7 @@ concurrency: cancel-in-progress: true jobs: - job: + documentation: runs-on: ubuntu-latest permissions: contents: write From be668ee26d6a9cc4e7b885e3c3cbb2fc7da7119b Mon Sep 17 00:00:00 2001 From: Michael Legleux Date: Tue, 27 May 2025 13:46:25 -0700 Subject: [PATCH 036/244] chore: Update CPP ref source (#5453) --- cmake/RippledDocs.cmake | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmake/RippledDocs.cmake b/cmake/RippledDocs.cmake index d93bc119c0..dda277bffa 100644 --- a/cmake/RippledDocs.cmake +++ b/cmake/RippledDocs.cmake @@ -53,9 +53,9 @@ set(download_script "${CMAKE_BINARY_DIR}/docs/download-cppreference.cmake") file(WRITE "${download_script}" "file(DOWNLOAD \ - http://upload.cppreference.com/mwiki/images/b/b2/html_book_20190607.zip \ + https://github.com/PeterFeicht/cppreference-doc/releases/download/v20250209/html-book-20250209.zip \ ${CMAKE_BINARY_DIR}/docs/cppreference.zip \ - EXPECTED_HASH MD5=82b3a612d7d35a83e3cb1195a63689ab \ + EXPECTED_HASH MD5=bda585f72fbca4b817b29a3d5746567b \ )\n \ execute_process( \ COMMAND \"${CMAKE_COMMAND}\" -E tar -xf cppreference.zip \ From d71ce51901409c710735c4ceddcb7b319c14f8ef Mon Sep 17 00:00:00 2001 From: Vito Tumas <5780819+Tapanito@users.noreply.github.com> Date: Wed, 28 May 2025 12:30:03 +0200 Subject: [PATCH 037/244] feat: improve squelching configuration (#5438) This commit introduces the following changes: * Renames `vp_enable config` option to `vp_base_squelch_enable` to enable squelching for 
validators. * Removes `vp_squelch` config option which was used to configure whether to send squelch messages to peers or not. With this flag removed, if squelching is enabled, squelch messages will be sent. This was an option used for debugging. * Introduces a temporary `vp_base_squelch_max_trusted_peers` config option to change the max number of peers who are selected as validator message sources. This is a temporary option, which will be removed once a good value is found. * Adds a traffic counter to count the number of times peers ignored squelch messages and kept sending messages for squelched validators. * Moves the decision whether squelching is enabled and ready into Slot.h. --- src/test/overlay/compression_test.cpp | 11 +- src/test/overlay/reduce_relay_test.cpp | 205 +++++++++++++++++--- src/xrpld/core/Config.h | 25 ++- src/xrpld/core/detail/Config.cpp | 46 ++++- src/xrpld/overlay/Slot.h | 131 ++++++++++--- src/xrpld/overlay/detail/ConnectAttempt.cpp | 2 +- src/xrpld/overlay/detail/Handshake.cpp | 2 +- src/xrpld/overlay/detail/Handshake.h | 7 +- src/xrpld/overlay/detail/OverlayImpl.cpp | 22 ++- src/xrpld/overlay/detail/PeerImp.cpp | 40 ++-- src/xrpld/overlay/detail/PeerImp.h | 27 +-- src/xrpld/overlay/detail/TrafficCount.h | 4 + 12 files changed, 387 insertions(+), 135 deletions(-) diff --git a/src/test/overlay/compression_test.cpp b/src/test/overlay/compression_test.cpp index 76c38fd59b..4ecbe7f232 100644 --- a/src/test/overlay/compression_test.cpp +++ b/src/test/overlay/compression_test.cpp @@ -473,17 +473,14 @@ public: Config c; std::stringstream str; str << "[reduce_relay]\n" - << "vp_enable=1\n" - << "vp_squelch=1\n" + << "vp_base_squelch_enable=1\n" << "[compression]\n" << enable << "\n"; c.loadFromString(str.str()); auto env = std::make_shared(*this); env->app().config().COMPRESSION = c.COMPRESSION; - env->app().config().VP_REDUCE_RELAY_ENABLE = - c.VP_REDUCE_RELAY_ENABLE; - env->app().config().VP_REDUCE_RELAY_SQUELCH = - c.VP_REDUCE_RELAY_SQUELCH; + env->app().config().VP_REDUCE_RELAY_BASE_SQUELCH_ENABLE = + c.VP_REDUCE_RELAY_BASE_SQUELCH_ENABLE; return env; }; auto handshake = [&](int outboundEnable, int inboundEnable) { @@ -496,7 +493,7 @@ public: env->app().config().COMPRESSION, false, env->app().config().TX_REDUCE_RELAY_ENABLE, - env->app().config().VP_REDUCE_RELAY_ENABLE); + env->app().config().VP_REDUCE_RELAY_BASE_SQUELCH_ENABLE); http_request_type http_request; http_request.version(request.version()); http_request.base() = request.base(); diff --git a/src/test/overlay/reduce_relay_test.cpp b/src/test/overlay/reduce_relay_test.cpp index 18aebbe194..a8aafcfa06 100644 --- a/src/test/overlay/reduce_relay_test.cpp +++ b/src/test/overlay/reduce_relay_test.cpp @@ -17,6 +17,7 @@ */ //============================================================================== +#include #include #include @@ -32,6 +33,8 @@ #include +#include +#include #include #include @@ -517,7 +520,8 @@ class OverlaySim : public Overlay, public reduce_relay::SquelchHandler public: using id_t = Peer::id_t; using clock_type = ManualClock; - OverlaySim(Application& app) : slots_(app.logs(), *this), logs_(app.logs()) + OverlaySim(Application& app) + : slots_(app.logs(), *this, app.config()), logs_(app.logs()) { } @@ -986,7 +990,10 @@ protected: network_.overlay().isCountingState(validator); BEAST_EXPECT( countingState == false && - selected.size() == reduce_relay::MAX_SELECTED_PEERS); + selected.size() == + env_.app() + .config() + .VP_REDUCE_RELAY_SQUELCH_MAX_SELECTED_PEERS); } // Trigger Link Down or Peer 
Disconnect event @@ -1188,7 +1195,10 @@ protected: { BEAST_EXPECT( squelched == - MAX_PEERS - reduce_relay::MAX_SELECTED_PEERS); + MAX_PEERS - + env_.app() + .config() + .VP_REDUCE_RELAY_SQUELCH_MAX_SELECTED_PEERS); n++; } }, @@ -1197,7 +1207,9 @@ protected: purge, resetClock); auto selected = network_.overlay().getSelected(network_.validator(0)); - BEAST_EXPECT(selected.size() == reduce_relay::MAX_SELECTED_PEERS); + BEAST_EXPECT( + selected.size() == + env_.app().config().VP_REDUCE_RELAY_SQUELCH_MAX_SELECTED_PEERS); BEAST_EXPECT(n == 1); // only one selection round auto res = checkCounting(network_.validator(0), false); BEAST_EXPECT(res); @@ -1261,7 +1273,11 @@ protected: unsquelched++; }); BEAST_EXPECT( - unsquelched == MAX_PEERS - reduce_relay::MAX_SELECTED_PEERS); + unsquelched == + MAX_PEERS - + env_.app() + .config() + .VP_REDUCE_RELAY_SQUELCH_MAX_SELECTED_PEERS); BEAST_EXPECT(checkCounting(network_.validator(0), true)); }); } @@ -1282,7 +1298,11 @@ protected: }); auto peers = network_.overlay().getPeers(network_.validator(0)); BEAST_EXPECT( - unsquelched == MAX_PEERS - reduce_relay::MAX_SELECTED_PEERS); + unsquelched == + MAX_PEERS - + env_.app() + .config() + .VP_REDUCE_RELAY_SQUELCH_MAX_SELECTED_PEERS); BEAST_EXPECT(checkCounting(network_.validator(0), true)); }); } @@ -1314,42 +1334,164 @@ protected: void testConfig(bool log) { - doTest("Config Test", log, [&](bool log) { + doTest("Test Config - squelch enabled (legacy)", log, [&](bool log) { Config c; std::string toLoad(R"rippleConfig( [reduce_relay] vp_enable=1 -vp_squelch=1 )rippleConfig"); c.loadFromString(toLoad); - BEAST_EXPECT(c.VP_REDUCE_RELAY_ENABLE == true); - BEAST_EXPECT(c.VP_REDUCE_RELAY_SQUELCH == true); + BEAST_EXPECT(c.VP_REDUCE_RELAY_BASE_SQUELCH_ENABLE == true); + }); + + doTest("Test Config - squelch disabled (legacy)", log, [&](bool log) { + Config c; + + std::string toLoad(R"rippleConfig( +[reduce_relay] +vp_enable=0 +)rippleConfig"); + + c.loadFromString(toLoad); + BEAST_EXPECT(c.VP_REDUCE_RELAY_BASE_SQUELCH_ENABLE == false); Config c1; - toLoad = (R"rippleConfig( + toLoad = R"rippleConfig( [reduce_relay] -vp_enable=0 -vp_squelch=0 -)rippleConfig"); +)rippleConfig"; c1.loadFromString(toLoad); - BEAST_EXPECT(c1.VP_REDUCE_RELAY_ENABLE == false); - BEAST_EXPECT(c1.VP_REDUCE_RELAY_SQUELCH == false); + BEAST_EXPECT(c1.VP_REDUCE_RELAY_BASE_SQUELCH_ENABLE == false); + }); + + doTest("Test Config - squelch enabled", log, [&](bool log) { + Config c; + + std::string toLoad(R"rippleConfig( +[reduce_relay] +vp_base_squelch_enable=1 +)rippleConfig"); + + c.loadFromString(toLoad); + BEAST_EXPECT(c.VP_REDUCE_RELAY_BASE_SQUELCH_ENABLE == true); + }); + + doTest("Test Config - squelch disabled", log, [&](bool log) { + Config c; + + std::string toLoad(R"rippleConfig( +[reduce_relay] +vp_base_squelch_enable=0 +)rippleConfig"); + + c.loadFromString(toLoad); + BEAST_EXPECT(c.VP_REDUCE_RELAY_BASE_SQUELCH_ENABLE == false); + }); + + doTest("Test Config - legacy and new", log, [&](bool log) { + Config c; + + std::string toLoad(R"rippleConfig( +[reduce_relay] +vp_base_squelch_enable=0 +vp_enable=0 +)rippleConfig"); + + std::string error; + auto const expectedError = + "Invalid reduce_relay" + " cannot specify both vp_base_squelch_enable and vp_enable " + "options. 
" + "vp_enable was deprecated and replaced by " + "vp_base_squelch_enable"; + + try + { + c.loadFromString(toLoad); + } + catch (std::runtime_error& e) + { + error = e.what(); + } + + BEAST_EXPECT(error == expectedError); + }); + + doTest("Test Config - max selected peers", log, [&](bool log) { + Config c; + + std::string toLoad(R"rippleConfig( +[reduce_relay] +)rippleConfig"); + + c.loadFromString(toLoad); + BEAST_EXPECT(c.VP_REDUCE_RELAY_SQUELCH_MAX_SELECTED_PEERS == 5); + + Config c1; + + toLoad = R"rippleConfig( +[reduce_relay] +vp_base_squelch_max_selected_peers=6 +)rippleConfig"; + + c1.loadFromString(toLoad); + BEAST_EXPECT(c1.VP_REDUCE_RELAY_SQUELCH_MAX_SELECTED_PEERS == 6); Config c2; toLoad = R"rippleConfig( [reduce_relay] -vp_enabled=1 -vp_squelched=1 +vp_base_squelch_max_selected_peers=2 )rippleConfig"; - c2.loadFromString(toLoad); - BEAST_EXPECT(c2.VP_REDUCE_RELAY_ENABLE == false); - BEAST_EXPECT(c2.VP_REDUCE_RELAY_SQUELCH == false); + std::string error; + auto const expectedError = + "Invalid reduce_relay" + " vp_base_squelch_max_selected_peers must be " + "greater than or equal to 3"; + try + { + c2.loadFromString(toLoad); + } + catch (std::runtime_error& e) + { + error = e.what(); + } + + BEAST_EXPECT(error == expectedError); + }); + } + + void + testBaseSquelchReady(bool log) + { + doTest("BaseSquelchReady", log, [&](bool log) { + ManualClock::reset(); + auto createSlots = [&](bool baseSquelchEnabled) + -> reduce_relay::Slots { + env_.app().config().VP_REDUCE_RELAY_BASE_SQUELCH_ENABLE = + baseSquelchEnabled; + return reduce_relay::Slots( + env_.app().logs(), network_.overlay(), env_.app().config()); + }; + // base squelching must not be ready if squelching is disabled + BEAST_EXPECT(!createSlots(false).baseSquelchReady()); + + // base squelch must not be ready as not enough time passed from + // bootup + BEAST_EXPECT(!createSlots(true).baseSquelchReady()); + + ManualClock::advance(reduce_relay::WAIT_ON_BOOTUP + minutes{1}); + + // base squelch enabled and bootup time passed + BEAST_EXPECT(createSlots(true).baseSquelchReady()); + + // even if time passed, base squelching must not be ready if turned + // off in the config + BEAST_EXPECT(!createSlots(false).baseSquelchReady()); }); } @@ -1425,7 +1567,7 @@ vp_squelched=1 auto run = [&](int npeers) { handler.maxDuration_ = 0; reduce_relay::Slots slots( - env_.app().logs(), handler); + env_.app().logs(), handler, env_.app().config()); // 1st message from a new peer switches the slot // to counting state and resets the counts of all peers + // MAX_MESSAGE_THRESHOLD + 1 messages to reach the threshold @@ -1503,14 +1645,12 @@ vp_squelched=1 std::stringstream str; str << "[reduce_relay]\n" << "vp_enable=" << enable << "\n" - << "vp_squelch=" << enable << "\n" << "[compression]\n" << "1\n"; c.loadFromString(str.str()); - env_.app().config().VP_REDUCE_RELAY_ENABLE = - c.VP_REDUCE_RELAY_ENABLE; - env_.app().config().VP_REDUCE_RELAY_SQUELCH = - c.VP_REDUCE_RELAY_SQUELCH; + env_.app().config().VP_REDUCE_RELAY_BASE_SQUELCH_ENABLE = + c.VP_REDUCE_RELAY_BASE_SQUELCH_ENABLE; + env_.app().config().COMPRESSION = c.COMPRESSION; }; auto handshake = [&](int outboundEnable, int inboundEnable) { @@ -1523,7 +1663,7 @@ vp_squelched=1 env_.app().config().COMPRESSION, false, env_.app().config().TX_REDUCE_RELAY_ENABLE, - env_.app().config().VP_REDUCE_RELAY_ENABLE); + env_.app().config().VP_REDUCE_RELAY_BASE_SQUELCH_ENABLE); http_request_type http_request; http_request.version(request.version()); http_request.base() = request.base(); @@ -1563,7 +1703,13 
@@ vp_squelched=1 Network network_; public: - reduce_relay_test() : env_(*this), network_(env_.app()) + reduce_relay_test() + : env_(*this, jtx::envconfig([](std::unique_ptr cfg) { + cfg->VP_REDUCE_RELAY_BASE_SQUELCH_ENABLE = true; + cfg->VP_REDUCE_RELAY_SQUELCH_MAX_SELECTED_PEERS = 6; + return cfg; + })) + , network_(env_.app()) { } @@ -1582,6 +1728,7 @@ public: testInternalHashRouter(log); testRandomSquelch(log); testHandshake(log); + testBaseSquelchReady(log); } }; diff --git a/src/xrpld/core/Config.h b/src/xrpld/core/Config.h index 4fdce92c8a..a58867958b 100644 --- a/src/xrpld/core/Config.h +++ b/src/xrpld/core/Config.h @@ -242,19 +242,18 @@ public: // size, but we allow admins to explicitly set it in the config. std::optional SWEEP_INTERVAL; - // Reduce-relay - these parameters are experimental. - // Enable reduce-relay features - // Validation/proposal reduce-relay feature - bool VP_REDUCE_RELAY_ENABLE = false; - // Send squelch message to peers. Generally this config should - // have the same value as VP_REDUCE_RELAY_ENABLE. It can be - // used for testing the feature's function without - // affecting the message relaying. To use it for testing, - // set it to false and set VP_REDUCE_RELAY_ENABLE to true. - // Squelch messages will not be sent to the peers in this case. - // Set log level to debug so that the feature function can be - // analyzed. - bool VP_REDUCE_RELAY_SQUELCH = false; + // Reduce-relay - Experimental parameters to control p2p routing algorithms + + // Enable base squelching of duplicate validation/proposal messages + bool VP_REDUCE_RELAY_BASE_SQUELCH_ENABLE = false; + + ///////////////////// !!TEMPORARY CODE BLOCK!! //////////////////////// + // Temporary squelching config for the peers selected as a source of // + // validator messages. The config must be removed once squelching is // + // made the default routing algorithm // + std::size_t VP_REDUCE_RELAY_SQUELCH_MAX_SELECTED_PEERS = 5; + ///////////////// END OF TEMPORARY CODE BLOCK ///////////////////// + // Transaction reduce-relay feature bool TX_REDUCE_RELAY_ENABLE = false; // If tx reduce-relay feature is disabled diff --git a/src/xrpld/core/detail/Config.cpp b/src/xrpld/core/detail/Config.cpp index b132987d08..1a07109b74 100644 --- a/src/xrpld/core/detail/Config.cpp +++ b/src/xrpld/core/detail/Config.cpp @@ -737,8 +737,44 @@ Config::loadFromString(std::string const& fileContents) if (exists(SECTION_REDUCE_RELAY)) { auto sec = section(SECTION_REDUCE_RELAY); - VP_REDUCE_RELAY_ENABLE = sec.value_or("vp_enable", false); - VP_REDUCE_RELAY_SQUELCH = sec.value_or("vp_squelch", false); + + ///////////////////// !!TEMPORARY CODE BLOCK!! //////////////////////// + // vp_enable config option is deprecated by vp_base_squelch_enable // + // This option is kept for backwards compatibility. When squelching // + // is the default algorithm, it must be replaced with: // + // VP_REDUCE_RELAY_BASE_SQUELCH_ENABLE = // + // sec.value_or("vp_base_squelch_enable", true); // + if (sec.exists("vp_base_squelch_enable") && sec.exists("vp_enable")) + Throw( + "Invalid " SECTION_REDUCE_RELAY + " cannot specify both vp_base_squelch_enable and vp_enable " + "options. 
" + "vp_enable was deprecated and replaced by " + "vp_base_squelch_enable"); + + if (sec.exists("vp_base_squelch_enable")) + VP_REDUCE_RELAY_BASE_SQUELCH_ENABLE = + sec.value_or("vp_base_squelch_enable", false); + else if (sec.exists("vp_enable")) + VP_REDUCE_RELAY_BASE_SQUELCH_ENABLE = + sec.value_or("vp_enable", false); + else + VP_REDUCE_RELAY_BASE_SQUELCH_ENABLE = false; + ///////////////// !!END OF TEMPORARY CODE BLOCK!! ///////////////////// + + ///////////////////// !!TEMPORARY CODE BLOCK!! /////////////////////// + // Temporary squelching config for the peers selected as a source of // + // validator messages. The config must be removed once squelching is // + // made the default routing algorithm. // + VP_REDUCE_RELAY_SQUELCH_MAX_SELECTED_PEERS = + sec.value_or("vp_base_squelch_max_selected_peers", 5); + if (VP_REDUCE_RELAY_SQUELCH_MAX_SELECTED_PEERS < 3) + Throw( + "Invalid " SECTION_REDUCE_RELAY + " vp_base_squelch_max_selected_peers must be " + "greater than or equal to 3"); + ///////////////// !!END OF TEMPORARY CODE BLOCK!! ///////////////////// + TX_REDUCE_RELAY_ENABLE = sec.value_or("tx_enable", false); TX_REDUCE_RELAY_METRICS = sec.value_or("tx_metrics", false); TX_REDUCE_RELAY_MIN_PEERS = sec.value_or("tx_min_peers", 20); @@ -747,9 +783,9 @@ Config::loadFromString(std::string const& fileContents) TX_REDUCE_RELAY_MIN_PEERS < 10) Throw( "Invalid " SECTION_REDUCE_RELAY - ", tx_min_peers must be greater or equal to 10" - ", tx_relay_percentage must be greater or equal to 10 " - "and less or equal to 100"); + ", tx_min_peers must be greater than or equal to 10" + ", tx_relay_percentage must be greater than or equal to 10 " + "and less than or equal to 100"); } if (getSingleSection(secConfig, SECTION_MAX_TRANSACTIONS, strTemp, j_)) diff --git a/src/xrpld/overlay/Slot.h b/src/xrpld/overlay/Slot.h index 6ae3c9a142..0956eb06f7 100644 --- a/src/xrpld/overlay/Slot.h +++ b/src/xrpld/overlay/Slot.h @@ -20,6 +20,7 @@ #ifndef RIPPLE_OVERLAY_SLOT_H_INCLUDED #define RIPPLE_OVERLAY_SLOT_H_INCLUDED +#include #include #include @@ -32,7 +33,6 @@ #include #include -#include #include #include #include @@ -109,16 +109,25 @@ private: using id_t = Peer::id_t; using time_point = typename clock_type::time_point; + // a callback to report ignored squelches + using ignored_squelch_callback = std::function; + /** Constructor * @param journal Journal for logging * @param handler Squelch/Unsquelch implementation + * @param maxSelectedPeers the maximum number of peers to be selected as + * validator message source */ - Slot(SquelchHandler const& handler, beast::Journal journal) + Slot( + SquelchHandler const& handler, + beast::Journal journal, + uint16_t maxSelectedPeers) : reachedThreshold_(0) , lastSelected_(clock_type::now()) , state_(SlotState::Counting) , handler_(handler) , journal_(journal) + , maxSelectedPeers_(maxSelectedPeers) { } @@ -129,7 +138,7 @@ private: * slot's state to Counting. If the number of messages for the peer is > * MIN_MESSAGE_THRESHOLD then add peer to considered peers pool. If the * number of considered peers who reached MAX_MESSAGE_THRESHOLD is - * MAX_SELECTED_PEERS then randomly select MAX_SELECTED_PEERS from + * maxSelectedPeers_ then randomly select maxSelectedPeers_ from * considered peers, and call squelch handler for each peer, which is not * selected and not already in Squelched state. Set the state for those * peers to Squelched and reset the count of all peers. 
Set slot's state to @@ -139,9 +148,14 @@ private: * @param id Peer id which received the message * @param type Message type (Validation and Propose Set only, * others are ignored, future use) + * @param callback A callback to report ignored squelches */ void - update(PublicKey const& validator, id_t id, protocol::MessageType type); + update( + PublicKey const& validator, + id_t id, + protocol::MessageType type, + ignored_squelch_callback callback); /** Handle peer deletion when a peer disconnects. * If the peer is in Selected state then @@ -223,17 +237,26 @@ private: time_point expire; // squelch expiration time time_point lastMessage; // time last message received }; + std::unordered_map peers_; // peer's data + // pool of peers considered as the source of messages // from validator - peers that reached MIN_MESSAGE_THRESHOLD std::unordered_set considered_; + // number of peers that reached MAX_MESSAGE_THRESHOLD std::uint16_t reachedThreshold_; + // last time peers were selected, used to age the slot typename clock_type::time_point lastSelected_; + SlotState state_; // slot's state SquelchHandler const& handler_; // squelch/unsquelch handler beast::Journal const journal_; // logging + + // the maximum number of peers that should be selected as a validator + // message source + uint16_t const maxSelectedPeers_; }; template @@ -264,7 +287,8 @@ void Slot::update( PublicKey const& validator, id_t id, - protocol::MessageType type) + protocol::MessageType type, + ignored_squelch_callback callback) { using namespace std::chrono; auto now = clock_type::now(); @@ -302,6 +326,10 @@ Slot::update( peer.lastMessage = now; + // report if we received a message from a squelched peer + if (peer.state == PeerState::Squelched) + callback(); + if (state_ != SlotState::Counting || peer.state == PeerState::Squelched) return; @@ -319,17 +347,17 @@ Slot::update( return; } - if (reachedThreshold_ == MAX_SELECTED_PEERS) + if (reachedThreshold_ == maxSelectedPeers_) { - // Randomly select MAX_SELECTED_PEERS peers from considered. + // Randomly select maxSelectedPeers_ peers from considered. // Exclude peers that have been idling > IDLED - // it's possible that deleteIdlePeer() has not been called yet. - // If number of remaining peers != MAX_SELECTED_PEERS + // If number of remaining peers != maxSelectedPeers_ // then reset the Counting state and let deleteIdlePeer() handle // idled peers. std::unordered_set selected; auto const consideredPoolSize = considered_.size(); - while (selected.size() != MAX_SELECTED_PEERS && considered_.size() != 0) + while (selected.size() != maxSelectedPeers_ && considered_.size() != 0) { auto i = considered_.size() == 1 ? 
0 : rand_int(considered_.size() - 1); @@ -347,7 +375,7 @@ Slot::update( selected.insert(id); } - if (selected.size() != MAX_SELECTED_PEERS) + if (selected.size() != maxSelectedPeers_) { JLOG(journal_.trace()) << "update: selection failed " << Slice(validator) << " " << id; @@ -364,7 +392,7 @@ Slot::update( << *std::next(s, 1) << " " << *std::next(s, 2); XRPL_ASSERT( - peers_.size() >= MAX_SELECTED_PEERS, + peers_.size() >= maxSelectedPeers_, "ripple::reduce_relay::Slot::update : minimum peers"); // squelch peers which are not selected and @@ -382,7 +410,7 @@ Slot::update( str << k << " "; v.state = PeerState::Squelched; std::chrono::seconds duration = - getSquelchDuration(peers_.size() - MAX_SELECTED_PEERS); + getSquelchDuration(peers_.size() - maxSelectedPeers_); v.expire = now + duration; handler_.squelch(validator, k, duration.count()); } @@ -544,15 +572,41 @@ class Slots final public: /** - * @param app Applicaton reference + * @param logs reference to the logger * @param handler Squelch/unsquelch implementation + * @param config reference to the global config */ - Slots(Logs& logs, SquelchHandler const& handler) - : handler_(handler), logs_(logs), journal_(logs.journal("Slots")) + Slots(Logs& logs, SquelchHandler const& handler, Config const& config) + : handler_(handler) + , logs_(logs) + , journal_(logs.journal("Slots")) + , baseSquelchEnabled_(config.VP_REDUCE_RELAY_BASE_SQUELCH_ENABLE) + , maxSelectedPeers_(config.VP_REDUCE_RELAY_SQUELCH_MAX_SELECTED_PEERS) { } ~Slots() = default; - /** Calls Slot::update of Slot associated with the validator. + + /** Check if base squelching feature is enabled and ready */ + bool + baseSquelchReady() + { + return baseSquelchEnabled_ && reduceRelayReady(); + } + + /** Check if reduce_relay::WAIT_ON_BOOTUP time passed since startup */ + bool + reduceRelayReady() + { + if (!reduceRelayReady_) + reduceRelayReady_ = + reduce_relay::epoch(clock_type::now()) > + reduce_relay::WAIT_ON_BOOTUP; + + return reduceRelayReady_; + } + + /** Calls Slot::update of Slot associated with the validator, with a noop + * callback. * @param key Message's hash * @param validator Validator's public key * @param id Peer's id which received the message @@ -563,7 +617,25 @@ public: uint256 const& key, PublicKey const& validator, id_t id, - protocol::MessageType type); + protocol::MessageType type) + { + updateSlotAndSquelch(key, validator, id, type, []() {}); + } + + /** Calls Slot::update of Slot associated with the validator. + * @param key Message's hash + * @param validator Validator's public key + * @param id Peer's id which received the message + * @param type Received protocol message type + * @param callback A callback to report ignored validations + */ + void + updateSlotAndSquelch( + uint256 const& key, + PublicKey const& validator, + id_t id, + protocol::MessageType type, + typename Slot::ignored_squelch_callback callback); /** Check if peers stopped relaying messages * and if slots stopped receiving messages from the validator. @@ -651,10 +723,16 @@ private: bool addPeerMessage(uint256 const& key, id_t id); + std::atomic_bool reduceRelayReady_{false}; + hash_map> slots_; SquelchHandler const& handler_; // squelch/unsquelch handler Logs& logs_; beast::Journal const journal_; + + bool const baseSquelchEnabled_; + uint16_t const maxSelectedPeers_; + // Maintain aged container of message/peers. This is required // to discard duplicate message from the same peer. A message // is aged after IDLED seconds. 
A message received IDLED seconds @@ -702,7 +780,8 @@ Slots::updateSlotAndSquelch( uint256 const& key, PublicKey const& validator, id_t id, - protocol::MessageType type) + protocol::MessageType type, + typename Slot::ignored_squelch_callback callback) { if (!addPeerMessage(key, id)) return; @@ -712,15 +791,17 @@ Slots::updateSlotAndSquelch( { JLOG(journal_.trace()) << "updateSlotAndSquelch: new slot " << Slice(validator); - auto it = slots_ - .emplace(std::make_pair( - validator, - Slot(handler_, logs_.journal("Slot")))) - .first; - it->second.update(validator, id, type); + auto it = + slots_ + .emplace(std::make_pair( + validator, + Slot( + handler_, logs_.journal("Slot"), maxSelectedPeers_))) + .first; + it->second.update(validator, id, type, callback); } else - it->second.update(validator, id, type); + it->second.update(validator, id, type, callback); } template diff --git a/src/xrpld/overlay/detail/ConnectAttempt.cpp b/src/xrpld/overlay/detail/ConnectAttempt.cpp index 30763b1357..84fbd36d32 100644 --- a/src/xrpld/overlay/detail/ConnectAttempt.cpp +++ b/src/xrpld/overlay/detail/ConnectAttempt.cpp @@ -209,7 +209,7 @@ ConnectAttempt::onHandshake(error_code ec) app_.config().COMPRESSION, app_.config().LEDGER_REPLAY, app_.config().TX_REDUCE_RELAY_ENABLE, - app_.config().VP_REDUCE_RELAY_ENABLE); + app_.config().VP_REDUCE_RELAY_BASE_SQUELCH_ENABLE); buildHandshake( req_, diff --git a/src/xrpld/overlay/detail/Handshake.cpp b/src/xrpld/overlay/detail/Handshake.cpp index 657d28072f..e3617a1d98 100644 --- a/src/xrpld/overlay/detail/Handshake.cpp +++ b/src/xrpld/overlay/detail/Handshake.cpp @@ -414,7 +414,7 @@ makeResponse( app.config().COMPRESSION, app.config().LEDGER_REPLAY, app.config().TX_REDUCE_RELAY_ENABLE, - app.config().VP_REDUCE_RELAY_ENABLE)); + app.config().VP_REDUCE_RELAY_BASE_SQUELCH_ENABLE)); buildHandshake(resp, sharedValue, networkID, public_ip, remote_ip, app); diff --git a/src/xrpld/overlay/detail/Handshake.h b/src/xrpld/overlay/detail/Handshake.h index 37f138b88b..1cd733ef56 100644 --- a/src/xrpld/overlay/detail/Handshake.h +++ b/src/xrpld/overlay/detail/Handshake.h @@ -139,7 +139,7 @@ makeResponse( // compression feature static constexpr char FEATURE_COMPR[] = "compr"; -// validation/proposal reduce-relay feature +// validation/proposal reduce-relay base squelch feature static constexpr char FEATURE_VPRR[] = "vprr"; // transaction reduce-relay feature static constexpr char FEATURE_TXRR[] = "txrr"; @@ -221,7 +221,7 @@ peerFeatureEnabled( @param txReduceRelayEnabled if true then transaction reduce-relay feature is enabled @param vpReduceRelayEnabled if true then validation/proposal reduce-relay - feature is enabled + base squelch feature is enabled @return X-Protocol-Ctl header value */ std::string @@ -241,8 +241,7 @@ makeFeaturesRequestHeader( @param txReduceRelayEnabled if true then transaction reduce-relay feature is enabled @param vpReduceRelayEnabled if true then validation/proposal reduce-relay - feature is enabled - @param vpReduceRelayEnabled if true then reduce-relay feature is enabled + base squelch feature is enabled @return X-Protocol-Ctl header value */ std::string diff --git a/src/xrpld/overlay/detail/OverlayImpl.cpp b/src/xrpld/overlay/detail/OverlayImpl.cpp index e1ccc2ee84..3cc5b2a024 100644 --- a/src/xrpld/overlay/detail/OverlayImpl.cpp +++ b/src/xrpld/overlay/detail/OverlayImpl.cpp @@ -142,7 +142,7 @@ OverlayImpl::OverlayImpl( , m_resolver(resolver) , next_id_(1) , timer_count_(0) - , slots_(app.logs(), *this) + , slots_(app.logs(), *this, app.config()) , 
m_stats( std::bind(&OverlayImpl::collect_metrics, this), collector, @@ -1390,8 +1390,7 @@ makeSquelchMessage( void OverlayImpl::unsquelch(PublicKey const& validator, Peer::id_t id) const { - if (auto peer = findPeerByShortID(id); - peer && app_.config().VP_REDUCE_RELAY_SQUELCH) + if (auto peer = findPeerByShortID(id); peer) { // optimize - multiple message with different // validator might be sent to the same peer @@ -1405,8 +1404,7 @@ OverlayImpl::squelch( Peer::id_t id, uint32_t squelchDuration) const { - if (auto peer = findPeerByShortID(id); - peer && app_.config().VP_REDUCE_RELAY_SQUELCH) + if (auto peer = findPeerByShortID(id); peer) { peer->send(makeSquelchMessage(validator, true, squelchDuration)); } @@ -1419,6 +1417,9 @@ OverlayImpl::updateSlotAndSquelch( std::set&& peers, protocol::MessageType type) { + if (!slots_.baseSquelchReady()) + return; + if (!strand_.running_in_this_thread()) return post( strand_, @@ -1427,7 +1428,9 @@ OverlayImpl::updateSlotAndSquelch( }); for (auto id : peers) - slots_.updateSlotAndSquelch(key, validator, id, type); + slots_.updateSlotAndSquelch(key, validator, id, type, [&]() { + reportInboundTraffic(TrafficCount::squelch_ignored, 0); + }); } void @@ -1437,12 +1440,17 @@ OverlayImpl::updateSlotAndSquelch( Peer::id_t peer, protocol::MessageType type) { + if (!slots_.baseSquelchReady()) + return; + if (!strand_.running_in_this_thread()) return post(strand_, [this, key, validator, peer, type]() { updateSlotAndSquelch(key, validator, peer, type); }); - slots_.updateSlotAndSquelch(key, validator, peer, type); + slots_.updateSlotAndSquelch(key, validator, peer, type, [&]() { + reportInboundTraffic(TrafficCount::squelch_ignored, 0); + }); } void diff --git a/src/xrpld/overlay/detail/PeerImp.cpp b/src/xrpld/overlay/detail/PeerImp.cpp index cb3a7a69f5..68894fb234 100644 --- a/src/xrpld/overlay/detail/PeerImp.cpp +++ b/src/xrpld/overlay/detail/PeerImp.cpp @@ -113,20 +113,21 @@ PeerImp::PeerImp( headers_, FEATURE_TXRR, app_.config().TX_REDUCE_RELAY_ENABLE)) - , vpReduceRelayEnabled_(app_.config().VP_REDUCE_RELAY_ENABLE) , ledgerReplayEnabled_(peerFeatureEnabled( headers_, FEATURE_LEDGER_REPLAY, app_.config().LEDGER_REPLAY)) , ledgerReplayMsgHandler_(app, app.getLedgerReplayer()) { - JLOG(journal_.info()) << "compression enabled " - << (compressionEnabled_ == Compressed::On) - << " vp reduce-relay enabled " - << vpReduceRelayEnabled_ - << " tx reduce-relay enabled " - << txReduceRelayEnabled_ << " on " << remote_address_ - << " " << id_; + JLOG(journal_.info()) + << "compression enabled " << (compressionEnabled_ == Compressed::On) + << " vp reduce-relay base squelch enabled " + << peerFeatureEnabled( + headers_, + FEATURE_VPRR, + app_.config().VP_REDUCE_RELAY_BASE_SQUELCH_ENABLE) + << " tx reduce-relay enabled " << txReduceRelayEnabled_ << " on " + << remote_address_ << " " << id_; } PeerImp::~PeerImp() @@ -1733,8 +1734,7 @@ PeerImp::onMessage(std::shared_ptr const& m) { // Count unique messages (Slots has it's own 'HashRouter'), which a peer // receives within IDLED seconds since the message has been relayed. 
- if (reduceRelayReady() && relayed && - (stopwatch().now() - *relayed) < reduce_relay::IDLED) + if (relayed && (stopwatch().now() - *relayed) < reduce_relay::IDLED) overlay_.updateSlotAndSquelch( suppression, publicKey, id_, protocol::mtPROPOSE_LEDGER); @@ -2381,10 +2381,8 @@ PeerImp::onMessage(std::shared_ptr const& m) { // Count unique messages (Slots has it's own 'HashRouter'), which a // peer receives within IDLED seconds since the message has been - // relayed. Wait WAIT_ON_BOOTUP time to let the server establish - // connections to peers. - if (reduceRelayReady() && relayed && - (stopwatch().now() - *relayed) < reduce_relay::IDLED) + // relayed. + if (relayed && (stopwatch().now() - *relayed) < reduce_relay::IDLED) overlay_.updateSlotAndSquelch( key, val->getSignerPublic(), id_, protocol::mtVALIDATION); @@ -3005,7 +3003,7 @@ PeerImp::checkPropose( // as part of the squelch logic. auto haveMessage = app_.overlay().relay( *packet, peerPos.suppressionID(), peerPos.publicKey()); - if (reduceRelayReady() && !haveMessage.empty()) + if (!haveMessage.empty()) overlay_.updateSlotAndSquelch( peerPos.suppressionID(), peerPos.publicKey(), @@ -3040,7 +3038,7 @@ PeerImp::checkValidation( // as part of the squelch logic. auto haveMessage = overlay_.relay(*packet, key, val->getSignerPublic()); - if (reduceRelayReady() && !haveMessage.empty()) + if (!haveMessage.empty()) { overlay_.updateSlotAndSquelch( key, @@ -3506,16 +3504,6 @@ PeerImp::isHighLatency() const return latency_ >= peerHighLatency; } -bool -PeerImp::reduceRelayReady() -{ - if (!reduceRelayReady_) - reduceRelayReady_ = - reduce_relay::epoch(UptimeClock::now()) > - reduce_relay::WAIT_ON_BOOTUP; - return vpReduceRelayEnabled_ && reduceRelayReady_; -} - void PeerImp::Metrics::add_message(std::uint64_t bytes) { diff --git a/src/xrpld/overlay/detail/PeerImp.h b/src/xrpld/overlay/detail/PeerImp.h index 8fbafa1ee9..ecd3fc7f63 100644 --- a/src/xrpld/overlay/detail/PeerImp.h +++ b/src/xrpld/overlay/detail/PeerImp.h @@ -116,7 +116,6 @@ private: clock_type::time_point const creationTime_; reduce_relay::Squelch squelch_; - inline static std::atomic_bool reduceRelayReady_{false}; // Notes on thread locking: // @@ -190,9 +189,7 @@ private: hash_set txQueue_; // true if tx reduce-relay feature is enabled on the peer. bool txReduceRelayEnabled_ = false; - // true if validation/proposal reduce-relay feature is enabled - // on the peer. 
- bool vpReduceRelayEnabled_ = false; + bool ledgerReplayEnabled_ = false; LedgerReplayMsgHandler ledgerReplayMsgHandler_; @@ -521,11 +518,6 @@ private: handleHaveTransactions( std::shared_ptr const& m); - // Check if reduce-relay feature is enabled and - // reduce_relay::WAIT_ON_BOOTUP time passed since the start - bool - reduceRelayReady(); - public: //-------------------------------------------------------------------------- // @@ -705,7 +697,6 @@ PeerImp::PeerImp( headers_, FEATURE_TXRR, app_.config().TX_REDUCE_RELAY_ENABLE)) - , vpReduceRelayEnabled_(app_.config().VP_REDUCE_RELAY_ENABLE) , ledgerReplayEnabled_(peerFeatureEnabled( headers_, FEATURE_LEDGER_REPLAY, @@ -714,13 +705,15 @@ PeerImp::PeerImp( { read_buffer_.commit(boost::asio::buffer_copy( read_buffer_.prepare(boost::asio::buffer_size(buffers)), buffers)); - JLOG(journal_.info()) << "compression enabled " - << (compressionEnabled_ == Compressed::On) - << " vp reduce-relay enabled " - << vpReduceRelayEnabled_ - << " tx reduce-relay enabled " - << txReduceRelayEnabled_ << " on " << remote_address_ - << " " << id_; + JLOG(journal_.info()) + << "compression enabled " << (compressionEnabled_ == Compressed::On) + << " vp reduce-relay base squelch enabled " + << peerFeatureEnabled( + headers_, + FEATURE_VPRR, + app_.config().VP_REDUCE_RELAY_BASE_SQUELCH_ENABLE) + << " tx reduce-relay enabled " << txReduceRelayEnabled_ << " on " + << remote_address_ << " " << id_; } template diff --git a/src/xrpld/overlay/detail/TrafficCount.h b/src/xrpld/overlay/detail/TrafficCount.h index e93163683b..8dc02def5f 100644 --- a/src/xrpld/overlay/detail/TrafficCount.h +++ b/src/xrpld/overlay/detail/TrafficCount.h @@ -109,6 +109,8 @@ public: squelch, squelch_suppressed, // egress traffic amount suppressed by squelching + squelch_ignored, // the traffic amount that came from peers ignoring + // squelch messages // TMHaveSet message: get_set, // transaction sets we try to get @@ -262,6 +264,7 @@ public: {validatorlist, "validator_lists"}, {squelch, "squelch"}, {squelch_suppressed, "squelch_suppressed"}, + {squelch_ignored, "squelch_ignored"}, {get_set, "set_get"}, {share_set, "set_share"}, {ld_tsc_get, "ledger_data_Transaction_Set_candidate_get"}, @@ -326,6 +329,7 @@ protected: {validatorlist, {validatorlist}}, {squelch, {squelch}}, {squelch_suppressed, {squelch_suppressed}}, + {squelch_ignored, {squelch_ignored}}, {get_set, {get_set}}, {share_set, {share_set}}, {ld_tsc_get, {ld_tsc_get}}, From 9e1fe9a85e800b11bd82e594eff5198f64c57c3c Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Wed, 28 May 2025 15:28:18 +0100 Subject: [PATCH 038/244] Fix: Improve handling of expired credentials in `VaultDeposit` (#5452) This change returns `tecEXPIRED` from VaultDeposit to allow the Transactor to remove the expired credentials. 
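In outline, the fix reports `tecEXPIRED` (rather than `tecNO_PERMISSION` or `tecNO_AUTH`) whenever the credential scan had to drop an expired credential and no valid one remains, so the transaction is applied far enough for the expired ledger entries to be deleted. A minimal sketch of that decision, using stand-in types rather than the real rippled declarations, is shown below; it mirrors the change to `verifyValidDomain` in the diff that follows.

    // Simplified model only; TER values and Credential are stand-ins,
    // not the actual rippled types.
    #include <vector>

    enum TER { tesSUCCESS, tecNO_PERMISSION, tecEXPIRED };
    struct Credential { bool accepted; bool expired; };

    TER verifyValidDomainSketch(std::vector<Credential>& creds)
    {
        bool foundExpired = false;
        // Drop expired credentials first, as removeExpired() does in the real code.
        std::erase_if(creds, [&](Credential const& c) {
            foundExpired = foundExpired || c.expired;
            return c.expired;
        });
        for (auto const& c : creds)
            if (c.accepted)
                return tesSUCCESS;
        // Reporting tecEXPIRED lets the caller keep the deletions it just made.
        return foundExpired ? tecEXPIRED : tecNO_PERMISSION;
    }
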
--- src/test/app/Vault_test.cpp | 96 ++++++++++++++++++++---- src/xrpld/app/misc/CredentialHelpers.cpp | 6 +- src/xrpld/ledger/detail/View.cpp | 19 ++++- 3 files changed, 100 insertions(+), 21 deletions(-) diff --git a/src/test/app/Vault_test.cpp b/src/test/app/Vault_test.cpp index 67cc3812df..5aab737669 100644 --- a/src/test/app/Vault_test.cpp +++ b/src/test/app/Vault_test.cpp @@ -2273,6 +2273,8 @@ class Vault_test : public beast::unit_test::suite env(pay(issuer, owner, asset(500))); env.trust(asset(1000), depositor); env(pay(issuer, depositor, asset(500))); + env.trust(asset(1000), charlie); + env(pay(issuer, charlie, asset(5))); env.close(); auto [tx, keylet] = vault.create( @@ -2362,7 +2364,7 @@ class Vault_test : public beast::unit_test::suite env(credentials::create(depositor, credIssuer1, credType)); env(credentials::accept(depositor, credIssuer1, credType)); env(credentials::create(charlie, credIssuer1, credType)); - env(credentials::accept(charlie, credIssuer1, credType)); + // charlie's credential not accepted env.close(); auto credSle = env.le(credKeylet); BEAST_EXPECT(credSle != nullptr); @@ -2376,7 +2378,7 @@ class Vault_test : public beast::unit_test::suite tx = vault.deposit( {.depositor = charlie, .id = keylet.key, .amount = asset(50)}); - env(tx, ter{tecINSUFFICIENT_FUNDS}); + env(tx, ter{tecNO_AUTH}); env.close(); } @@ -2384,6 +2386,8 @@ class Vault_test : public beast::unit_test::suite testcase("private vault depositor lost authorization"); env(credentials::deleteCred( credIssuer1, depositor, credIssuer1, credType)); + env(credentials::deleteCred( + credIssuer1, charlie, credIssuer1, credType)); env.close(); auto credSle = env.le(credKeylet); BEAST_EXPECT(credSle == nullptr); @@ -2396,18 +2400,84 @@ class Vault_test : public beast::unit_test::suite env.close(); } - { - testcase("private vault depositor new authorization"); - env(credentials::create(depositor, credIssuer2, credType)); - env(credentials::accept(depositor, credIssuer2, credType)); - env.close(); + auto const shares = [&env, keylet = keylet, this]() -> Asset { + auto const vault = env.le(keylet); + BEAST_EXPECT(vault != nullptr); + return MPTIssue(vault->at(sfShareMPTID)); + }(); - auto tx = vault.deposit( - {.depositor = depositor, - .id = keylet.key, - .amount = asset(50)}); - env(tx); - env.close(); + { + testcase("private vault expired authorization"); + uint32_t const closeTime = env.current() + ->info() + .parentCloseTime.time_since_epoch() + .count(); + { + auto tx0 = + credentials::create(depositor, credIssuer2, credType); + tx0[sfExpiration] = closeTime + 20; + env(tx0); + tx0 = credentials::create(charlie, credIssuer2, credType); + tx0[sfExpiration] = closeTime + 20; + env(tx0); + env.close(); + + env(credentials::accept(depositor, credIssuer2, credType)); + env(credentials::accept(charlie, credIssuer2, credType)); + env.close(); + } + + { + auto tx1 = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(50)}); + env(tx1); + env.close(); + + auto const tokenKeylet = keylet::mptoken( + shares.get().getMptID(), depositor.id()); + BEAST_EXPECT(env.le(tokenKeylet) != nullptr); + } + + { + // time advance + env.close(); + env.close(); + env.close(); + + auto const credsKeylet = + credentials::keylet(depositor, credIssuer2, credType); + BEAST_EXPECT(env.le(credsKeylet) != nullptr); + + auto tx2 = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(1)}); + env(tx2, ter{tecEXPIRED}); + env.close(); + + BEAST_EXPECT(env.le(credsKeylet) == nullptr); + } 
+ + { + auto const credsKeylet = + credentials::keylet(charlie, credIssuer2, credType); + BEAST_EXPECT(env.le(credsKeylet) != nullptr); + auto const tokenKeylet = keylet::mptoken( + shares.get().getMptID(), charlie.id()); + BEAST_EXPECT(env.le(tokenKeylet) == nullptr); + + auto tx3 = vault.deposit( + {.depositor = charlie, + .id = keylet.key, + .amount = asset(2)}); + env(tx3, ter{tecEXPIRED}); + + env.close(); + BEAST_EXPECT(env.le(credsKeylet) == nullptr); + BEAST_EXPECT(env.le(tokenKeylet) == nullptr); + } } { diff --git a/src/xrpld/app/misc/CredentialHelpers.cpp b/src/xrpld/app/misc/CredentialHelpers.cpp index 03ad1f9c80..81355f1792 100644 --- a/src/xrpld/app/misc/CredentialHelpers.cpp +++ b/src/xrpld/app/misc/CredentialHelpers.cpp @@ -336,9 +336,7 @@ verifyValidDomain( credentials.push_back(keyletCredential.key); } - // Result intentionally ignored. - [[maybe_unused]] bool _ = credentials::removeExpired(view, credentials, j); - + bool const foundExpired = credentials::removeExpired(view, credentials, j); for (auto const& h : credentials) { auto sleCredential = view.read(keylet::credential(h)); @@ -349,7 +347,7 @@ verifyValidDomain( return tesSUCCESS; } - return tecNO_PERMISSION; + return foundExpired ? tecEXPIRED : tecNO_PERMISSION; } TER diff --git a/src/xrpld/ledger/detail/View.cpp b/src/xrpld/ledger/detail/View.cpp index aa6e2dda8f..e9499a287a 100644 --- a/src/xrpld/ledger/detail/View.cpp +++ b/src/xrpld/ledger/detail/View.cpp @@ -2391,8 +2391,19 @@ enforceMPTokenAuthorization( auto const keylet = keylet::mptoken(mptIssuanceID, account); auto const sleToken = view.read(keylet); // NOTE: might be null auto const maybeDomainID = sleIssuance->at(~sfDomainID); - bool const authorizedByDomain = maybeDomainID.has_value() && - verifyValidDomain(view, account, *maybeDomainID, j) == tesSUCCESS; + bool expired = false; + bool const authorizedByDomain = [&]() -> bool { + // NOTE: defensive here, shuld be checked in preclaim + if (!maybeDomainID.has_value()) + return false; // LCOV_EXCL_LINE + + auto const ter = verifyValidDomain(view, account, *maybeDomainID, j); + if (isTesSuccess(ter)) + return true; + if (ter == tecEXPIRED) + expired = true; + return false; + }(); if (!authorizedByDomain && sleToken == nullptr) { @@ -2403,14 +2414,14 @@ enforceMPTokenAuthorization( // 3. Account has all expired credentials (deleted in verifyValidDomain) // // Either way, return tecNO_AUTH and there is nothing else to do - return tecNO_AUTH; + return expired ? tecEXPIRED : tecNO_AUTH; } else if (!authorizedByDomain && maybeDomainID.has_value()) { // Found an MPToken but the account is not authorized and we expect // it to have been authorized by the domain. This could be because the // credentials used to create the MPToken have expired or been deleted. - return tecNO_AUTH; + return expired ? tecEXPIRED : tecNO_AUTH; } else if (!authorizedByDomain) { From 05105743e94f90100a5edae14d46d8b7e4af8b59 Mon Sep 17 00:00:00 2001 From: Mayukha Vadari Date: Thu, 29 May 2025 12:28:09 -0400 Subject: [PATCH 039/244] chore[tests]: improve env.meta usage (#5457) This commit changes the ledger close in env.meta to be conditional on if it hasn't already been closed (i.e. the current ledger doesn't have any transactions in it). This change will make it a bit easier to use, as it will still work if you close the ledger outside of this usage. Previously, if you accidentally closed the ledger outside of the meta function, it would segfault and it was incredibly difficult to debug. 
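For illustration only (not part of the patch), a minimal jtx-style test fragment showing the pattern this change makes safe; the account names and amounts are made up, and the snippet assumes a typical beast::unit_test suite using the jtx helpers touched by this patch:

```
// Hypothetical fragment: an explicit close() before meta() is now harmless,
// because meta() only closes the ledger when open transactions remain.
Env env{*this};
Account const alice{"alice"};
Account const bob{"bob"};
env.fund(XRP(10000), alice, bob);
env.close();

env(pay(alice, bob, XRP(100)));
env.close();                   // ledger closed outside of meta()

auto const meta = env.meta();  // no second close; reads the payment's metadata
BEAST_EXPECT(meta != nullptr);
```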
--- src/test/jtx/Env.h | 17 ++++++++++------- src/test/jtx/impl/Env.cpp | 7 ++++++- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/src/test/jtx/Env.h b/src/test/jtx/Env.h index de6b83362d..2b5397b903 100644 --- a/src/test/jtx/Env.h +++ b/src/test/jtx/Env.h @@ -589,13 +589,16 @@ public: } /** Return metadata for the last JTx. - - Effects: - - The open ledger is closed as if by a call - to close(). The metadata for the last - transaction ID, if any, is returned. - */ + * + * NOTE: this has a side effect of closing the open ledger. + * The ledger will only be closed if it includes transactions. + * + * Effects: + * + * The open ledger is closed as if by a call + * to close(). The metadata for the last + * transaction ID, if any, is returned. + */ std::shared_ptr meta(); diff --git a/src/test/jtx/impl/Env.cpp b/src/test/jtx/impl/Env.cpp index ac00d3eed1..e45042e310 100644 --- a/src/test/jtx/impl/Env.cpp +++ b/src/test/jtx/impl/Env.cpp @@ -446,7 +446,12 @@ Env::postconditions( std::shared_ptr Env::meta() { - close(); + if (current()->txCount() != 0) + { + // close the ledger if it has not already been closed + // (metadata is not finalized until the ledger is closed) + close(); + } auto const item = closed()->txRead(txid_); return item.second; } From dacecd24ba7949b6fea4b63186bf6789c5e3f9ff Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Thu, 29 May 2025 21:53:31 +0100 Subject: [PATCH 040/244] Fix unit build error (#5459) This change fixes the issue that there is a `using namespace` statement inside a namespace scope. --- src/test/app/Vault_test.cpp | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/src/test/app/Vault_test.cpp b/src/test/app/Vault_test.cpp index 5aab737669..ccac0e2819 100644 --- a/src/test/app/Vault_test.cpp +++ b/src/test/app/Vault_test.cpp @@ -53,10 +53,11 @@ namespace ripple { -using namespace test::jtx; - class Vault_test : public beast::unit_test::suite { + using PrettyAsset = ripple::test::jtx::PrettyAsset; + using PrettyAmount = ripple::test::jtx::PrettyAmount; + static auto constexpr negativeAmount = [](PrettyAsset const& asset) -> PrettyAmount { return {STAmount{asset.raw(), 1ul, 0, true, STAmount::unchecked{}}, ""}; @@ -1210,6 +1211,7 @@ class Vault_test : public beast::unit_test::suite testCreateFailMPT() { using namespace test::jtx; + Env env{*this, supported_amendments() | featureSingleAssetVault}; Account issuer{"issuer"}; Account owner{"owner"}; @@ -1231,6 +1233,7 @@ class Vault_test : public beast::unit_test::suite testNonTransferableShares() { using namespace test::jtx; + Env env{*this, supported_amendments() | featureSingleAssetVault}; Account issuer{"issuer"}; Account owner{"owner"}; @@ -1787,6 +1790,8 @@ class Vault_test : public beast::unit_test::suite void testWithIOU() { + using namespace test::jtx; + auto testCase = [&, this]( std::function Date: Fri, 30 May 2025 14:46:08 +0200 Subject: [PATCH 041/244] docs: update example keyserver host in SECURITY.md (#5460) --- SECURITY.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SECURITY.md b/SECURITY.md index 4e845735d4..eb7437d2f9 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -83,7 +83,7 @@ To report a qualifying bug, please send a detailed report to: |Long Key ID | `0xCD49A0AFC57929BE` | |Fingerprint | `24E6 3B02 37E0 FA9C 5E96 8974 CD49 A0AF C579 29BE` | -The full PGP key for this address, which is also available on several key servers (e.g. 
on [keys.gnupg.net](https://keys.gnupg.net)), is: +The full PGP key for this address, which is also available on several key servers (e.g. on [keyserver.ubuntu.com](https://keyserver.ubuntu.com)), is: ``` -----BEGIN PGP PUBLIC KEY BLOCK----- mQINBFUwGHYBEAC0wpGpBPkd8W1UdQjg9+cEFzeIEJRaoZoeuJD8mofwI5Ejnjdt From 0a34b5c691d3a205146a706b460473b3bfb84205 Mon Sep 17 00:00:00 2001 From: Shawn Xie <35279399+shawnxie999@users.noreply.github.com> Date: Fri, 30 May 2025 13:24:48 -0400 Subject: [PATCH 042/244] Add support for XLS-81 Permissioned DEX (#5404) Modified transactions: - OfferCreate - Payment Modified RPCs: - book_changes - subscribe - book_offers - ripple_path_find - path_find Spec: https://github.com/XRPLF/XRPL-Standards/pull/281 --- include/xrpl/protocol/Book.h | 40 +- include/xrpl/protocol/ErrorCodes.h | 5 +- include/xrpl/protocol/LedgerFormats.h | 1 + include/xrpl/protocol/TxFlags.h | 4 +- include/xrpl/protocol/UintTypes.h | 3 + include/xrpl/protocol/detail/features.macro | 1 + .../xrpl/protocol/detail/ledger_entries.macro | 3 + include/xrpl/protocol/detail/sfields.macro | 2 + .../xrpl/protocol/detail/transactions.macro | 2 + src/libxrpl/protocol/Book.cpp | 2 +- src/libxrpl/protocol/ErrorCodes.cpp | 3 +- src/libxrpl/protocol/Indexes.cpp | 19 +- src/libxrpl/protocol/InnerObjectFormats.cpp | 7 + src/test/app/AMMExtended_test.cpp | 1 + src/test/app/CrossingLimits_test.cpp | 7 +- src/test/app/DeliverMin_test.cpp | 4 +- src/test/app/Discrepancy_test.cpp | 3 +- src/test/app/Flow_test.cpp | 15 +- src/test/app/Freeze_test.cpp | 8 +- src/test/app/Offer_test.cpp | 39 +- src/test/app/Path_test.cpp | 971 ++++++++-- src/test/app/PayStrand_test.cpp | 40 +- src/test/app/PermissionedDEX_test.cpp | 1595 +++++++++++++++++ src/test/app/SetAuth_test.cpp | 3 +- src/test/app/TheoreticalQuality_test.cpp | 1 + src/test/app/TrustAndBalance_test.cpp | 3 +- src/test/jtx.h | 2 + src/test/jtx/domain.h | 45 + src/test/jtx/impl/domain.cpp | 36 + src/test/jtx/impl/paths.cpp | 15 + src/test/jtx/impl/permissioned_dex.cpp | 85 + src/test/jtx/permissioned_dex.h | 51 + src/test/ledger/BookDirs_test.cpp | 18 +- src/test/ledger/Directory_test.cpp | 6 +- src/test/ledger/Invariants_test.cpp | 231 ++- src/test/ledger/PaymentSandbox_test.cpp | 3 +- src/test/protocol/Issue_test.cpp | 489 ++++- src/test/rpc/BookChanges_test.cpp | 53 +- src/test/rpc/Book_test.cpp | 292 ++- src/test/rpc/GatewayBalances_test.cpp | 5 +- src/test/rpc/JSONRPC_test.cpp | 22 + src/test/rpc/NoRipple_test.cpp | 3 +- src/test/rpc/Subscribe_test.cpp | 55 + src/xrpld/app/ledger/OrderBookDB.cpp | 78 +- src/xrpld/app/ledger/OrderBookDB.h | 18 +- src/xrpld/app/misc/PermissionedDEXHelpers.cpp | 88 + src/xrpld/app/misc/PermissionedDEXHelpers.h | 43 + src/xrpld/app/paths/Flow.cpp | 2 + src/xrpld/app/paths/Flow.h | 1 + src/xrpld/app/paths/PathRequest.cpp | 18 + src/xrpld/app/paths/PathRequest.h | 3 + src/xrpld/app/paths/Pathfinder.cpp | 12 +- src/xrpld/app/paths/Pathfinder.h | 2 + src/xrpld/app/paths/RippleCalc.cpp | 3 + src/xrpld/app/paths/RippleCalc.h | 2 + src/xrpld/app/paths/detail/BookStep.cpp | 6 +- src/xrpld/app/paths/detail/PaySteps.cpp | 7 + src/xrpld/app/paths/detail/Steps.h | 7 + src/xrpld/app/tx/detail/AMMCreate.cpp | 2 +- src/xrpld/app/tx/detail/CashCheck.cpp | 1 + src/xrpld/app/tx/detail/CreateOffer.cpp | 141 +- src/xrpld/app/tx/detail/CreateOffer.h | 18 +- src/xrpld/app/tx/detail/InvariantCheck.cpp | 83 + src/xrpld/app/tx/detail/InvariantCheck.h | 26 +- src/xrpld/app/tx/detail/OfferStream.cpp | 14 + src/xrpld/app/tx/detail/Payment.cpp | 17 + 
src/xrpld/app/tx/detail/XChainBridge.cpp | 1 + src/xrpld/ledger/detail/View.cpp | 22 +- src/xrpld/rpc/BookChanges.h | 27 +- src/xrpld/rpc/detail/TransactionSign.cpp | 18 + src/xrpld/rpc/handlers/BookOffers.cpp | 18 +- src/xrpld/rpc/handlers/Subscribe.cpp | 14 + src/xrpld/rpc/handlers/Unsubscribe.cpp | 14 + 73 files changed, 4591 insertions(+), 308 deletions(-) create mode 100644 src/test/app/PermissionedDEX_test.cpp create mode 100644 src/test/jtx/domain.h create mode 100644 src/test/jtx/impl/domain.cpp create mode 100644 src/test/jtx/impl/permissioned_dex.cpp create mode 100644 src/test/jtx/permissioned_dex.h create mode 100644 src/xrpld/app/misc/PermissionedDEXHelpers.cpp create mode 100644 src/xrpld/app/misc/PermissionedDEXHelpers.h diff --git a/include/xrpl/protocol/Book.h b/include/xrpl/protocol/Book.h index 0fcff0df80..a8b9afacac 100644 --- a/include/xrpl/protocol/Book.h +++ b/include/xrpl/protocol/Book.h @@ -21,6 +21,7 @@ #define RIPPLE_PROTOCOL_BOOK_H_INCLUDED #include +#include #include #include @@ -36,12 +37,17 @@ class Book final : public CountedObject public: Issue in; Issue out; + std::optional domain; Book() { } - Book(Issue const& in_, Issue const& out_) : in(in_), out(out_) + Book( + Issue const& in_, + Issue const& out_, + std::optional const& domain_) + : in(in_), out(out_), domain(domain_) { } }; @@ -61,6 +67,8 @@ hash_append(Hasher& h, Book const& b) { using beast::hash_append; hash_append(h, b.in, b.out); + if (b.domain) + hash_append(h, *(b.domain)); } Book @@ -71,7 +79,8 @@ reversed(Book const& book); [[nodiscard]] inline constexpr bool operator==(Book const& lhs, Book const& rhs) { - return (lhs.in == rhs.in) && (lhs.out == rhs.out); + return (lhs.in == rhs.in) && (lhs.out == rhs.out) && + (lhs.domain == rhs.domain); } /** @} */ @@ -82,7 +91,18 @@ operator<=>(Book const& lhs, Book const& rhs) { if (auto const c{lhs.in <=> rhs.in}; c != 0) return c; - return lhs.out <=> rhs.out; + if (auto const c{lhs.out <=> rhs.out}; c != 0) + return c; + + // Manually compare optionals + if (lhs.domain && rhs.domain) + return *lhs.domain <=> *rhs.domain; // Compare values if both exist + if (!lhs.domain && rhs.domain) + return std::weak_ordering::less; // Empty is considered less + if (lhs.domain && !rhs.domain) + return std::weak_ordering::greater; // Non-empty is greater + + return std::weak_ordering::equivalent; // Both are empty } /** @} */ @@ -126,9 +146,11 @@ template <> struct hash { private: - using hasher = std::hash; + using issue_hasher = std::hash; + using uint256_hasher = ripple::uint256::hasher; - hasher m_hasher; + issue_hasher m_issue_hasher; + uint256_hasher m_uint256_hasher; public: hash() = default; @@ -139,8 +161,12 @@ public: value_type operator()(argument_type const& value) const { - value_type result(m_hasher(value.in)); - boost::hash_combine(result, m_hasher(value.out)); + value_type result(m_issue_hasher(value.in)); + boost::hash_combine(result, m_issue_hasher(value.out)); + + if (value.domain) + boost::hash_combine(result, m_uint256_hasher(*value.domain)); + return result; } }; diff --git a/include/xrpl/protocol/ErrorCodes.h b/include/xrpl/protocol/ErrorCodes.h index 66b4dd178c..9c9319ba42 100644 --- a/include/xrpl/protocol/ErrorCodes.h +++ b/include/xrpl/protocol/ErrorCodes.h @@ -154,7 +154,10 @@ enum error_code_i { // Simulate rpcTX_SIGNED = 96, - rpcLAST = rpcTX_SIGNED // rpcLAST should always equal the last code. + // Pathfinding + rpcDOMAIN_MALFORMED = 97, + + rpcLAST = rpcDOMAIN_MALFORMED // rpcLAST should always equal the last code. 
}; /** Codes returned in the `warnings` array of certain RPC commands. diff --git a/include/xrpl/protocol/LedgerFormats.h b/include/xrpl/protocol/LedgerFormats.h index 3edd656213..58ebbe69cc 100644 --- a/include/xrpl/protocol/LedgerFormats.h +++ b/include/xrpl/protocol/LedgerFormats.h @@ -152,6 +152,7 @@ enum LedgerSpecificFlags { // ltOFFER lsfPassive = 0x00010000, lsfSell = 0x00020000, // True, offer was placed as a sell. + lsfHybrid = 0x00040000, // True, offer is hybrid. // ltRIPPLE_STATE lsfLowReserve = 0x00010000, // True, if entry counts toward reserve. diff --git a/include/xrpl/protocol/TxFlags.h b/include/xrpl/protocol/TxFlags.h index 31c3ffa205..80f6a78727 100644 --- a/include/xrpl/protocol/TxFlags.h +++ b/include/xrpl/protocol/TxFlags.h @@ -98,9 +98,9 @@ constexpr std::uint32_t tfPassive = 0x00010000; constexpr std::uint32_t tfImmediateOrCancel = 0x00020000; constexpr std::uint32_t tfFillOrKill = 0x00040000; constexpr std::uint32_t tfSell = 0x00080000; - +constexpr std::uint32_t tfHybrid = 0x00100000; constexpr std::uint32_t tfOfferCreateMask = - ~(tfUniversal | tfPassive | tfImmediateOrCancel | tfFillOrKill | tfSell); + ~(tfUniversal | tfPassive | tfImmediateOrCancel | tfFillOrKill | tfSell | tfHybrid); // Payment flags: constexpr std::uint32_t tfNoRippleDirect = 0x00010000; diff --git a/include/xrpl/protocol/UintTypes.h b/include/xrpl/protocol/UintTypes.h index d6cdc9350e..1d6b3e23dc 100644 --- a/include/xrpl/protocol/UintTypes.h +++ b/include/xrpl/protocol/UintTypes.h @@ -63,6 +63,9 @@ using NodeID = base_uint<160, detail::NodeIDTag>; * and a 160-bit account */ using MPTID = base_uint<192>; +/** Domain is a 256-bit hash representing a specific domain. */ +using Domain = base_uint<256>; + /** XRP currency. */ Currency const& xrpCurrency(); diff --git a/include/xrpl/protocol/detail/features.macro b/include/xrpl/protocol/detail/features.macro index e61d3a8005..61828d4758 100644 --- a/include/xrpl/protocol/detail/features.macro +++ b/include/xrpl/protocol/detail/features.macro @@ -32,6 +32,7 @@ // If you add an amendment here, then do not forget to increment `numFeatures` // in include/xrpl/protocol/Feature.h. +XRPL_FEATURE(PermissionedDEX, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(Batch, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(SingleAssetVault, Supported::no, VoteBehavior::DefaultNo) XRPL_FEATURE(PermissionDelegation, Supported::yes, VoteBehavior::DefaultNo) diff --git a/include/xrpl/protocol/detail/ledger_entries.macro b/include/xrpl/protocol/detail/ledger_entries.macro index a902b32026..06fe9d45bd 100644 --- a/include/xrpl/protocol/detail/ledger_entries.macro +++ b/include/xrpl/protocol/detail/ledger_entries.macro @@ -188,6 +188,7 @@ LEDGER_ENTRY(ltDIR_NODE, 0x0064, DirectoryNode, directory, ({ {sfNFTokenID, soeOPTIONAL}, {sfPreviousTxnID, soeOPTIONAL}, {sfPreviousTxnLgrSeq, soeOPTIONAL}, + {sfDomainID, soeOPTIONAL} })) /** The ledger object which lists details about amendments on the network. @@ -249,6 +250,8 @@ LEDGER_ENTRY(ltOFFER, 0x006f, Offer, offer, ({ {sfPreviousTxnID, soeREQUIRED}, {sfPreviousTxnLgrSeq, soeREQUIRED}, {sfExpiration, soeOPTIONAL}, + {sfDomainID, soeOPTIONAL}, + {sfAdditionalBooks, soeOPTIONAL}, })) /** A ledger object which describes a deposit preauthorization. 
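As an editorial aside on the `Book` changes earlier in this patch: a minimal sketch, assuming the new domain member is a `std::optional<uint256>` (as the `uint256::hasher` used in the hash specialization suggests), of how an open-book key and a domain-book key over the same currency pair now compare; `in`, `out`, and `domainID` are caller-supplied placeholders, not names from the patch:

```
// Sketch only: shows the comparison semantics added to Book above.
bool
booksAreDistinct(Issue const& in, Issue const& out, uint256 const& domainID)
{
    Book const openBook{in, out, std::nullopt};
    Book const domainBook{in, out, domainID};

    // operator== now includes the domain, and operator<=> orders a book
    // with no domain before one that has a domain set.
    return openBook != domainBook && (openBook <=> domainBook) < 0;
}
```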
diff --git a/include/xrpl/protocol/detail/sfields.macro b/include/xrpl/protocol/detail/sfields.macro index dbef597ea0..2053ac94bb 100644 --- a/include/xrpl/protocol/detail/sfields.macro +++ b/include/xrpl/protocol/detail/sfields.macro @@ -360,6 +360,7 @@ UNTYPED_SFIELD(sfPriceData, OBJECT, 32) UNTYPED_SFIELD(sfCredential, OBJECT, 33) UNTYPED_SFIELD(sfRawTransaction, OBJECT, 34) UNTYPED_SFIELD(sfBatchSigner, OBJECT, 35) +UNTYPED_SFIELD(sfBook, OBJECT, 36) // array of objects (common) // ARRAY/1 is reserved for end of array @@ -375,6 +376,7 @@ UNTYPED_SFIELD(sfMemos, ARRAY, 9) UNTYPED_SFIELD(sfNFTokens, ARRAY, 10) UNTYPED_SFIELD(sfHooks, ARRAY, 11) UNTYPED_SFIELD(sfVoteSlots, ARRAY, 12) +UNTYPED_SFIELD(sfAdditionalBooks, ARRAY, 13) // array of objects (uncommon) UNTYPED_SFIELD(sfMajorities, ARRAY, 16) diff --git a/include/xrpl/protocol/detail/transactions.macro b/include/xrpl/protocol/detail/transactions.macro index 5d5faae505..6992410e4c 100644 --- a/include/xrpl/protocol/detail/transactions.macro +++ b/include/xrpl/protocol/detail/transactions.macro @@ -38,6 +38,7 @@ TRANSACTION(ttPAYMENT, 0, Payment, Delegation::delegatable, ({ {sfDestinationTag, soeOPTIONAL}, {sfDeliverMin, soeOPTIONAL, soeMPTSupported}, {sfCredentialIDs, soeOPTIONAL}, + {sfDomainID, soeOPTIONAL}, })) /** This transaction type creates an escrow object. */ @@ -93,6 +94,7 @@ TRANSACTION(ttOFFER_CREATE, 7, OfferCreate, Delegation::delegatable, ({ {sfTakerGets, soeREQUIRED}, {sfExpiration, soeOPTIONAL}, {sfOfferSequence, soeOPTIONAL}, + {sfDomainID, soeOPTIONAL}, })) /** This transaction type cancels existing offers to trade one asset for another. */ diff --git a/src/libxrpl/protocol/Book.cpp b/src/libxrpl/protocol/Book.cpp index cfd1fc61dc..2114deab6b 100644 --- a/src/libxrpl/protocol/Book.cpp +++ b/src/libxrpl/protocol/Book.cpp @@ -48,7 +48,7 @@ operator<<(std::ostream& os, Book const& x) Book reversed(Book const& book) { - return Book(book.out, book.in); + return Book(book.out, book.in, book.domain); } } // namespace ripple diff --git a/src/libxrpl/protocol/ErrorCodes.cpp b/src/libxrpl/protocol/ErrorCodes.cpp index b3d1b812b5..3109f51d05 100644 --- a/src/libxrpl/protocol/ErrorCodes.cpp +++ b/src/libxrpl/protocol/ErrorCodes.cpp @@ -116,7 +116,8 @@ constexpr static ErrorInfo unorderedErrorInfos[]{ {rpcUNKNOWN_COMMAND, "unknownCmd", "Unknown method.", 405}, {rpcORACLE_MALFORMED, "oracleMalformed", "Oracle request is malformed.", 400}, {rpcBAD_CREDENTIALS, "badCredentials", "Credentials do not exist, are not accepted, or have expired.", 400}, - {rpcTX_SIGNED, "transactionSigned", "Transaction should not be signed.", 400}}; + {rpcTX_SIGNED, "transactionSigned", "Transaction should not be signed.", 400}, + {rpcDOMAIN_MALFORMED, "domainMalformed", "Domain is malformed.", 400}}; // clang-format on // Sort and validate unorderedErrorInfos at compile time. Should be diff --git a/src/libxrpl/protocol/Indexes.cpp b/src/libxrpl/protocol/Indexes.cpp index 2426092d13..486945992a 100644 --- a/src/libxrpl/protocol/Indexes.cpp +++ b/src/libxrpl/protocol/Indexes.cpp @@ -117,12 +117,19 @@ getBookBase(Book const& book) XRPL_ASSERT( isConsistent(book), "ripple::getBookBase : input is consistent"); - auto const index = indexHash( - LedgerNameSpace::BOOK_DIR, - book.in.currency, - book.out.currency, - book.in.account, - book.out.account); + auto const index = book.domain ? 
indexHash( + LedgerNameSpace::BOOK_DIR, + book.in.currency, + book.out.currency, + book.in.account, + book.out.account, + *(book.domain)) + : indexHash( + LedgerNameSpace::BOOK_DIR, + book.in.currency, + book.out.currency, + book.in.account, + book.out.account); // Return with quality 0. auto k = keylet::quality({ltDIR_NODE, index}, 0); diff --git a/src/libxrpl/protocol/InnerObjectFormats.cpp b/src/libxrpl/protocol/InnerObjectFormats.cpp index 3f3b1e00c0..2de5e6624e 100644 --- a/src/libxrpl/protocol/InnerObjectFormats.cpp +++ b/src/libxrpl/protocol/InnerObjectFormats.cpp @@ -165,6 +165,13 @@ InnerObjectFormats::InnerObjectFormats() {sfSigningPubKey, soeOPTIONAL}, {sfTxnSignature, soeOPTIONAL}, {sfSigners, soeOPTIONAL}}); + + add(sfBook.jsonName, + sfBook.getCode(), + { + {sfBookDirectory, soeREQUIRED}, + {sfBookNode, soeREQUIRED}, + }); } InnerObjectFormats const& diff --git a/src/test/app/AMMExtended_test.cpp b/src/test/app/AMMExtended_test.cpp index d7caed9601..f9750eaa53 100644 --- a/src/test/app/AMMExtended_test.cpp +++ b/src/test/app/AMMExtended_test.cpp @@ -2156,6 +2156,7 @@ private: OfferCrossing::no, std::nullopt, smax, + std::nullopt, flowJournal); }(); diff --git a/src/test/app/CrossingLimits_test.cpp b/src/test/app/CrossingLimits_test.cpp index 1e19a178c2..cef0b03399 100644 --- a/src/test/app/CrossingLimits_test.cpp +++ b/src/test/app/CrossingLimits_test.cpp @@ -558,8 +558,11 @@ public: using namespace jtx; auto const sa = supported_amendments(); testAll(sa); - testAll(sa - featureFlowSortStrands); - testAll(sa - featureFlowCross - featureFlowSortStrands); + testAll(sa - featurePermissionedDEX); + testAll(sa - featureFlowSortStrands - featurePermissionedDEX); + testAll( + sa - featureFlowCross - featureFlowSortStrands - + featurePermissionedDEX); } }; diff --git a/src/test/app/DeliverMin_test.cpp b/src/test/app/DeliverMin_test.cpp index b079b93680..4ee7c9c72e 100644 --- a/src/test/app/DeliverMin_test.cpp +++ b/src/test/app/DeliverMin_test.cpp @@ -143,7 +143,9 @@ public: { using namespace jtx; auto const sa = supported_amendments(); - test_convert_all_of_an_asset(sa - featureFlowCross); + test_convert_all_of_an_asset( + sa - featureFlowCross - featurePermissionedDEX); + test_convert_all_of_an_asset(sa - featurePermissionedDEX); test_convert_all_of_an_asset(sa); } }; diff --git a/src/test/app/Discrepancy_test.cpp b/src/test/app/Discrepancy_test.cpp index 8e306282a7..bc72b2fd16 100644 --- a/src/test/app/Discrepancy_test.cpp +++ b/src/test/app/Discrepancy_test.cpp @@ -147,7 +147,8 @@ public: { using namespace test::jtx; auto const sa = supported_amendments(); - testXRPDiscrepancy(sa - featureFlowCross); + testXRPDiscrepancy(sa - featureFlowCross - featurePermissionedDEX); + testXRPDiscrepancy(sa - featurePermissionedDEX); testXRPDiscrepancy(sa); } }; diff --git a/src/test/app/Flow_test.cpp b/src/test/app/Flow_test.cpp index ae65432ac7..d0b8686db6 100644 --- a/src/test/app/Flow_test.cpp +++ b/src/test/app/Flow_test.cpp @@ -494,6 +494,7 @@ struct Flow_test : public beast::unit_test::suite OfferCrossing::no, std::nullopt, smax, + std::nullopt, flowJournal); }(); @@ -1475,7 +1476,8 @@ struct Flow_test : public beast::unit_test::suite using namespace jtx; auto const sa = supported_amendments(); - testWithFeats(sa - featureFlowCross); + testWithFeats(sa - featureFlowCross - featurePermissionedDEX); + testWithFeats(sa - featurePermissionedDEX); testWithFeats(sa); testEmptyStrand(sa); } @@ -1490,13 +1492,16 @@ struct Flow_manual_test : public Flow_test auto const all = 
supported_amendments(); FeatureBitset const flowCross{featureFlowCross}; FeatureBitset const f1513{fix1513}; + FeatureBitset const permDex{featurePermissionedDEX}; - testWithFeats(all - flowCross - f1513); - testWithFeats(all - flowCross); - testWithFeats(all - f1513); + testWithFeats(all - flowCross - f1513 - permDex); + testWithFeats(all - flowCross - permDex); + testWithFeats(all - f1513 - permDex); + testWithFeats(all - permDex); testWithFeats(all); - testEmptyStrand(all - f1513); + testEmptyStrand(all - f1513 - permDex); + testEmptyStrand(all - permDex); testEmptyStrand(all); } }; diff --git a/src/test/app/Freeze_test.cpp b/src/test/app/Freeze_test.cpp index 36578cbc6b..b28e794688 100644 --- a/src/test/app/Freeze_test.cpp +++ b/src/test/app/Freeze_test.cpp @@ -2020,9 +2020,11 @@ public: }; using namespace test::jtx; auto const sa = supported_amendments(); - testAll(sa - featureFlowCross - featureDeepFreeze); - testAll(sa - featureFlowCross); - testAll(sa - featureDeepFreeze); + testAll( + sa - featureFlowCross - featureDeepFreeze - featurePermissionedDEX); + testAll(sa - featureFlowCross - featurePermissionedDEX); + testAll(sa - featureDeepFreeze - featurePermissionedDEX); + testAll(sa - featurePermissionedDEX); testAll(sa); } }; diff --git a/src/test/app/Offer_test.cpp b/src/test/app/Offer_test.cpp index 4da8d8101e..0891b27df8 100644 --- a/src/test/app/Offer_test.cpp +++ b/src/test/app/Offer_test.cpp @@ -5419,13 +5419,16 @@ public: static FeatureBitset const immediateOfferKilled{ featureImmediateOfferKilled}; FeatureBitset const fillOrKill{fixFillOrKill}; + FeatureBitset const permDEX{featurePermissionedDEX}; - static std::array const feats{ - all - takerDryOffer - immediateOfferKilled, - all - flowCross - takerDryOffer - immediateOfferKilled, - all - flowCross - immediateOfferKilled, - all - rmSmallIncreasedQOffers - immediateOfferKilled - fillOrKill, - all - fillOrKill, + static std::array const feats{ + all - takerDryOffer - immediateOfferKilled - permDEX, + all - flowCross - takerDryOffer - immediateOfferKilled - permDEX, + all - flowCross - immediateOfferKilled - permDEX, + all - rmSmallIncreasedQOffers - immediateOfferKilled - fillOrKill - + permDEX, + all - fillOrKill - permDEX, + all - permDEX, all}; if (BEAST_EXPECT(instance < feats.size())) @@ -5479,12 +5482,21 @@ class OfferWOFillOrKill_test : public OfferBaseUtil_test } }; +class OfferWOPermDEX_test : public OfferBaseUtil_test +{ + void + run() override + { + OfferBaseUtil_test::run(5); + } +}; + class OfferAllFeatures_test : public OfferBaseUtil_test { void run() override { - OfferBaseUtil_test::run(5, true); + OfferBaseUtil_test::run(6, true); } }; @@ -5500,14 +5512,16 @@ class Offer_manual_test : public OfferBaseUtil_test FeatureBitset const immediateOfferKilled{featureImmediateOfferKilled}; FeatureBitset const takerDryOffer{fixTakerDryOfferRemoval}; FeatureBitset const fillOrKill{fixFillOrKill}; + FeatureBitset const permDEX{featurePermissionedDEX}; - testAll(all - flowCross - f1513 - immediateOfferKilled); - testAll(all - flowCross - immediateOfferKilled); - testAll(all - immediateOfferKilled - fillOrKill); - testAll(all - fillOrKill); + testAll(all - flowCross - f1513 - immediateOfferKilled - permDEX); + testAll(all - flowCross - immediateOfferKilled - permDEX); + testAll(all - immediateOfferKilled - fillOrKill - permDEX); + testAll(all - fillOrKill - permDEX); + testAll(all - permDEX); testAll(all); - testAll(all - flowCross - takerDryOffer); + testAll(all - flowCross - takerDryOffer - permDEX); } }; @@ -5516,6 
+5530,7 @@ BEAST_DEFINE_TESTSUITE_PRIO(OfferWOFlowCross, tx, ripple, 2); BEAST_DEFINE_TESTSUITE_PRIO(OfferWTakerDryOffer, tx, ripple, 2); BEAST_DEFINE_TESTSUITE_PRIO(OfferWOSmallQOffers, tx, ripple, 2); BEAST_DEFINE_TESTSUITE_PRIO(OfferWOFillOrKill, tx, ripple, 2); +BEAST_DEFINE_TESTSUITE_PRIO(OfferWOPermDEX, tx, ripple, 2); BEAST_DEFINE_TESTSUITE_PRIO(OfferAllFeatures, tx, ripple, 2); BEAST_DEFINE_TESTSUITE_MANUAL_PRIO(Offer_manual, tx, ripple, 20); diff --git a/src/test/app/Path_test.cpp b/src/test/app/Path_test.cpp index f325b0d2be..6ff22a5dc7 100644 --- a/src/test/app/Path_test.cpp +++ b/src/test/app/Path_test.cpp @@ -18,11 +18,12 @@ //============================================================================== #include +#include +#include #include +#include -#include #include -#include #include #include #include @@ -34,7 +35,12 @@ #include #include +#include +#include #include +#include +#include +#include namespace ripple { namespace test { @@ -126,7 +132,8 @@ public: jtx::Account const& dst, STAmount const& saDstAmount, std::optional const& saSendMax = std::nullopt, - std::optional const& saSrcCurrency = std::nullopt) + std::optional const& saSrcCurrency = std::nullopt, + std::optional const& domain = std::nullopt) { using namespace jtx; @@ -163,6 +170,8 @@ public: j[jss::currency] = to_string(saSrcCurrency.value()); sc.append(j); } + if (domain) + params[jss::domain] = to_string(*domain); Json::Value result; gate g; @@ -187,10 +196,11 @@ public: jtx::Account const& dst, STAmount const& saDstAmount, std::optional const& saSendMax = std::nullopt, - std::optional const& saSrcCurrency = std::nullopt) + std::optional const& saSrcCurrency = std::nullopt, + std::optional const& domain = std::nullopt) { Json::Value result = find_paths_request( - env, src, dst, saDstAmount, saSendMax, saSrcCurrency); + env, src, dst, saDstAmount, saSendMax, saSrcCurrency, domain); BEAST_EXPECT(!result.isMember(jss::error)); STAmount da; @@ -363,9 +373,11 @@ public: } void - path_find() + path_find(bool const domainEnabled) { - testcase("path find"); + testcase( + std::string("path find") + (domainEnabled ? " w/ " : " w/o ") + + "domain"); using namespace jtx; Env env = pathTestEnv(); auto const gw = Account("gateway"); @@ -377,31 +389,50 @@ public: env(pay(gw, "alice", USD(70))); env(pay(gw, "bob", USD(50))); + std::optional domainID; + if (domainEnabled) + domainID = setupDomain(env, {"alice", "bob", gw}); + STPathSet st; STAmount sa; - std::tie(st, sa, std::ignore) = - find_paths(env, "alice", "bob", Account("bob")["USD"](5)); + std::tie(st, sa, std::ignore) = find_paths( + env, + "alice", + "bob", + Account("bob")["USD"](5), + std::nullopt, + std::nullopt, + domainID); BEAST_EXPECT(same(st, stpath("gateway"))); BEAST_EXPECT(equal(sa, Account("alice")["USD"](5))); } void - xrp_to_xrp() + xrp_to_xrp(bool const domainEnabled) { using namespace jtx; - testcase("XRP to XRP"); + testcase( + std::string("XRP to XRP") + (domainEnabled ? 
" w/ " : " w/o ") + + "domain"); Env env = pathTestEnv(); env.fund(XRP(10000), "alice", "bob"); env.close(); - auto const result = find_paths(env, "alice", "bob", XRP(5)); + std::optional domainID; + if (domainEnabled) + domainID = setupDomain(env, {"alice", "bob"}); + + auto const result = find_paths( + env, "alice", "bob", XRP(5), std::nullopt, std::nullopt, domainID); BEAST_EXPECT(std::get<0>(result).empty()); } void - path_find_consume_all() + path_find_consume_all(bool const domainEnabled) { - testcase("path find consume all"); + testcase( + std::string("path find consume all") + + (domainEnabled ? " w/ " : " w/o ") + "domain"); using namespace jtx; { @@ -414,11 +445,22 @@ public: env.trust(Account("alice")["USD"](100), "dan"); env.trust(Account("dan")["USD"](100), "edward"); + std::optional domainID; + if (domainEnabled) + domainID = setupDomain( + env, {"alice", "bob", "carol", "dan", "edward"}); + STPathSet st; STAmount sa; STAmount da; std::tie(st, sa, da) = find_paths( - env, "alice", "edward", Account("edward")["USD"](-1)); + env, + "alice", + "edward", + Account("edward")["USD"](-1), + std::nullopt, + std::nullopt, + domainID); BEAST_EXPECT(same(st, stpath("dan"), stpath("bob", "carol"))); BEAST_EXPECT(equal(sa, Account("alice")["USD"](110))); BEAST_EXPECT(equal(da, Account("edward")["USD"](110))); @@ -431,8 +473,22 @@ public: env.fund(XRP(10000), "alice", "bob", "carol", gw); env.close(); env.trust(USD(100), "bob", "carol"); + env.close(); env(pay(gw, "carol", USD(100))); - env(offer("carol", XRP(100), USD(100))); + env.close(); + + std::optional domainID; + if (domainEnabled) + { + domainID = + setupDomain(env, {"alice", "bob", "carol", "gateway"}); + env(offer("carol", XRP(100), USD(100)), domain(*domainID)); + } + else + { + env(offer("carol", XRP(100), USD(100))); + } + env.close(); STPathSet st; STAmount sa; @@ -442,23 +498,44 @@ public: "alice", "bob", Account("bob")["AUD"](-1), - std::optional(XRP(100000000))); + std::optional(XRP(1000000)), + std::nullopt, + domainID); BEAST_EXPECT(st.empty()); std::tie(st, sa, da) = find_paths( env, "alice", "bob", Account("bob")["USD"](-1), - std::optional(XRP(100000000))); + std::optional(XRP(1000000)), + std::nullopt, + domainID); BEAST_EXPECT(sa == XRP(100)); BEAST_EXPECT(equal(da, Account("bob")["USD"](100))); + + // if domain is used, finding path in the open offerbook will return + // empty result + if (domainEnabled) + { + std::tie(st, sa, da) = find_paths( + env, + "alice", + "bob", + Account("bob")["USD"](-1), + std::optional(XRP(1000000)), + std::nullopt, + std::nullopt); // not specifying a domain + BEAST_EXPECT(st.empty()); + } } } void - alternative_path_consume_both() + alternative_path_consume_both(bool const domainEnabled) { - testcase("alternative path consume both"); + testcase( + std::string("alternative path consume both") + + (domainEnabled ? 
" w/ " : " w/o ") + "domain"); using namespace jtx; Env env = pathTestEnv(); auto const gw = Account("gateway"); @@ -471,10 +548,26 @@ public: env.trust(gw2_USD(800), "alice"); env.trust(USD(700), "bob"); env.trust(gw2_USD(900), "bob"); - env(pay(gw, "alice", USD(70))); - env(pay(gw2, "alice", gw2_USD(70))); - env(pay("alice", "bob", Account("bob")["USD"](140)), - paths(Account("alice")["USD"])); + + std::optional domainID; + if (domainEnabled) + { + domainID = + setupDomain(env, {"alice", "bob", "gateway", "gateway2"}); + env(pay(gw, "alice", USD(70)), domain(*domainID)); + env(pay(gw2, "alice", gw2_USD(70)), domain(*domainID)); + env(pay("alice", "bob", Account("bob")["USD"](140)), + paths(Account("alice")["USD"]), + domain(*domainID)); + } + else + { + env(pay(gw, "alice", USD(70))); + env(pay(gw2, "alice", gw2_USD(70))); + env(pay("alice", "bob", Account("bob")["USD"](140)), + paths(Account("alice")["USD"])); + } + env.require(balance("alice", USD(0))); env.require(balance("alice", gw2_USD(0))); env.require(balance("bob", USD(70))); @@ -486,9 +579,11 @@ public: } void - alternative_paths_consume_best_transfer() + alternative_paths_consume_best_transfer(bool const domainEnabled) { - testcase("alternative paths consume best transfer"); + testcase( + std::string("alternative paths consume best transfer") + + (domainEnabled ? " w/ " : " w/o ") + "domain"); using namespace jtx; Env env = pathTestEnv(); auto const gw = Account("gateway"); @@ -502,9 +597,22 @@ public: env.trust(gw2_USD(800), "alice"); env.trust(USD(700), "bob"); env.trust(gw2_USD(900), "bob"); - env(pay(gw, "alice", USD(70))); - env(pay(gw2, "alice", gw2_USD(70))); - env(pay("alice", "bob", USD(70))); + + std::optional domainID; + if (domainEnabled) + { + domainID = + setupDomain(env, {"alice", "bob", "gateway", "gateway2"}); + env(pay(gw, "alice", USD(70)), domain(*domainID)); + env(pay(gw2, "alice", gw2_USD(70)), domain(*domainID)); + env(pay("alice", "bob", USD(70)), domain(*domainID)); + } + else + { + env(pay(gw, "alice", USD(70))); + env(pay(gw2, "alice", gw2_USD(70))); + env(pay("alice", "bob", USD(70))); + } env.require(balance("alice", USD(0))); env.require(balance("alice", gw2_USD(70))); env.require(balance("bob", USD(70))); @@ -548,9 +656,13 @@ public: } void - alternative_paths_limit_returned_paths_to_best_quality() + alternative_paths_limit_returned_paths_to_best_quality( + bool const domainEnabled) { - testcase("alternative paths - limit returned paths to best quality"); + testcase( + std::string( + "alternative paths - limit returned paths to best quality") + + (domainEnabled ? 
" w/ " : " w/o ") + "domain"); using namespace jtx; Env env = pathTestEnv(); auto const gw = Account("gateway"); @@ -566,14 +678,31 @@ public: env.trust(gw2_USD(800), "alice", "bob"); env.trust(Account("alice")["USD"](800), "dan"); env.trust(Account("bob")["USD"](800), "dan"); + env.close(); env(pay(gw2, "alice", gw2_USD(100))); + env.close(); env(pay("carol", "alice", Account("carol")["USD"](100))); + env.close(); env(pay(gw, "alice", USD(100))); + env.close(); + + std::optional domainID; + if (domainEnabled) + { + domainID = + setupDomain(env, {"alice", "bob", "carol", "dan", gw, gw2}); + } STPathSet st; STAmount sa; - std::tie(st, sa, std::ignore) = - find_paths(env, "alice", "bob", Account("bob")["USD"](5)); + std::tie(st, sa, std::ignore) = find_paths( + env, + "alice", + "bob", + Account("bob")["USD"](5), + std::nullopt, + std::nullopt, + domainID); BEAST_EXPECT(same( st, stpath("gateway"), @@ -584,9 +713,11 @@ public: } void - issues_path_negative_issue() + issues_path_negative_issue(bool const domainEnabled) { - testcase("path negative: Issue #5"); + testcase( + std::string("path negative: Issue #5") + + (domainEnabled ? " w/ " : " w/o ") + "domain"); using namespace jtx; Env env = pathTestEnv(); env.fund(XRP(10000), "alice", "bob", "carol", "dan"); @@ -597,14 +728,35 @@ public: env(pay("bob", "carol", Account("bob")["USD"](75))); env.require(balance("bob", Account("carol")["USD"](-75))); env.require(balance("carol", Account("bob")["USD"](75))); + env.close(); - auto result = - find_paths(env, "alice", "bob", Account("bob")["USD"](25)); + std::optional domainID; + if (domainEnabled) + { + domainID = setupDomain(env, {"alice", "bob", "carol", "dan"}); + } + + auto result = find_paths( + env, + "alice", + "bob", + Account("bob")["USD"](25), + std::nullopt, + std::nullopt, + domainID); BEAST_EXPECT(std::get<0>(result).empty()); env(pay("alice", "bob", Account("alice")["USD"](25)), ter(tecPATH_DRY)); + env.close(); - result = find_paths(env, "alice", "bob", Account("alice")["USD"](25)); + result = find_paths( + env, + "alice", + "bob", + Account("alice")["USD"](25), + std::nullopt, + std::nullopt, + domainID); BEAST_EXPECT(std::get<0>(result).empty()); env.require(balance("alice", Account("bob")["USD"](0))); @@ -671,9 +823,11 @@ public: // bob will hold gateway AUD // alice pays bob gateway AUD using XRP void - via_offers_via_gateway() + via_offers_via_gateway(bool const domainEnabled) { - testcase("via gateway"); + testcase( + std::string("via gateway") + (domainEnabled ? 
" w/ " : " w/o ") + + "domain"); using namespace jtx; Env env = pathTestEnv(); auto const gw = Account("gateway"); @@ -681,15 +835,43 @@ public: env.fund(XRP(10000), "alice", "bob", "carol", gw); env.close(); env(rate(gw, 1.1)); + env.close(); env.trust(AUD(100), "bob", "carol"); + env.close(); env(pay(gw, "carol", AUD(50))); - env(offer("carol", XRP(50), AUD(50))); - env(pay("alice", "bob", AUD(10)), sendmax(XRP(100)), paths(XRP)); + env.close(); + + std::optional domainID; + if (domainEnabled) + { + domainID = setupDomain(env, {"alice", "bob", "carol", gw}); + env(offer("carol", XRP(50), AUD(50)), domain(*domainID)); + env.close(); + env(pay("alice", "bob", AUD(10)), + sendmax(XRP(100)), + paths(XRP), + domain(*domainID)); + env.close(); + } + else + { + env(offer("carol", XRP(50), AUD(50))); + env.close(); + env(pay("alice", "bob", AUD(10)), sendmax(XRP(100)), paths(XRP)); + env.close(); + } + env.require(balance("bob", AUD(10))); env.require(balance("carol", AUD(39))); - auto const result = - find_paths(env, "alice", "bob", Account("bob")["USD"](25)); + auto const result = find_paths( + env, + "alice", + "bob", + Account("bob")["USD"](25), + std::nullopt, + std::nullopt, + domainID); BEAST_EXPECT(std::get<0>(result).empty()); } @@ -865,9 +1047,11 @@ public: } void - path_find_01() + path_find_01(bool const domainEnabled) { - testcase("Path Find: XRP -> XRP and XRP -> IOU"); + testcase( + std::string("Path Find: XRP -> XRP and XRP -> IOU") + + (domainEnabled ? " w/ " : " w/o ") + "domain"); using namespace jtx; Env env = pathTestEnv(); Account A1{"A1"}; @@ -899,16 +1083,28 @@ public: env(pay(G3, M1, G3["ABC"](25000))); env.close(); - env(offer(M1, G1["XYZ"](1000), G2["XYZ"](1000))); - env(offer(M1, XRP(10000), G3["ABC"](1000))); + std::optional domainID; + if (domainEnabled) + { + domainID = setupDomain(env, {A1, A2, A3, G1, G2, G3, M1}); + env(offer(M1, G1["XYZ"](1000), G2["XYZ"](1000)), domain(*domainID)); + env(offer(M1, XRP(10000), G3["ABC"](1000)), domain(*domainID)); + env.close(); + } + else + { + env(offer(M1, G1["XYZ"](1000), G2["XYZ"](1000))); + env(offer(M1, XRP(10000), G3["ABC"](1000))); + env.close(); + } STPathSet st; STAmount sa, da; { auto const& send_amt = XRP(10); - std::tie(st, sa, da) = - find_paths(env, A1, A2, send_amt, std::nullopt, xrpCurrency()); + std::tie(st, sa, da) = find_paths( + env, A1, A2, send_amt, std::nullopt, xrpCurrency(), domainID); BEAST_EXPECT(equal(da, send_amt)); BEAST_EXPECT(st.empty()); } @@ -918,15 +1114,21 @@ public: // does not exist. 
auto const& send_amt = XRP(200); std::tie(st, sa, da) = find_paths( - env, A1, Account{"A0"}, send_amt, std::nullopt, xrpCurrency()); + env, + A1, + Account{"A0"}, + send_amt, + std::nullopt, + xrpCurrency(), + domainID); BEAST_EXPECT(equal(da, send_amt)); BEAST_EXPECT(st.empty()); } { auto const& send_amt = G3["ABC"](10); - std::tie(st, sa, da) = - find_paths(env, A2, G3, send_amt, std::nullopt, xrpCurrency()); + std::tie(st, sa, da) = find_paths( + env, A2, G3, send_amt, std::nullopt, xrpCurrency(), domainID); BEAST_EXPECT(equal(da, send_amt)); BEAST_EXPECT(equal(sa, XRP(100))); BEAST_EXPECT(same(st, stpath(IPE(G3["ABC"])))); @@ -934,8 +1136,8 @@ public: { auto const& send_amt = A2["ABC"](1); - std::tie(st, sa, da) = - find_paths(env, A1, A2, send_amt, std::nullopt, xrpCurrency()); + std::tie(st, sa, da) = find_paths( + env, A1, A2, send_amt, std::nullopt, xrpCurrency(), domainID); BEAST_EXPECT(equal(da, send_amt)); BEAST_EXPECT(equal(sa, XRP(10))); BEAST_EXPECT(same(st, stpath(IPE(G3["ABC"]), G3))); @@ -943,8 +1145,8 @@ public: { auto const& send_amt = A3["ABC"](1); - std::tie(st, sa, da) = - find_paths(env, A1, A3, send_amt, std::nullopt, xrpCurrency()); + std::tie(st, sa, da) = find_paths( + env, A1, A3, send_amt, std::nullopt, xrpCurrency(), domainID); BEAST_EXPECT(equal(da, send_amt)); BEAST_EXPECT(equal(sa, XRP(10))); BEAST_EXPECT(same(st, stpath(IPE(G3["ABC"]), G3, A2))); @@ -952,9 +1154,11 @@ public: } void - path_find_02() + path_find_02(bool const domainEnabled) { - testcase("Path Find: non-XRP -> XRP"); + testcase( + std::string("Path Find: non-XRP -> XRP") + + (domainEnabled ? " w/ " : " w/o ") + "domain"); using namespace jtx; Env env = pathTestEnv(); Account A1{"A1"}; @@ -975,23 +1179,53 @@ public: env(pay(G3, M1, G3["ABC"](1200))); env.close(); - env(offer(M1, G3["ABC"](1000), XRP(10000))); + std::optional domainID; + if (domainEnabled) + { + domainID = setupDomain(env, {A1, A2, G3, M1}); + env(offer(M1, G3["ABC"](1000), XRP(10000)), domain(*domainID)); + } + else + { + env(offer(M1, G3["ABC"](1000), XRP(10000))); + } STPathSet st; STAmount sa, da; - auto const& send_amt = XRP(10); - std::tie(st, sa, da) = - find_paths(env, A1, A2, send_amt, std::nullopt, A2["ABC"].currency); - BEAST_EXPECT(equal(da, send_amt)); - BEAST_EXPECT(equal(sa, A1["ABC"](1))); - BEAST_EXPECT(same(st, stpath(G3, IPE(xrpIssue())))); + + { + std::tie(st, sa, da) = find_paths( + env, + A1, + A2, + send_amt, + std::nullopt, + A2["ABC"].currency, + domainID); + BEAST_EXPECT(equal(da, send_amt)); + BEAST_EXPECT(equal(sa, A1["ABC"](1))); + BEAST_EXPECT(same(st, stpath(G3, IPE(xrpIssue())))); + } + + // domain offer will not be considered in pathfinding for non-domain + // paths + if (domainEnabled) + { + std::tie(st, sa, da) = find_paths( + env, A1, A2, send_amt, std::nullopt, A2["ABC"].currency); + BEAST_EXPECT(equal(da, send_amt)); + BEAST_EXPECT(st.empty()); + } } void - path_find_04() + path_find_04(bool const domainEnabled) { - testcase("Path Find: Bitstamp and SnapSwap, liquidity with no offers"); + testcase( + std::string( + "Path Find: Bitstamp and SnapSwap, liquidity with no offers") + + (domainEnabled ? 
" w/ " : " w/o ") + "domain"); using namespace jtx; Env env = pathTestEnv(); Account A1{"A1"}; @@ -1019,13 +1253,23 @@ public: env(pay(G2SW, M1, G2SW["HKD"](5000))); env.close(); + std::optional domainID; + if (domainEnabled) + domainID = setupDomain(env, {A1, A2, G1BS, G2SW, M1}); + STPathSet st; STAmount sa, da; { auto const& send_amt = A2["HKD"](10); std::tie(st, sa, da) = find_paths( - env, A1, A2, send_amt, std::nullopt, A2["HKD"].currency); + env, + A1, + A2, + send_amt, + std::nullopt, + A2["HKD"].currency, + domainID); BEAST_EXPECT(equal(da, send_amt)); BEAST_EXPECT(equal(sa, A1["HKD"](10))); BEAST_EXPECT(same(st, stpath(G1BS, M1, G2SW))); @@ -1034,7 +1278,13 @@ public: { auto const& send_amt = A1["HKD"](10); std::tie(st, sa, da) = find_paths( - env, A2, A1, send_amt, std::nullopt, A1["HKD"].currency); + env, + A2, + A1, + send_amt, + std::nullopt, + A1["HKD"].currency, + domainID); BEAST_EXPECT(equal(da, send_amt)); BEAST_EXPECT(equal(sa, A2["HKD"](10))); BEAST_EXPECT(same(st, stpath(G2SW, M1, G1BS))); @@ -1043,7 +1293,13 @@ public: { auto const& send_amt = A2["HKD"](10); std::tie(st, sa, da) = find_paths( - env, G1BS, A2, send_amt, std::nullopt, A1["HKD"].currency); + env, + G1BS, + A2, + send_amt, + std::nullopt, + A1["HKD"].currency, + domainID); BEAST_EXPECT(equal(da, send_amt)); BEAST_EXPECT(equal(sa, G1BS["HKD"](10))); BEAST_EXPECT(same(st, stpath(M1, G2SW))); @@ -1052,7 +1308,13 @@ public: { auto const& send_amt = M1["HKD"](10); std::tie(st, sa, da) = find_paths( - env, M1, G1BS, send_amt, std::nullopt, A1["HKD"].currency); + env, + M1, + G1BS, + send_amt, + std::nullopt, + A1["HKD"].currency, + domainID); BEAST_EXPECT(equal(da, send_amt)); BEAST_EXPECT(equal(sa, M1["HKD"](10))); BEAST_EXPECT(st.empty()); @@ -1061,7 +1323,13 @@ public: { auto const& send_amt = A1["HKD"](10); std::tie(st, sa, da) = find_paths( - env, G2SW, A1, send_amt, std::nullopt, A1["HKD"].currency); + env, + G2SW, + A1, + send_amt, + std::nullopt, + A1["HKD"].currency, + domainID); BEAST_EXPECT(equal(da, send_amt)); BEAST_EXPECT(equal(sa, G2SW["HKD"](10))); BEAST_EXPECT(same(st, stpath(M1, G1BS))); @@ -1069,9 +1337,11 @@ public: } void - path_find_05() + path_find_05(bool const domainEnabled) { - testcase("Path Find: non-XRP -> non-XRP, same currency"); + testcase( + std::string("Path Find: non-XRP -> non-XRP, same currency") + + (domainEnabled ? 
" w/ " : " w/o ") + "domain"); using namespace jtx; Env env = pathTestEnv(); Account A1{"A1"}; @@ -1108,9 +1378,21 @@ public: env(pay(G2, M2, G2["HKD"](5000))); env.close(); - env(offer(M1, G1["HKD"](1000), G2["HKD"](1000))); - env(offer(M2, XRP(10000), G2["HKD"](1000))); - env(offer(M2, G1["HKD"](1000), XRP(10000))); + std::optional domainID; + if (domainEnabled) + { + domainID = + setupDomain(env, {A1, A2, A3, A4, G1, G2, G3, G4, M1, M2}); + env(offer(M1, G1["HKD"](1000), G2["HKD"](1000)), domain(*domainID)); + env(offer(M2, XRP(10000), G2["HKD"](1000)), domain(*domainID)); + env(offer(M2, G1["HKD"](1000), XRP(10000)), domain(*domainID)); + } + else + { + env(offer(M1, G1["HKD"](1000), G2["HKD"](1000))); + env(offer(M2, XRP(10000), G2["HKD"](1000))); + env(offer(M2, G1["HKD"](1000), XRP(10000))); + } STPathSet st; STAmount sa, da; @@ -1120,7 +1402,13 @@ public: // Source -> Destination (repay source issuer) auto const& send_amt = G1["HKD"](10); std::tie(st, sa, da) = find_paths( - env, A1, G1, send_amt, std::nullopt, G1["HKD"].currency); + env, + A1, + G1, + send_amt, + std::nullopt, + G1["HKD"].currency, + domainID); BEAST_EXPECT(st.empty()); BEAST_EXPECT(equal(da, send_amt)); BEAST_EXPECT(equal(sa, A1["HKD"](10))); @@ -1131,7 +1419,13 @@ public: // Source -> Destination (repay destination issuer) auto const& send_amt = A1["HKD"](10); std::tie(st, sa, da) = find_paths( - env, A1, G1, send_amt, std::nullopt, G1["HKD"].currency); + env, + A1, + G1, + send_amt, + std::nullopt, + G1["HKD"].currency, + domainID); BEAST_EXPECT(st.empty()); BEAST_EXPECT(equal(da, send_amt)); BEAST_EXPECT(equal(sa, A1["HKD"](10))); @@ -1142,7 +1436,13 @@ public: // Source -> AC -> Destination auto const& send_amt = A3["HKD"](10); std::tie(st, sa, da) = find_paths( - env, A1, A3, send_amt, std::nullopt, G1["HKD"].currency); + env, + A1, + A3, + send_amt, + std::nullopt, + G1["HKD"].currency, + domainID); BEAST_EXPECT(equal(da, send_amt)); BEAST_EXPECT(equal(sa, A1["HKD"](10))); BEAST_EXPECT(same(st, stpath(G1))); @@ -1153,7 +1453,13 @@ public: // Source -> OB -> Destination auto const& send_amt = G2["HKD"](10); std::tie(st, sa, da) = find_paths( - env, G1, G2, send_amt, std::nullopt, G1["HKD"].currency); + env, + G1, + G2, + send_amt, + std::nullopt, + G1["HKD"].currency, + domainID); BEAST_EXPECT(equal(da, send_amt)); BEAST_EXPECT(equal(sa, G1["HKD"](10))); BEAST_EXPECT(same( @@ -1169,7 +1475,13 @@ public: // Source -> AC -> OB -> Destination auto const& send_amt = G2["HKD"](10); std::tie(st, sa, da) = find_paths( - env, A1, G2, send_amt, std::nullopt, G1["HKD"].currency); + env, + A1, + G2, + send_amt, + std::nullopt, + G1["HKD"].currency, + domainID); BEAST_EXPECT(equal(da, send_amt)); BEAST_EXPECT(equal(sa, A1["HKD"](10))); BEAST_EXPECT(same( @@ -1182,10 +1494,17 @@ public: { // I4) XRP bridge" -- - // Source -> AC -> OB to XRP -> OB from XRP -> AC -> Destination + // Source -> AC -> OB to XRP -> OB from XRP -> AC -> + // Destination auto const& send_amt = A2["HKD"](10); std::tie(st, sa, da) = find_paths( - env, A1, A2, send_amt, std::nullopt, G1["HKD"].currency); + env, + A1, + A2, + send_amt, + std::nullopt, + G1["HKD"].currency, + domainID); BEAST_EXPECT(equal(da, send_amt)); BEAST_EXPECT(equal(sa, A1["HKD"](10))); BEAST_EXPECT(same( @@ -1198,9 +1517,11 @@ public: } void - path_find_06() + path_find_06(bool const domainEnabled) { - testcase("Path Find: non-XRP -> non-XRP, same currency)"); + testcase( + std::string("Path Find: non-XRP -> non-XRP, same currency)") + + (domainEnabled ? 
" w/ " : " w/o ") + "domain"); using namespace jtx; Env env = pathTestEnv(); Account A1{"A1"}; @@ -1227,24 +1548,36 @@ public: env(pay(G2, M1, G2["HKD"](5000))); env.close(); - env(offer(M1, G1["HKD"](1000), G2["HKD"](1000))); + std::optional domainID; + if (domainEnabled) + { + domainID = setupDomain(env, {A1, A2, A3, G1, G2, M1}); + env(offer(M1, G1["HKD"](1000), G2["HKD"](1000)), domain(*domainID)); + } + else + { + env(offer(M1, G1["HKD"](1000), G2["HKD"](1000))); + } // E) Gateway to user // Source -> OB -> AC -> Destination auto const& send_amt = A2["HKD"](10); STPathSet st; STAmount sa, da; - std::tie(st, sa, da) = - find_paths(env, G1, A2, send_amt, std::nullopt, G1["HKD"].currency); + std::tie(st, sa, da) = find_paths( + env, G1, A2, send_amt, std::nullopt, G1["HKD"].currency, domainID); BEAST_EXPECT(equal(da, send_amt)); BEAST_EXPECT(equal(sa, G1["HKD"](10))); BEAST_EXPECT(same(st, stpath(M1, G2), stpath(IPE(G2["HKD"]), G2))); } void - receive_max() + receive_max(bool const domainEnabled) { - testcase("Receive max"); + testcase( + std::string("Receive max") + (domainEnabled ? " w/ " : " w/o ") + + "domain"); + using namespace jtx; auto const alice = Account("alice"); auto const bob = Account("bob"); @@ -1260,10 +1593,28 @@ public: env.close(); env(pay(gw, charlie, USD(10))); env.close(); - env(offer(charlie, XRP(10), USD(10))); - env.close(); - auto [st, sa, da] = - find_paths(env, alice, bob, USD(-1), XRP(100).value()); + + std::optional domainID; + if (domainEnabled) + { + domainID = setupDomain(env, {alice, bob, charlie, gw}); + env(offer(charlie, XRP(10), USD(10)), domain(*domainID)); + env.close(); + } + else + { + env(offer(charlie, XRP(10), USD(10))); + env.close(); + } + + auto [st, sa, da] = find_paths( + env, + alice, + bob, + USD(-1), + XRP(100).value(), + std::nullopt, + domainID); BEAST_EXPECT(sa == XRP(10)); BEAST_EXPECT(equal(da, USD(10))); if (BEAST_EXPECT(st.size() == 1 && st[0].size() == 1)) @@ -1283,10 +1634,28 @@ public: env.close(); env(pay(gw, alice, USD(10))); env.close(); - env(offer(charlie, USD(10), XRP(10))); - env.close(); - auto [st, sa, da] = - find_paths(env, alice, bob, drops(-1), USD(100).value()); + + std::optional domainID; + if (domainEnabled) + { + domainID = setupDomain(env, {alice, bob, charlie, gw}); + env(offer(charlie, USD(10), XRP(10)), domain(*domainID)); + env.close(); + } + else + { + env(offer(charlie, USD(10), XRP(10))); + env.close(); + } + + auto [st, sa, da] = find_paths( + env, + alice, + bob, + drops(-1), + USD(100).value(), + std::nullopt, + domainID); BEAST_EXPECT(sa == USD(10)); BEAST_EXPECT(equal(da, XRP(10))); if (BEAST_EXPECT(st.size() == 1 && st[0].size() == 1)) @@ -1363,6 +1732,360 @@ public: test("no ripple -> no ripple", false, false, false); } + void + hybrid_offer_path() + { + testcase("Hybrid offer path"); + using namespace jtx; + + // test cases copied from path_find_05 and ensures path results for + // different combinations of open/domain/hybrid offers. 
`func` is a + // lambda param that creates different types of offers + auto testPathfind = [&](auto func, bool const domainEnabled = false) { + Env env = pathTestEnv(); + Account A1{"A1"}; + Account A2{"A2"}; + Account A3{"A3"}; + Account A4{"A4"}; + Account G1{"G1"}; + Account G2{"G2"}; + Account G3{"G3"}; + Account G4{"G4"}; + Account M1{"M1"}; + Account M2{"M2"}; + + env.fund(XRP(1000), A1, A2, A3, G1, G2, G3, G4); + env.fund(XRP(10000), A4); + env.fund(XRP(11000), M1, M2); + env.close(); + + env.trust(G1["HKD"](2000), A1); + env.trust(G2["HKD"](2000), A2); + env.trust(G1["HKD"](2000), A3); + env.trust(G1["HKD"](100000), M1); + env.trust(G2["HKD"](100000), M1); + env.trust(G1["HKD"](100000), M2); + env.trust(G2["HKD"](100000), M2); + env.close(); + + env(pay(G1, A1, G1["HKD"](1000))); + env(pay(G2, A2, G2["HKD"](1000))); + env(pay(G1, A3, G1["HKD"](1000))); + env(pay(G1, M1, G1["HKD"](1200))); + env(pay(G2, M1, G2["HKD"](5000))); + env(pay(G1, M2, G1["HKD"](1200))); + env(pay(G2, M2, G2["HKD"](5000))); + env.close(); + + std::optional domainID = + setupDomain(env, {A1, A2, A3, A4, G1, G2, G3, G4, M1, M2}); + BEAST_EXPECT(domainID); + + func(env, M1, M2, G1, G2, *domainID); + + STPathSet st; + STAmount sa, da; + + { + // A) Borrow or repay -- + // Source -> Destination (repay source issuer) + auto const& send_amt = G1["HKD"](10); + std::tie(st, sa, da) = find_paths( + env, + A1, + G1, + send_amt, + std::nullopt, + G1["HKD"].currency, + domainEnabled ? domainID : std::nullopt); + BEAST_EXPECT(st.empty()); + BEAST_EXPECT(equal(da, send_amt)); + BEAST_EXPECT(equal(sa, A1["HKD"](10))); + } + + { + // A2) Borrow or repay -- + // Source -> Destination (repay destination issuer) + auto const& send_amt = A1["HKD"](10); + std::tie(st, sa, da) = find_paths( + env, + A1, + G1, + send_amt, + std::nullopt, + G1["HKD"].currency, + domainEnabled ? domainID : std::nullopt); + BEAST_EXPECT(st.empty()); + BEAST_EXPECT(equal(da, send_amt)); + BEAST_EXPECT(equal(sa, A1["HKD"](10))); + } + + { + // B) Common gateway -- + // Source -> AC -> Destination + auto const& send_amt = A3["HKD"](10); + std::tie(st, sa, da) = find_paths( + env, + A1, + A3, + send_amt, + std::nullopt, + G1["HKD"].currency, + domainEnabled ? domainID : std::nullopt); + BEAST_EXPECT(equal(da, send_amt)); + BEAST_EXPECT(equal(sa, A1["HKD"](10))); + BEAST_EXPECT(same(st, stpath(G1))); + } + + { + // C) Gateway to gateway -- + // Source -> OB -> Destination + auto const& send_amt = G2["HKD"](10); + std::tie(st, sa, da) = find_paths( + env, + G1, + G2, + send_amt, + std::nullopt, + G1["HKD"].currency, + domainEnabled ? domainID : std::nullopt); + BEAST_EXPECT(equal(da, send_amt)); + BEAST_EXPECT(equal(sa, G1["HKD"](10))); + BEAST_EXPECT(same( + st, + stpath(IPE(G2["HKD"])), + stpath(M1), + stpath(M2), + stpath(IPE(xrpIssue()), IPE(G2["HKD"])))); + } + + { + // D) User to unlinked gateway via order book -- + // Source -> AC -> OB -> Destination + auto const& send_amt = G2["HKD"](10); + std::tie(st, sa, da) = find_paths( + env, + A1, + G2, + send_amt, + std::nullopt, + G1["HKD"].currency, + domainEnabled ? 
domainID : std::nullopt); + BEAST_EXPECT(equal(da, send_amt)); + BEAST_EXPECT(equal(sa, A1["HKD"](10))); + BEAST_EXPECT(same( + st, + stpath(G1, M1), + stpath(G1, M2), + stpath(G1, IPE(G2["HKD"])), + stpath(G1, IPE(xrpIssue()), IPE(G2["HKD"])))); + } + + { + // I4) XRP bridge" -- + // Source -> AC -> OB to XRP -> OB from XRP -> AC -> + // Destination + auto const& send_amt = A2["HKD"](10); + std::tie(st, sa, da) = find_paths( + env, + A1, + A2, + send_amt, + std::nullopt, + G1["HKD"].currency, + domainEnabled ? domainID : std::nullopt); + BEAST_EXPECT(equal(da, send_amt)); + BEAST_EXPECT(equal(sa, A1["HKD"](10))); + BEAST_EXPECT(same( + st, + stpath(G1, M1, G2), + stpath(G1, M2, G2), + stpath(G1, IPE(G2["HKD"]), G2), + stpath(G1, IPE(xrpIssue()), IPE(G2["HKD"]), G2))); + } + }; + + // the following tests exercise different combinations of open/hybrid + // offers to make sure that hybrid offers work in pathfinding for open + // order book + { + testPathfind([](Env& env, + Account M1, + Account M2, + Account G1, + Account G2, + uint256 domainID) { + env(offer(M1, G1["HKD"](1000), G2["HKD"](1000)), + domain(domainID), + txflags(tfHybrid)); + env(offer(M2, XRP(10000), G2["HKD"](1000))); + env(offer(M2, G1["HKD"](1000), XRP(10000))); + }); + + testPathfind([](Env& env, + Account M1, + Account M2, + Account G1, + Account G2, + uint256 domainID) { + env(offer(M1, G1["HKD"](1000), G2["HKD"](1000)), + domain(domainID), + txflags(tfHybrid)); + env(offer(M2, XRP(10000), G2["HKD"](1000)), + domain(domainID), + txflags(tfHybrid)); + env(offer(M2, G1["HKD"](1000), XRP(10000))); + }); + + testPathfind([](Env& env, + Account M1, + Account M2, + Account G1, + Account G2, + uint256 domainID) { + env(offer(M1, G1["HKD"](1000), G2["HKD"](1000)), + domain(domainID), + txflags(tfHybrid)); + env(offer(M2, XRP(10000), G2["HKD"](1000)), + domain(domainID), + txflags(tfHybrid)); + env(offer(M2, G1["HKD"](1000), XRP(10000)), + domain(domainID), + txflags(tfHybrid)); + }); + + testPathfind([](Env& env, + Account M1, + Account M2, + Account G1, + Account G2, + uint256 domainID) { + env(offer(M1, G1["HKD"](1000), G2["HKD"](1000))); + env(offer(M2, XRP(10000), G2["HKD"](1000))); + env(offer(M2, G1["HKD"](1000), XRP(10000)), + domain(domainID), + txflags(tfHybrid)); + }); + + testPathfind([](Env& env, + Account M1, + Account M2, + Account G1, + Account G2, + uint256 domainID) { + env(offer(M1, G1["HKD"](1000), G2["HKD"](1000))); + env(offer(M2, XRP(10000), G2["HKD"](1000)), + domain(domainID), + txflags(tfHybrid)); + env(offer(M2, G1["HKD"](1000), XRP(10000)), + domain(domainID), + txflags(tfHybrid)); + }); + } + + // the following tests exercise different combinations of domain/hybrid + // offers to make sure that hybrid offers work in pathfinding for domain + // order book + { + testPathfind( + [](Env& env, + Account M1, + Account M2, + Account G1, + Account G2, + uint256 domainID) { + env(offer(M1, G1["HKD"](1000), G2["HKD"](1000)), + domain(domainID), + txflags(tfHybrid)); + env(offer(M2, XRP(10000), G2["HKD"](1000)), + domain(domainID)); + env(offer(M2, G1["HKD"](1000), XRP(10000)), + domain(domainID)); + }, + true); + + testPathfind( + [](Env& env, + Account M1, + Account M2, + Account G1, + Account G2, + uint256 domainID) { + env(offer(M1, G1["HKD"](1000), G2["HKD"](1000)), + domain(domainID), + txflags(tfHybrid)); + env(offer(M2, XRP(10000), G2["HKD"](1000)), + domain(domainID), + txflags(tfHybrid)); + env(offer(M2, G1["HKD"](1000), XRP(10000)), + domain(domainID)); + }, + true); + + testPathfind( + [](Env& env, 
+ Account M1, + Account M2, + Account G1, + Account G2, + uint256 domainID) { + env(offer(M1, G1["HKD"](1000), G2["HKD"](1000)), + domain(domainID)); + env(offer(M2, XRP(10000), G2["HKD"](1000)), + domain(domainID)); + env(offer(M2, G1["HKD"](1000), XRP(10000)), + domain(domainID), + txflags(tfHybrid)); + }, + true); + + testPathfind( + [](Env& env, + Account M1, + Account M2, + Account G1, + Account G2, + uint256 domainID) { + env(offer(M1, G1["HKD"](1000), G2["HKD"](1000)), + domain(domainID)); + env(offer(M2, XRP(10000), G2["HKD"](1000)), + domain(domainID), + txflags(tfHybrid)); + env(offer(M2, G1["HKD"](1000), XRP(10000)), + domain(domainID), + txflags(tfHybrid)); + }, + true); + } + } + + void + amm_domain_path() + { + testcase("AMM not used in domain path"); + using namespace jtx; + Env env = pathTestEnv(); + PermissionedDEX permDex(env); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + permDex; + AMM amm(env, alice, XRP(10), USD(50)); + + STPathSet st; + STAmount sa, da; + + auto const& send_amt = XRP(1); + + // doing pathfind with domain won't include amm + std::tie(st, sa, da) = find_paths( + env, bob, carol, send_amt, std::nullopt, USD.currency, domainID); + BEAST_EXPECT(st.empty()); + + // a non-domain pathfind returns amm in the path + std::tie(st, sa, da) = + find_paths(env, bob, carol, send_amt, std::nullopt, USD.currency); + BEAST_EXPECT(same(st, stpath(gw, IPE(xrpIssue())))); + } + void run() override { @@ -1370,35 +2093,43 @@ public: no_direct_path_no_intermediary_no_alternatives(); direct_path_no_intermediary(); payment_auto_path_find(); - path_find(); - path_find_consume_all(); - alternative_path_consume_both(); - alternative_paths_consume_best_transfer(); + indirect_paths_path_find(); alternative_paths_consume_best_transfer_first(); - alternative_paths_limit_returned_paths_to_best_quality(); - issues_path_negative_issue(); issues_path_negative_ripple_client_issue_23_smaller(); issues_path_negative_ripple_client_issue_23_larger(); - via_offers_via_gateway(); - indirect_paths_path_find(); quality_paths_quality_set_and_test(); trust_auto_clear_trust_normal_clear(); trust_auto_clear_trust_auto_clear(); - xrp_to_xrp(); - receive_max(); noripple_combinations(); - // The following path_find_NN tests are data driven tests - // that were originally implemented in js/coffee and migrated - // here. The quantities and currencies used are taken directly from - // those legacy tests, which in some cases probably represented - // customer use cases. + for (bool const domainEnabled : {false, true}) + { + path_find(domainEnabled); + path_find_consume_all(domainEnabled); + alternative_path_consume_both(domainEnabled); + alternative_paths_consume_best_transfer(domainEnabled); + alternative_paths_limit_returned_paths_to_best_quality( + domainEnabled); + issues_path_negative_issue(domainEnabled); + via_offers_via_gateway(domainEnabled); + xrp_to_xrp(domainEnabled); + receive_max(domainEnabled); - path_find_01(); - path_find_02(); - path_find_04(); - path_find_05(); - path_find_06(); + // The following path_find_NN tests are data driven tests + // that were originally implemented in js/coffee and migrated + // here. The quantities and currencies used are taken directly from + // those legacy tests, which in some cases probably represented + // customer use cases. 
+ + path_find_01(domainEnabled); + path_find_02(domainEnabled); + path_find_04(domainEnabled); + path_find_05(domainEnabled); + path_find_06(domainEnabled); + } + + hybrid_offer_path(); + amm_domain_path(); } }; diff --git a/src/test/app/PayStrand_test.cpp b/src/test/app/PayStrand_test.cpp index 4d743d9d7c..9188da62ac 100644 --- a/src/test/app/PayStrand_test.cpp +++ b/src/test/app/PayStrand_test.cpp @@ -27,6 +27,9 @@ #include #include #include +#include + +#include namespace ripple { namespace test { @@ -656,6 +659,7 @@ struct PayStrand_test : public beast::unit_test::suite true, OfferCrossing::no, ammContext, + std::nullopt, env.app().logs().journal("Flow")); BEAST_EXPECT(ter == expTer); if (sizeof...(expSteps) != 0) @@ -684,6 +688,7 @@ struct PayStrand_test : public beast::unit_test::suite true, OfferCrossing::no, ammContext, + std::nullopt, env.app().logs().journal("Flow")); (void)_; BEAST_EXPECT(ter == tesSUCCESS); @@ -701,6 +706,7 @@ struct PayStrand_test : public beast::unit_test::suite true, OfferCrossing::no, ammContext, + std::nullopt, env.app().logs().journal("Flow")); (void)_; BEAST_EXPECT(ter == tesSUCCESS); @@ -738,7 +744,7 @@ struct PayStrand_test : public beast::unit_test::suite STPath(), tesSUCCESS, D{alice, gw, usdC}, - B{USD, EUR}, + B{USD, EUR, std::nullopt}, D{gw, bob, eurC}); // Path with explicit offer @@ -749,7 +755,7 @@ struct PayStrand_test : public beast::unit_test::suite STPath({ipe(EUR)}), tesSUCCESS, D{alice, gw, usdC}, - B{USD, EUR}, + B{USD, EUR, std::nullopt}, D{gw, bob, eurC}); // Path with offer that changes issuer only @@ -761,7 +767,7 @@ struct PayStrand_test : public beast::unit_test::suite STPath({iape(carol)}), tesSUCCESS, D{alice, gw, usdC}, - B{USD, carol["USD"]}, + B{USD, carol["USD"], std::nullopt}, D{carol, bob, usdC}); // Path with XRP src currency @@ -772,7 +778,7 @@ struct PayStrand_test : public beast::unit_test::suite STPath({ipe(USD)}), tesSUCCESS, XRPS{alice}, - B{XRP, USD}, + B{XRP, USD, std::nullopt}, D{gw, bob, usdC}); // Path with XRP dst currency. 
@@ -787,7 +793,7 @@ struct PayStrand_test : public beast::unit_test::suite xrpAccount()}}), tesSUCCESS, D{alice, gw, usdC}, - B{USD, XRP}, + B{USD, XRP, std::nullopt}, XRPS{bob}); // Path with XRP cross currency bridged payment @@ -798,8 +804,8 @@ struct PayStrand_test : public beast::unit_test::suite STPath({cpe(xrpCurrency())}), tesSUCCESS, D{alice, gw, usdC}, - B{USD, XRP}, - B{XRP, EUR}, + B{USD, XRP, std::nullopt}, + B{XRP, EUR, std::nullopt}, D{gw, bob, eurC}); // XRP -> XRP transaction can't include a path @@ -821,6 +827,7 @@ struct PayStrand_test : public beast::unit_test::suite true, OfferCrossing::no, ammContext, + std::nullopt, flowJournal); BEAST_EXPECT(r.first == temBAD_PATH); } @@ -837,6 +844,7 @@ struct PayStrand_test : public beast::unit_test::suite true, OfferCrossing::no, ammContext, + std::nullopt, flowJournal); BEAST_EXPECT(r.first == temBAD_PATH); } @@ -853,6 +861,7 @@ struct PayStrand_test : public beast::unit_test::suite true, OfferCrossing::no, ammContext, + std::nullopt, flowJournal); BEAST_EXPECT(r.first == temBAD_PATH); } @@ -990,6 +999,7 @@ struct PayStrand_test : public beast::unit_test::suite true, OfferCrossing::no, ammContext, + std::nullopt, env.app().logs().journal("Flow")); BEAST_EXPECT(ter == tesSUCCESS); BEAST_EXPECT(equal(strand, D{alice, gw, usdC})); @@ -1017,12 +1027,13 @@ struct PayStrand_test : public beast::unit_test::suite false, OfferCrossing::no, ammContext, + std::nullopt, env.app().logs().journal("Flow")); BEAST_EXPECT(ter == tesSUCCESS); BEAST_EXPECT(equal( strand, D{alice, gw, usdC}, - B{USD.issue(), xrpIssue()}, + B{USD.issue(), xrpIssue(), std::nullopt}, XRPS{bob})); } } @@ -1201,6 +1212,7 @@ struct PayStrand_test : public beast::unit_test::suite dstAcc, noAccount(), pathSet, + std::nullopt, env.app().logs(), &inputs); BEAST_EXPECT(r.result() == temBAD_PATH); @@ -1213,6 +1225,7 @@ struct PayStrand_test : public beast::unit_test::suite noAccount(), srcAcc, pathSet, + std::nullopt, env.app().logs(), &inputs); BEAST_EXPECT(r.result() == temBAD_PATH); @@ -1225,6 +1238,7 @@ struct PayStrand_test : public beast::unit_test::suite dstAcc, srcAcc, pathSet, + std::nullopt, env.app().logs(), &inputs); BEAST_EXPECT(r.result() == temBAD_PATH); @@ -1237,6 +1251,7 @@ struct PayStrand_test : public beast::unit_test::suite dstAcc, srcAcc, pathSet, + std::nullopt, env.app().logs(), &inputs); BEAST_EXPECT(r.result() == temBAD_PATH); @@ -1253,13 +1268,16 @@ struct PayStrand_test : public beast::unit_test::suite { using namespace jtx; auto const sa = supported_amendments(); - testToStrand(sa - featureFlowCross); + testToStrand(sa - featureFlowCross - featurePermissionedDEX); + testToStrand(sa - featurePermissionedDEX); testToStrand(sa); - testRIPD1373(sa - featureFlowCross); + testRIPD1373(sa - featureFlowCross - featurePermissionedDEX); + testRIPD1373(sa - featurePermissionedDEX); testRIPD1373(sa); - testLoop(sa - featureFlowCross); + testLoop(sa - featureFlowCross - featurePermissionedDEX); + testLoop(sa - featurePermissionedDEX); testLoop(sa); testNoAccount(sa); diff --git a/src/test/app/PermissionedDEX_test.cpp b/src/test/app/PermissionedDEX_test.cpp new file mode 100644 index 0000000000..693381debf --- /dev/null +++ b/src/test/app/PermissionedDEX_test.cpp @@ -0,0 +1,1595 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2025 Ripple Labs Inc. 
+ + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { +namespace test { + +using namespace jtx; + +class PermissionedDEX_test : public beast::unit_test::suite +{ + [[nodiscard]] bool + offerExists(Env const& env, Account const& account, std::uint32_t offerSeq) + { + return static_cast(env.le(keylet::offer(account.id(), offerSeq))); + } + + [[nodiscard]] bool + checkOffer( + Env const& env, + Account const& account, + std::uint32_t offerSeq, + STAmount const& takerPays, + STAmount const& takerGets, + uint32_t const flags = 0, + bool const domainOffer = false) + { + auto offerInDir = [&](uint256 const& directory, + uint64_t const pageIndex, + std::optional domain = + std::nullopt) -> bool { + auto const page = env.le(keylet::page(directory, pageIndex)); + if (!page) + return false; + + if (domain != (*page)[~sfDomainID]) + return false; + + auto const& indexes = page->getFieldV256(sfIndexes); + for (auto const& index : indexes) + { + if (index == keylet::offer(account, offerSeq).key) + return true; + } + + return false; + }; + + auto const sle = env.le(keylet::offer(account.id(), offerSeq)); + if (!sle) + return false; + if (sle->getFieldAmount(sfTakerGets) != takerGets) + return false; + if (sle->getFieldAmount(sfTakerPays) != takerPays) + return false; + if (sle->getFlags() != flags) + return false; + if (domainOffer && !sle->isFieldPresent(sfDomainID)) + return false; + if (!domainOffer && sle->isFieldPresent(sfDomainID)) + return false; + if (!offerInDir( + sle->getFieldH256(sfBookDirectory), + sle->getFieldU64(sfBookNode), + (*sle)[~sfDomainID])) + return false; + + if (sle->isFlag(lsfHybrid)) + { + if (!sle->isFieldPresent(sfDomainID)) + return false; + if (!sle->isFieldPresent(sfAdditionalBooks)) + return false; + if (sle->getFieldArray(sfAdditionalBooks).size() != 1) + return false; + + auto const& additionalBookDirs = + sle->getFieldArray(sfAdditionalBooks); + + for (auto const& bookDir : additionalBookDirs) + { + auto const& dirIndex = bookDir.getFieldH256(sfBookDirectory); + auto const& dirNode = bookDir.getFieldU64(sfBookNode); + + // the directory is for the open order book, so the dir + // doesn't have domainID + if (!offerInDir(dirIndex, dirNode, std::nullopt)) + return false; + } + } + else + { + if (sle->isFieldPresent(sfAdditionalBooks)) + return false; + } + + return true; + } + + uint256 + getBookDirKey( + Book const& book, + STAmount const& takerPays, + STAmount const& takerGets) + { + return keylet::quality( + keylet::book(book), 
getRate(takerGets, takerPays)) + .key; + } + + std::optional + getDefaultOfferDirKey( + Env const& env, + Account const& account, + std::uint32_t offerSeq) + { + if (auto const sle = env.le(keylet::offer(account.id(), offerSeq))) + return Keylet(ltDIR_NODE, (*sle)[sfBookDirectory]).key; + + return {}; + } + + [[nodiscard]] bool + checkDirectorySize(Env const& env, uint256 directory, std::uint32_t dirSize) + { + std::optional pageIndex{0}; + std::uint32_t dirCnt = 0; + + do + { + auto const page = env.le(keylet::page(directory, *pageIndex)); + if (!page) + break; + + pageIndex = (*page)[~sfIndexNext]; + dirCnt += (*page)[sfIndexes].size(); + + } while (pageIndex.value_or(0)); + + return dirCnt == dirSize; + } + + void + testOfferCreate(FeatureBitset features) + { + testcase("OfferCreate"); + + // test preflight + { + Env env(*this, features - featurePermissionedDEX); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + + env(offer(bob, XRP(10), USD(10)), + domain(domainID), + ter(temDISABLED)); + env.close(); + + env.enableFeature(featurePermissionedDEX); + env.close(); + env(offer(bob, XRP(10), USD(10)), domain(domainID)); + env.close(); + } + + // test preflight: permissioned dex cannot be used without enable + // flowcross + { + Env env(*this, features - featureFlowCross); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + + env(offer(bob, XRP(10), USD(10)), + domain(domainID), + ter(temDISABLED)); + env.close(); + + env.enableFeature(featureFlowCross); + env.close(); + env(offer(bob, XRP(10), USD(10)), domain(domainID)); + env.close(); + } + + // preclaim - someone outside of the domain cannot create domain offer + { + Env env(*this, features); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + + // create devin account who is not part of the domain + Account devin("devin"); + env.fund(XRP(1000), devin); + env.close(); + env.trust(USD(1000), devin); + env.close(); + env(pay(gw, devin, USD(100))); + env.close(); + + env(offer(devin, XRP(10), USD(10)), + domain(domainID), + ter(tecNO_PERMISSION)); + env.close(); + + // domain owner also issues a credential for devin + env(credentials::create(devin, domainOwner, credType)); + env.close(); + + // devin still cannot create offer since he didn't accept credential + env(offer(devin, XRP(10), USD(10)), + domain(domainID), + ter(tecNO_PERMISSION)); + env.close(); + + env(credentials::accept(devin, domainOwner, credType)); + env.close(); + + env(offer(devin, XRP(10), USD(10)), domain(domainID)); + env.close(); + } + + // preclaim - someone with expired cred cannot create domain offer + { + Env env(*this, features); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + + // create devin account who is not part of the domain + Account devin("devin"); + env.fund(XRP(1000), devin); + env.close(); + env.trust(USD(1000), devin); + env.close(); + env(pay(gw, devin, USD(100))); + env.close(); + + auto jv = credentials::create(devin, domainOwner, credType); + uint32_t const t = env.current() + ->info() + .parentCloseTime.time_since_epoch() + .count(); + jv[sfExpiration.jsonName] = t + 20; + env(jv); + + env(credentials::accept(devin, domainOwner, credType)); + env.close(); + + // devin can still create offer while his cred is not expired + env(offer(devin, XRP(10), USD(10)), domain(domainID)); + env.close(); + + // time advance + 
env.close(std::chrono::seconds(20)); + + // devin cannot create offer with expired cred + env(offer(devin, XRP(10), USD(10)), + domain(domainID), + ter(tecNO_PERMISSION)); + env.close(); + } + + // preclaim - cannot create an offer in a non existent domain + { + Env env(*this, features); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + uint256 const badDomain{ + "F10D0CC9A0F9A3CBF585B80BE09A186483668FDBDD39AA7E3370F3649CE134" + "E5"}; + + env(offer(bob, XRP(10), USD(10)), + domain(badDomain), + ter(tecNO_PERMISSION)); + env.close(); + } + + // apply - offer can be created even if takergets issuer is not in + // domain + { + Env env(*this, features); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + + env(credentials::deleteCred( + domainOwner, gw, domainOwner, credType)); + env.close(); + + auto const bobOfferSeq{env.seq(bob)}; + env(offer(bob, XRP(10), USD(10)), domain(domainID)); + env.close(); + + BEAST_EXPECT( + checkOffer(env, bob, bobOfferSeq, XRP(10), USD(10), 0, true)); + } + + // apply - offer can be created even if takerpays issuer is not in + // domain + { + Env env(*this, features); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + + env(credentials::deleteCred( + domainOwner, gw, domainOwner, credType)); + env.close(); + + auto const bobOfferSeq{env.seq(bob)}; + env(offer(bob, USD(10), XRP(10)), domain(domainID)); + env.close(); + + BEAST_EXPECT( + checkOffer(env, bob, bobOfferSeq, USD(10), XRP(10), 0, true)); + } + + // apply - two domain offers cross with each other + { + Env env(*this, features); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + + auto const bobOfferSeq{env.seq(bob)}; + env(offer(bob, XRP(10), USD(10)), domain(domainID)); + env.close(); + + BEAST_EXPECT( + checkOffer(env, bob, bobOfferSeq, XRP(10), USD(10), 0, true)); + BEAST_EXPECT(ownerCount(env, bob) == 3); + + // a non domain offer cannot cross with domain offer + env(offer(carol, USD(10), XRP(10))); + env.close(); + + BEAST_EXPECT( + checkOffer(env, bob, bobOfferSeq, XRP(10), USD(10), 0, true)); + + auto const aliceOfferSeq{env.seq(alice)}; + env(offer(alice, USD(10), XRP(10)), domain(domainID)); + env.close(); + + BEAST_EXPECT(!offerExists(env, alice, aliceOfferSeq)); + BEAST_EXPECT(!offerExists(env, bob, bobOfferSeq)); + BEAST_EXPECT(ownerCount(env, alice) == 2); + } + + // apply - create lots of domain offers + { + Env env(*this, features); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + + std::vector offerSeqs; + offerSeqs.reserve(100); + + for (size_t i = 0; i <= 100; i++) + { + auto const bobOfferSeq{env.seq(bob)}; + offerSeqs.emplace_back(bobOfferSeq); + + env(offer(bob, XRP(10), USD(10)), domain(domainID)); + env.close(); + BEAST_EXPECT(checkOffer( + env, bob, bobOfferSeq, XRP(10), USD(10), 0, true)); + } + + for (auto const offerSeq : offerSeqs) + { + env(offer_cancel(bob, offerSeq)); + env.close(); + BEAST_EXPECT(!offerExists(env, bob, offerSeq)); + } + } + } + + void + testPayment(FeatureBitset features) + { + testcase("Payment"); + + // test preflight - without enabling featurePermissionedDEX amendment + { + Env env(*this, features - featurePermissionedDEX); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + + env(pay(bob, alice, USD(10)), + path(~USD), + 
sendmax(XRP(10)), + domain(domainID), + ter(temDISABLED)); + env.close(); + + env.enableFeature(featurePermissionedDEX); + env.close(); + + env(offer(bob, XRP(10), USD(10)), domain(domainID)); + env.close(); + + env(pay(bob, alice, USD(10)), + path(~USD), + sendmax(XRP(10)), + domain(domainID)); + env.close(); + } + + // preclaim - cannot send payment with non existent domain + { + Env env(*this, features); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + uint256 const badDomain{ + "F10D0CC9A0F9A3CBF585B80BE09A186483668FDBDD39AA7E3370F3649CE134" + "E5"}; + + env(pay(bob, alice, USD(10)), + path(~USD), + sendmax(XRP(10)), + domain(badDomain), + ter(tecNO_PERMISSION)); + env.close(); + } + + // preclaim - payment with non-domain destination fails + { + Env env(*this, features); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + + env(offer(bob, XRP(10), USD(10)), domain(domainID)); + env.close(); + + // create devin account who is not part of the domain + Account devin("devin"); + env.fund(XRP(1000), devin); + env.close(); + env.trust(USD(1000), devin); + env.close(); + env(pay(gw, devin, USD(100))); + env.close(); + + // devin is not part of domain + env(pay(alice, devin, USD(10)), + path(~USD), + sendmax(XRP(10)), + domain(domainID), + ter(tecNO_PERMISSION)); + env.close(); + + // domain owner also issues a credential for devin + env(credentials::create(devin, domainOwner, credType)); + env.close(); + + // devin has not yet accepted cred + env(pay(alice, devin, USD(10)), + path(~USD), + sendmax(XRP(10)), + domain(domainID), + ter(tecNO_PERMISSION)); + env.close(); + + env(credentials::accept(devin, domainOwner, credType)); + env.close(); + + // devin can now receive payment after he is in domain + env(pay(alice, devin, USD(10)), + path(~USD), + sendmax(XRP(10)), + domain(domainID)); + env.close(); + } + + // preclaim - non-domain sender cannot send payment + { + Env env(*this, features); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + + env(offer(bob, XRP(10), USD(10)), domain(domainID)); + env.close(); + + // create devin account who is not part of the domain + Account devin("devin"); + env.fund(XRP(1000), devin); + env.close(); + env.trust(USD(1000), devin); + env.close(); + env(pay(gw, devin, USD(100))); + env.close(); + + // devin tries to send domain payment + env(pay(devin, alice, USD(10)), + path(~USD), + sendmax(XRP(10)), + domain(domainID), + ter(tecNO_PERMISSION)); + env.close(); + + // domain owner also issues a credential for devin + env(credentials::create(devin, domainOwner, credType)); + env.close(); + + // devin has not yet accepted cred + env(pay(devin, alice, USD(10)), + path(~USD), + sendmax(XRP(10)), + domain(domainID), + ter(tecNO_PERMISSION)); + env.close(); + + env(credentials::accept(devin, domainOwner, credType)); + env.close(); + + // devin can now send payment after he is in domain + env(pay(devin, alice, USD(10)), + path(~USD), + sendmax(XRP(10)), + domain(domainID)); + env.close(); + } + + // apply - domain owner can always send and receive domain payment + { + Env env(*this, features); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + + env(offer(bob, XRP(10), USD(10)), domain(domainID)); + env.close(); + + // domain owner can always be destination + env(pay(alice, domainOwner, USD(10)), + path(~USD), + sendmax(XRP(10)), + domain(domainID)); + 
env.close(); + + env(offer(bob, XRP(10), USD(10)), domain(domainID)); + env.close(); + + // domain owner can send + env(pay(domainOwner, alice, USD(10)), + path(~USD), + sendmax(XRP(10)), + domain(domainID)); + env.close(); + } + } + + void + testBookStep(FeatureBitset features) + { + testcase("Book step"); + + // test domain cross currency payment consuming one offer + { + Env env(*this, features); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + + // create a regular offer without domain + auto const regularOfferSeq{env.seq(bob)}; + env(offer(bob, XRP(10), USD(10))); + env.close(); + BEAST_EXPECT( + checkOffer(env, bob, regularOfferSeq, XRP(10), USD(10))); + + auto const regularDirKey = + getDefaultOfferDirKey(env, bob, regularOfferSeq); + BEAST_EXPECT(regularDirKey); + BEAST_EXPECT(checkDirectorySize(env, *regularDirKey, 1)); + + // a domain payment cannot consume regular offers + env(pay(alice, carol, USD(10)), + path(~USD), + sendmax(XRP(10)), + domain(domainID), + ter(tecPATH_PARTIAL)); + env.close(); + + // create a domain offer + auto const domainOfferSeq{env.seq(bob)}; + env(offer(bob, XRP(10), USD(10)), domain(domainID)); + env.close(); + + BEAST_EXPECT(checkOffer( + env, bob, domainOfferSeq, XRP(10), USD(10), 0, true)); + + auto const domainDirKey = + getDefaultOfferDirKey(env, bob, domainOfferSeq); + BEAST_EXPECT(domainDirKey); + BEAST_EXPECT(checkDirectorySize(env, *domainDirKey, 1)); + + // cross-currency permissioned payment consumed + // domain offer instead of regular offer + env(pay(alice, carol, USD(10)), + path(~USD), + sendmax(XRP(10)), + domain(domainID)); + env.close(); + BEAST_EXPECT(!offerExists(env, bob, domainOfferSeq)); + BEAST_EXPECT( + checkOffer(env, bob, regularOfferSeq, XRP(10), USD(10))); + + // domain directory is empty + BEAST_EXPECT(checkDirectorySize(env, *domainDirKey, 0)); + BEAST_EXPECT(checkDirectorySize(env, *regularDirKey, 1)); + } + + // test domain payment consuming two offers in the path + { + Env env(*this, features); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + + auto const EUR = gw["EUR"]; + env.trust(EUR(1000), alice); + env.close(); + env.trust(EUR(1000), bob); + env.close(); + env.trust(EUR(1000), carol); + env.close(); + env(pay(gw, bob, EUR(100))); + env.close(); + + // create XRP/USD domain offer + auto const usdOfferSeq{env.seq(bob)}; + env(offer(bob, XRP(10), USD(10)), domain(domainID)); + env.close(); + + BEAST_EXPECT( + checkOffer(env, bob, usdOfferSeq, XRP(10), USD(10), 0, true)); + + // payment fail because there isn't eur offer + env(pay(alice, carol, EUR(10)), + path(~USD, ~EUR), + sendmax(XRP(10)), + domain(domainID), + ter(tecPATH_PARTIAL)); + env.close(); + BEAST_EXPECT( + checkOffer(env, bob, usdOfferSeq, XRP(10), USD(10), 0, true)); + + // bob creates a regular USD/EUR offer + auto const regularOfferSeq{env.seq(bob)}; + env(offer(bob, USD(10), EUR(10))); + env.close(); + BEAST_EXPECT( + checkOffer(env, bob, regularOfferSeq, USD(10), EUR(10))); + + // alice tries to pay again, but still fails because the regular + // offer cannot be consumed + env(pay(alice, carol, EUR(10)), + path(~USD, ~EUR), + sendmax(XRP(10)), + domain(domainID), + ter(tecPATH_PARTIAL)); + env.close(); + + // bob creates a domain USD/EUR offer + auto const eurOfferSeq{env.seq(bob)}; + env(offer(bob, USD(10), EUR(10)), domain(domainID)); + env.close(); + BEAST_EXPECT( + checkOffer(env, bob, eurOfferSeq, USD(10), EUR(10), 0, true)); + + // 
alice successfully consume two domain offers: xrp/usd and usd/eur + env(pay(alice, carol, EUR(5)), + sendmax(XRP(5)), + domain(domainID), + path(~USD, ~EUR)); + env.close(); + + BEAST_EXPECT( + checkOffer(env, bob, usdOfferSeq, XRP(5), USD(5), 0, true)); + BEAST_EXPECT( + checkOffer(env, bob, eurOfferSeq, USD(5), EUR(5), 0, true)); + + // alice successfully consume two domain offers and deletes them + // we compute path this time using `paths` + env(pay(alice, carol, EUR(5)), + sendmax(XRP(5)), + domain(domainID), + paths(XRP)); + env.close(); + + BEAST_EXPECT(!offerExists(env, bob, usdOfferSeq)); + BEAST_EXPECT(!offerExists(env, bob, eurOfferSeq)); + + // regular offer is not consumed + BEAST_EXPECT( + checkOffer(env, bob, regularOfferSeq, USD(10), EUR(10))); + } + + // domain payment cannot consume offer from another domain + { + Env env(*this, features); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + + // Fund devin and create USD trustline + Account badDomainOwner("badDomainOwner"); + Account devin("devin"); + env.fund(XRP(1000), badDomainOwner, devin); + env.close(); + env.trust(USD(1000), devin); + env.close(); + env(pay(gw, devin, USD(100))); + env.close(); + + auto const badCredType = "badCred"; + pdomain::Credentials credentials{{badDomainOwner, badCredType}}; + env(pdomain::setTx(badDomainOwner, credentials)); + + auto objects = pdomain::getObjects(badDomainOwner, env); + auto const badDomainID = objects.begin()->first; + + env(credentials::create(devin, badDomainOwner, badCredType)); + env.close(); + env(credentials::accept(devin, badDomainOwner, badCredType)); + + // devin creates a domain offer in another domain + env(offer(devin, XRP(10), USD(10)), domain(badDomainID)); + env.close(); + + // domain payment can't consume an offer from another domain + env(pay(alice, carol, USD(10)), + path(~USD), + sendmax(XRP(10)), + domain(domainID), + ter(tecPATH_PARTIAL)); + env.close(); + + // bob creates an offer under the right domain + auto const bobOfferSeq{env.seq(bob)}; + env(offer(bob, XRP(10), USD(10)), domain(domainID)); + env.close(); + BEAST_EXPECT( + checkOffer(env, bob, bobOfferSeq, XRP(10), USD(10), 0, true)); + + // domain payment now consumes from the right domain + env(pay(alice, carol, USD(10)), + path(~USD), + sendmax(XRP(10)), + domain(domainID)); + env.close(); + + BEAST_EXPECT(!offerExists(env, bob, bobOfferSeq)); + } + + // sanity check: devin, who is part of the domain but doesn't have a + // trustline with USD issuer, can successfully make a payment using + // offer + { + Env env(*this, features); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + + env(offer(bob, XRP(10), USD(10)), domain(domainID)); + env.close(); + + // fund devin but don't create a USD trustline with gateway + Account devin("devin"); + env.fund(XRP(1000), devin); + env.close(); + + // domain owner also issues a credential for devin + env(credentials::create(devin, domainOwner, credType)); + env.close(); + + env(credentials::accept(devin, domainOwner, credType)); + env.close(); + + // successful payment because offer is consumed + env(pay(devin, alice, USD(10)), sendmax(XRP(10)), domain(domainID)); + env.close(); + } + + // offer becomes unfunded when offer owner's cred expires + { + Env env(*this, features); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + + // create devin account who is not part of the domain + Account 
devin("devin"); + env.fund(XRP(1000), devin); + env.close(); + env.trust(USD(1000), devin); + env.close(); + env(pay(gw, devin, USD(100))); + env.close(); + + auto jv = credentials::create(devin, domainOwner, credType); + uint32_t const t = env.current() + ->info() + .parentCloseTime.time_since_epoch() + .count(); + jv[sfExpiration.jsonName] = t + 20; + env(jv); + + env(credentials::accept(devin, domainOwner, credType)); + env.close(); + + // devin can still create offer while his cred is not expired + auto const offerSeq{env.seq(devin)}; + env(offer(devin, XRP(10), USD(10)), domain(domainID)); + env.close(); + + // devin's offer can still be consumed while his cred isn't expired + env(pay(alice, carol, USD(5)), + path(~USD), + sendmax(XRP(5)), + domain(domainID)); + env.close(); + BEAST_EXPECT( + checkOffer(env, devin, offerSeq, XRP(5), USD(5), 0, true)); + + // advance time + env.close(std::chrono::seconds(20)); + + // devin's offer is unfunded now due to expired cred + env(pay(alice, carol, USD(5)), + path(~USD), + sendmax(XRP(5)), + domain(domainID), + ter(tecPATH_PARTIAL)); + env.close(); + BEAST_EXPECT( + checkOffer(env, devin, offerSeq, XRP(5), USD(5), 0, true)); + } + + // offer becomes unfunded when offer owner's cred is removed + { + Env env(*this, features); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + + auto const offerSeq{env.seq(bob)}; + env(offer(bob, XRP(10), USD(10)), domain(domainID)); + env.close(); + + // bob's offer can still be consumed while his cred exists + env(pay(alice, carol, USD(5)), + path(~USD), + sendmax(XRP(5)), + domain(domainID)); + env.close(); + BEAST_EXPECT( + checkOffer(env, bob, offerSeq, XRP(5), USD(5), 0, true)); + + // remove bob's cred + env(credentials::deleteCred( + domainOwner, bob, domainOwner, credType)); + env.close(); + + // bob's offer is unfunded now due to expired cred + env(pay(alice, carol, USD(5)), + path(~USD), + sendmax(XRP(5)), + domain(domainID), + ter(tecPATH_PARTIAL)); + env.close(); + BEAST_EXPECT( + checkOffer(env, bob, offerSeq, XRP(5), USD(5), 0, true)); + } + } + + void + testRippling(FeatureBitset features) + { + testcase("Rippling"); + + // test a non-domain account can still be part of rippling in a domain + // payment. 
If the domain wishes to control who is allowed to ripple
+        // through, they should set the rippling individually
+        Env env(*this, features);
+        auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] =
+            PermissionedDEX(env);
+
+        auto const EURA = alice["EUR"];
+        auto const EURB = bob["EUR"];
+
+        env.trust(EURA(100), bob);
+        env.trust(EURB(100), carol);
+        env.close();
+
+        // remove bob from domain
+        env(credentials::deleteCred(domainOwner, bob, domainOwner, credType));
+        env.close();
+
+        // alice can still ripple through bob even though he's not part
+        // of the domain, this is intentional
+        env(pay(alice, carol, EURB(10)), paths(EURA), domain(domainID));
+        env.close();
+        env.require(balance(bob, EURA(10)), balance(carol, EURB(10)));
+
+        // carol sets no ripple on bob
+        env(trust(carol, bob["EUR"](0), bob, tfSetNoRipple));
+        env.close();
+
+        // payment no longer works because carol has no ripple on bob
+        env(pay(alice, carol, EURB(5)),
+            paths(EURA),
+            domain(domainID),
+            ter(tecPATH_DRY));
+        env.close();
+        env.require(balance(bob, EURA(10)), balance(carol, EURB(10)));
+    }
+
+    void
+    testOfferTokenIssuerInDomain(FeatureBitset features)
+    {
+        testcase("Offer token issuer in domain");
+
+        // whether the issuer is in the domain should NOT affect whether an
+        // offer can be consumed in domain payment
+        Env env(*this, features);
+        auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] =
+            PermissionedDEX(env);
+
+        // create an xrp/usd offer with usd as takergets
+        auto const bobOffer1Seq{env.seq(bob)};
+        env(offer(bob, XRP(10), USD(10)), domain(domainID));
+        env.close();
+
+        // create a usd/xrp offer with usd as takerpays
+        auto const bobOffer2Seq{env.seq(bob)};
+        env(offer(bob, USD(10), XRP(10)), domain(domainID), txflags(tfPassive));
+        env.close();
+
+        BEAST_EXPECT(
+            checkOffer(env, bob, bobOffer1Seq, XRP(10), USD(10), 0, true));
+        BEAST_EXPECT(checkOffer(
+            env, bob, bobOffer2Seq, USD(10), XRP(10), lsfPassive, true));
+
+        // remove gateway from domain
+        env(credentials::deleteCred(domainOwner, gw, domainOwner, credType));
+        env.close();
+
+        // payment succeeds even if issuer is not in domain
+        // xrp/usd offer is consumed
+        env(pay(alice, carol, USD(10)),
+            path(~USD),
+            sendmax(XRP(10)),
+            domain(domainID));
+        env.close();
+        BEAST_EXPECT(!offerExists(env, bob, bobOffer1Seq));
+
+        // payment succeeds even if issuer is not in domain
+        // usd/xrp offer is consumed
+        env(pay(alice, carol, XRP(10)),
+            path(~XRP),
+            sendmax(USD(10)),
+            domain(domainID));
+        env.close();
+        BEAST_EXPECT(!offerExists(env, bob, bobOffer2Seq));
+    }
+
+    void
+    testRemoveUnfundedOffer(FeatureBitset features)
+    {
+        testcase("Remove unfunded offer");
+
+        // checking that an unfunded offer will be implicitly removed by a
+        // successful payment tx
+        Env env(*this, features);
+        auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] =
+            PermissionedDEX(env);
+
+        auto const aliceOfferSeq{env.seq(alice)};
+        env(offer(alice, XRP(100), USD(100)), domain(domainID));
+        env.close();
+
+        auto const bobOfferSeq{env.seq(bob)};
+        env(offer(bob, XRP(20), USD(20)), domain(domainID));
+        env.close();
+
+        BEAST_EXPECT(
+            checkOffer(env, bob, bobOfferSeq, XRP(20), USD(20), 0, true));
+        BEAST_EXPECT(
+            checkOffer(env, alice, aliceOfferSeq, XRP(100), USD(100), 0, true));
+
+        auto const domainDirKey = getDefaultOfferDirKey(env, bob, bobOfferSeq);
+        BEAST_EXPECT(domainDirKey);
+        BEAST_EXPECT(checkDirectorySize(env, *domainDirKey, 2));
+
+        // remove alice from domain and thus alice's offer 
becomes unfunded + env(credentials::deleteCred(domainOwner, alice, domainOwner, credType)); + env.close(); + + env(pay(gw, carol, USD(10)), + path(~USD), + sendmax(XRP(10)), + domain(domainID)); + env.close(); + + BEAST_EXPECT( + checkOffer(env, bob, bobOfferSeq, XRP(10), USD(10), 0, true)); + + // alice's unfunded offer is removed implicitly + BEAST_EXPECT(!offerExists(env, alice, aliceOfferSeq)); + BEAST_EXPECT(checkDirectorySize(env, *domainDirKey, 1)); + } + + void + testAmmNotUsed(FeatureBitset features) + { + testcase("AMM not used"); + + Env env(*this, features); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + AMM amm(env, alice, XRP(10), USD(50)); + + // a domain payment isn't able to consume AMM + env(pay(bob, carol, USD(5)), + path(~USD), + sendmax(XRP(5)), + domain(domainID), + ter(tecPATH_PARTIAL)); + env.close(); + + // a non domain payment can use AMM + env(pay(bob, carol, USD(5)), path(~USD), sendmax(XRP(5))); + env.close(); + + // USD amount in AMM is changed + auto [xrp, usd, lpt] = amm.balances(XRP, USD); + BEAST_EXPECT(usd == USD(45)); + } + + void + testHybridOfferCreate(FeatureBitset features) + { + testcase("Hybrid offer create"); + + // test preflight - invalid hybrid flag + { + Env env(*this, features - featurePermissionedDEX); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + + env(offer(bob, XRP(10), USD(10)), + domain(domainID), + txflags(tfHybrid), + ter(temDISABLED)); + env.close(); + + env(offer(bob, XRP(10), USD(10)), + txflags(tfHybrid), + ter(temINVALID_FLAG)); + env.close(); + + env.enableFeature(featurePermissionedDEX); + env.close(); + + // hybrid offer must have domainID + env(offer(bob, XRP(10), USD(10)), + txflags(tfHybrid), + ter(temINVALID_FLAG)); + env.close(); + + // hybrid offer must have domainID + auto const offerSeq{env.seq(bob)}; + env(offer(bob, XRP(10), USD(10)), + txflags(tfHybrid), + domain(domainID)); + env.close(); + BEAST_EXPECT(checkOffer( + env, bob, offerSeq, XRP(10), USD(10), lsfHybrid, true)); + } + + // apply - domain offer can cross with hybrid + { + Env env(*this, features); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + + auto const bobOfferSeq{env.seq(bob)}; + env(offer(bob, XRP(10), USD(10)), + txflags(tfHybrid), + domain(domainID)); + env.close(); + + BEAST_EXPECT(checkOffer( + env, bob, bobOfferSeq, XRP(10), USD(10), lsfHybrid, true)); + BEAST_EXPECT(offerExists(env, bob, bobOfferSeq)); + BEAST_EXPECT(ownerCount(env, bob) == 3); + + auto const aliceOfferSeq{env.seq(alice)}; + env(offer(alice, USD(10), XRP(10)), domain(domainID)); + env.close(); + + BEAST_EXPECT(!offerExists(env, alice, aliceOfferSeq)); + BEAST_EXPECT(!offerExists(env, bob, bobOfferSeq)); + BEAST_EXPECT(ownerCount(env, alice) == 2); + } + + // apply - open offer can cross with hybrid + { + Env env(*this, features); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + + auto const bobOfferSeq{env.seq(bob)}; + env(offer(bob, XRP(10), USD(10)), + txflags(tfHybrid), + domain(domainID)); + env.close(); + + BEAST_EXPECT(offerExists(env, bob, bobOfferSeq)); + BEAST_EXPECT(ownerCount(env, bob) == 3); + BEAST_EXPECT(checkOffer( + env, bob, bobOfferSeq, XRP(10), USD(10), lsfHybrid, true)); + + auto const aliceOfferSeq{env.seq(alice)}; + env(offer(alice, USD(10), XRP(10))); + env.close(); + + BEAST_EXPECT(!offerExists(env, alice, aliceOfferSeq)); + 
BEAST_EXPECT(!offerExists(env, bob, bobOfferSeq)); + BEAST_EXPECT(ownerCount(env, alice) == 2); + } + + // apply - by default, hybrid offer tries to cross with offers in the + // domain book + { + Env env(*this, features); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + + auto const bobOfferSeq{env.seq(bob)}; + env(offer(bob, XRP(10), USD(10)), domain(domainID)); + env.close(); + + BEAST_EXPECT( + checkOffer(env, bob, bobOfferSeq, XRP(10), USD(10), 0, true)); + BEAST_EXPECT(ownerCount(env, bob) == 3); + + // hybrid offer auto crosses with domain offer + auto const aliceOfferSeq{env.seq(alice)}; + env(offer(alice, USD(10), XRP(10)), + domain(domainID), + txflags(tfHybrid)); + env.close(); + + BEAST_EXPECT(!offerExists(env, alice, aliceOfferSeq)); + BEAST_EXPECT(!offerExists(env, bob, bobOfferSeq)); + BEAST_EXPECT(ownerCount(env, alice) == 2); + } + + // apply - hybrid offer does not automatically cross with open offers + // because by default, it only tries to cross domain offers + { + Env env(*this, features); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + + auto const bobOfferSeq{env.seq(bob)}; + env(offer(bob, XRP(10), USD(10))); + env.close(); + + BEAST_EXPECT( + checkOffer(env, bob, bobOfferSeq, XRP(10), USD(10), 0, false)); + BEAST_EXPECT(ownerCount(env, bob) == 3); + + // hybrid offer auto crosses with domain offer + auto const aliceOfferSeq{env.seq(alice)}; + env(offer(alice, USD(10), XRP(10)), + domain(domainID), + txflags(tfHybrid)); + env.close(); + + BEAST_EXPECT(offerExists(env, alice, aliceOfferSeq)); + BEAST_EXPECT(offerExists(env, bob, bobOfferSeq)); + BEAST_EXPECT( + checkOffer(env, bob, bobOfferSeq, XRP(10), USD(10), 0, false)); + BEAST_EXPECT(checkOffer( + env, alice, aliceOfferSeq, USD(10), XRP(10), lsfHybrid, true)); + BEAST_EXPECT(ownerCount(env, alice) == 3); + } + } + + void + testHybridInvalidOffer(FeatureBitset features) + { + testcase("Hybrid invalid offer"); + + // bob has a hybrid offer and then he is removed from domain. 
+ // in this case, the hybrid offer will be considered as unfunded even in + // a regular payment + Env env(*this, features); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + + auto const hybridOfferSeq{env.seq(bob)}; + env(offer(bob, XRP(50), USD(50)), txflags(tfHybrid), domain(domainID)); + env.close(); + + // remove bob from domain + env(credentials::deleteCred(domainOwner, bob, domainOwner, credType)); + env.close(); + + // bob's hybrid offer is unfunded and can not be consumed in a domain + // payment + env(pay(alice, carol, USD(5)), + path(~USD), + sendmax(XRP(5)), + domain(domainID), + ter(tecPATH_PARTIAL)); + env.close(); + BEAST_EXPECT(checkOffer( + env, bob, hybridOfferSeq, XRP(50), USD(50), lsfHybrid, true)); + + // bob's unfunded hybrid offer can't be consumed even with a regular + // payment + env(pay(alice, carol, USD(5)), + path(~USD), + sendmax(XRP(5)), + ter(tecPATH_PARTIAL)); + env.close(); + BEAST_EXPECT(checkOffer( + env, bob, hybridOfferSeq, XRP(50), USD(50), lsfHybrid, true)); + + // create a regular offer + auto const regularOfferSeq{env.seq(bob)}; + env(offer(bob, XRP(10), USD(10))); + env.close(); + BEAST_EXPECT(offerExists(env, bob, regularOfferSeq)); + BEAST_EXPECT(checkOffer(env, bob, regularOfferSeq, XRP(10), USD(10))); + + auto const sleHybridOffer = + env.le(keylet::offer(bob.id(), hybridOfferSeq)); + BEAST_EXPECT(sleHybridOffer); + auto const openDir = + sleHybridOffer->getFieldArray(sfAdditionalBooks)[0].getFieldH256( + sfBookDirectory); + BEAST_EXPECT(checkDirectorySize(env, openDir, 2)); + + // this normal payment should consume the regular offer and remove the + // unfunded hybrid offer + env(pay(alice, carol, USD(5)), path(~USD), sendmax(XRP(5))); + env.close(); + + BEAST_EXPECT(!offerExists(env, bob, hybridOfferSeq)); + BEAST_EXPECT(checkOffer(env, bob, regularOfferSeq, XRP(5), USD(5))); + BEAST_EXPECT(checkDirectorySize(env, openDir, 1)); + } + + void + testHybridBookStep(FeatureBitset features) + { + testcase("Hybrid book step"); + + // both non domain and domain payments can consume hybrid offer + { + Env env(*this, features); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + + auto const hybridOfferSeq{env.seq(bob)}; + env(offer(bob, XRP(10), USD(10)), + txflags(tfHybrid), + domain(domainID)); + env.close(); + + env(pay(alice, carol, USD(5)), + path(~USD), + sendmax(XRP(5)), + domain(domainID)); + env.close(); + BEAST_EXPECT(checkOffer( + env, bob, hybridOfferSeq, XRP(5), USD(5), lsfHybrid, true)); + + // hybrid offer can't be consumed since bob is not in domain anymore + env(pay(alice, carol, USD(5)), path(~USD), sendmax(XRP(5))); + env.close(); + + BEAST_EXPECT(!offerExists(env, bob, hybridOfferSeq)); + } + + // someone from another domain can't cross hybrid if they specified + // wrong domainID + { + Env env(*this, features); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + + // Fund accounts + Account badDomainOwner("badDomainOwner"); + Account devin("devin"); + env.fund(XRP(1000), badDomainOwner, devin); + env.close(); + + auto const badCredType = "badCred"; + pdomain::Credentials credentials{{badDomainOwner, badCredType}}; + env(pdomain::setTx(badDomainOwner, credentials)); + + auto objects = pdomain::getObjects(badDomainOwner, env); + auto const badDomainID = objects.begin()->first; + + env(credentials::create(devin, badDomainOwner, badCredType)); + env.close(); + 
env(credentials::accept(devin, badDomainOwner, badCredType)); + env.close(); + + auto const hybridOfferSeq{env.seq(bob)}; + env(offer(bob, XRP(10), USD(10)), + txflags(tfHybrid), + domain(domainID)); + env.close(); + + // other domains can't consume the offer + env(pay(devin, badDomainOwner, USD(5)), + path(~USD), + sendmax(XRP(5)), + domain(badDomainID), + ter(tecPATH_DRY)); + env.close(); + BEAST_EXPECT(checkOffer( + env, bob, hybridOfferSeq, XRP(10), USD(10), lsfHybrid, true)); + + env(pay(alice, carol, USD(5)), + path(~USD), + sendmax(XRP(5)), + domain(domainID)); + env.close(); + BEAST_EXPECT(checkOffer( + env, bob, hybridOfferSeq, XRP(5), USD(5), lsfHybrid, true)); + + // hybrid offer can't be consumed since bob is not in domain anymore + env(pay(alice, carol, USD(5)), path(~USD), sendmax(XRP(5))); + env.close(); + + BEAST_EXPECT(!offerExists(env, bob, hybridOfferSeq)); + } + + // test domain payment consuming two offers w/ hybrid offer + { + Env env(*this, features); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + + auto const EUR = gw["EUR"]; + env.trust(EUR(1000), alice); + env.close(); + env.trust(EUR(1000), bob); + env.close(); + env.trust(EUR(1000), carol); + env.close(); + env(pay(gw, bob, EUR(100))); + env.close(); + + auto const usdOfferSeq{env.seq(bob)}; + env(offer(bob, XRP(10), USD(10)), domain(domainID)); + env.close(); + + BEAST_EXPECT( + checkOffer(env, bob, usdOfferSeq, XRP(10), USD(10), 0, true)); + + // payment fail because there isn't eur offer + env(pay(alice, carol, EUR(5)), + path(~USD, ~EUR), + sendmax(XRP(5)), + domain(domainID), + ter(tecPATH_PARTIAL)); + env.close(); + BEAST_EXPECT( + checkOffer(env, bob, usdOfferSeq, XRP(10), USD(10), 0, true)); + + // bob creates a hybrid eur offer + auto const eurOfferSeq{env.seq(bob)}; + env(offer(bob, USD(10), EUR(10)), + domain(domainID), + txflags(tfHybrid)); + env.close(); + BEAST_EXPECT(checkOffer( + env, bob, eurOfferSeq, USD(10), EUR(10), lsfHybrid, true)); + + // alice successfully consume two domain offers: xrp/usd and usd/eur + env(pay(alice, carol, EUR(5)), + path(~USD, ~EUR), + sendmax(XRP(5)), + domain(domainID)); + env.close(); + + BEAST_EXPECT( + checkOffer(env, bob, usdOfferSeq, XRP(5), USD(5), 0, true)); + BEAST_EXPECT(checkOffer( + env, bob, eurOfferSeq, USD(5), EUR(5), lsfHybrid, true)); + } + + // test regular payment using a regular offer and a hybrid offer + { + Env env(*this, features); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + PermissionedDEX(env); + + auto const EUR = gw["EUR"]; + env.trust(EUR(1000), alice); + env.close(); + env.trust(EUR(1000), bob); + env.close(); + env.trust(EUR(1000), carol); + env.close(); + env(pay(gw, bob, EUR(100))); + env.close(); + + // bob creates a regular usd offer + auto const usdOfferSeq{env.seq(bob)}; + env(offer(bob, XRP(10), USD(10))); + env.close(); + + BEAST_EXPECT( + checkOffer(env, bob, usdOfferSeq, XRP(10), USD(10), 0, false)); + + // bob creates a hybrid eur offer + auto const eurOfferSeq{env.seq(bob)}; + env(offer(bob, USD(10), EUR(10)), + domain(domainID), + txflags(tfHybrid)); + env.close(); + BEAST_EXPECT(checkOffer( + env, bob, eurOfferSeq, USD(10), EUR(10), lsfHybrid, true)); + + // alice successfully consume two offers: xrp/usd and usd/eur + env(pay(alice, carol, EUR(5)), path(~USD, ~EUR), sendmax(XRP(5))); + env.close(); + + BEAST_EXPECT( + checkOffer(env, bob, usdOfferSeq, XRP(5), USD(5), 0, false)); + BEAST_EXPECT(checkOffer( + env, bob, eurOfferSeq, 
USD(5), EUR(5), lsfHybrid, true));
+        }
+    }
+
+    void
+    testHybridOfferDirectories(FeatureBitset features)
+    {
+        Env env(*this, features);
+        auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] =
+            PermissionedDEX(env);
+
+        std::vector<std::uint32_t> offerSeqs;
+        offerSeqs.reserve(100);
+
+        Book domainBook{Issue(XRP), Issue(USD), domainID};
+        Book openBook{Issue(XRP), Issue(USD), std::nullopt};
+
+        auto const domainDir = getBookDirKey(domainBook, XRP(10), USD(10));
+        auto const openDir = getBookDirKey(openBook, XRP(10), USD(10));
+
+        size_t dirCnt = 100;
+
+        for (size_t i = 1; i <= dirCnt; i++)
+        {
+            auto const bobOfferSeq{env.seq(bob)};
+            offerSeqs.emplace_back(bobOfferSeq);
+            env(offer(bob, XRP(10), USD(10)),
+                txflags(tfHybrid),
+                domain(domainID));
+            env.close();
+
+            auto const sleOffer = env.le(keylet::offer(bob.id(), bobOfferSeq));
+            BEAST_EXPECT(sleOffer);
+            BEAST_EXPECT(sleOffer->getFieldH256(sfBookDirectory) == domainDir);
+            BEAST_EXPECT(
+                sleOffer->getFieldArray(sfAdditionalBooks).size() == 1);
+            BEAST_EXPECT(
+                sleOffer->getFieldArray(sfAdditionalBooks)[0].getFieldH256(
+                    sfBookDirectory) == openDir);
+
+            BEAST_EXPECT(checkOffer(
+                env, bob, bobOfferSeq, XRP(10), USD(10), lsfHybrid, true));
+            BEAST_EXPECT(checkDirectorySize(env, domainDir, i));
+            BEAST_EXPECT(checkDirectorySize(env, openDir, i));
+        }
+
+        for (auto const offerSeq : offerSeqs)
+        {
+            env(offer_cancel(bob, offerSeq));
+            env.close();
+            dirCnt--;
+            BEAST_EXPECT(!offerExists(env, bob, offerSeq));
+            BEAST_EXPECT(checkDirectorySize(env, domainDir, dirCnt));
+            BEAST_EXPECT(checkDirectorySize(env, openDir, dirCnt));
+        }
+    }
+
+    void
+    testAutoBridge(FeatureBitset features)
+    {
+        testcase("Auto bridge");
+
+        Env env(*this, features);
+        auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] =
+            PermissionedDEX(env);
+        auto const EUR = gw["EUR"];
+
+        for (auto const& account : {alice, bob, carol})
+        {
+            env(trust(account, EUR(10000)));
+            env.close();
+        }
+
+        env(pay(gw, carol, EUR(1)));
+        env.close();
+
+        auto const aliceOfferSeq{env.seq(alice)};
+        auto const bobOfferSeq{env.seq(bob)};
+        env(offer(alice, XRP(100), USD(1)), domain(domainID));
+        env(offer(bob, EUR(1), XRP(100)), domain(domainID));
+        env.close();
+
+        // carol's offer should cross bob and alice's offers due to auto
+        // bridging
+        auto const carolOfferSeq{env.seq(carol)};
+        env(offer(carol, USD(1), EUR(1)), domain(domainID));
+        env.close();
+
+        BEAST_EXPECT(!offerExists(env, bob, aliceOfferSeq));
+        BEAST_EXPECT(!offerExists(env, bob, bobOfferSeq));
+        BEAST_EXPECT(!offerExists(env, bob, carolOfferSeq));
+    }
+
+public:
+    void
+    run() override
+    {
+        FeatureBitset const all{jtx::supported_amendments()};
+
+        // Test domain offer (w/o hybrid)
+        testOfferCreate(all);
+        testPayment(all);
+        testBookStep(all);
+        testRippling(all);
+        testOfferTokenIssuerInDomain(all);
+        testRemoveUnfundedOffer(all);
+        testAmmNotUsed(all);
+        testAutoBridge(all);
+
+        // Test hybrid offers
+        testHybridOfferCreate(all);
+        testHybridBookStep(all);
+        testHybridInvalidOffer(all);
+        testHybridOfferDirectories(all);
+    }
+};
+
+BEAST_DEFINE_TESTSUITE(PermissionedDEX, app, ripple);
+
+} // namespace test
+} // namespace ripple
diff --git a/src/test/app/SetAuth_test.cpp b/src/test/app/SetAuth_test.cpp
index e55fbc4d5d..a4c2df6228 100644
--- a/src/test/app/SetAuth_test.cpp
+++ b/src/test/app/SetAuth_test.cpp
@@ -75,7 +75,8 @@ struct SetAuth_test : public beast::unit_test::suite
 {
     using namespace jtx;
     auto const sa = supported_amendments();
-
testAuth(sa - featureFlowCross - featurePermissionedDEX); + testAuth(sa - featurePermissionedDEX); testAuth(sa); } }; diff --git a/src/test/app/TheoreticalQuality_test.cpp b/src/test/app/TheoreticalQuality_test.cpp index 0269d206cc..1b3e6d9a82 100644 --- a/src/test/app/TheoreticalQuality_test.cpp +++ b/src/test/app/TheoreticalQuality_test.cpp @@ -267,6 +267,7 @@ class TheoreticalQuality_test : public beast::unit_test::suite sb.rules().enabled(featureOwnerPaysFee), OfferCrossing::no, ammContext, + std::nullopt, dummyJ); BEAST_EXPECT(sr.first == tesSUCCESS); diff --git a/src/test/app/TrustAndBalance_test.cpp b/src/test/app/TrustAndBalance_test.cpp index 037a7e0d89..8f092a725f 100644 --- a/src/test/app/TrustAndBalance_test.cpp +++ b/src/test/app/TrustAndBalance_test.cpp @@ -481,7 +481,8 @@ public: using namespace test::jtx; auto const sa = supported_amendments(); - testWithFeatures(sa - featureFlowCross); + testWithFeatures(sa - featureFlowCross - featurePermissionedDEX); + testWithFeatures(sa - featurePermissionedDEX); testWithFeatures(sa); } }; diff --git a/src/test/jtx.h b/src/test/jtx.h index 2e4764a403..fa67780cbd 100644 --- a/src/test/jtx.h +++ b/src/test/jtx.h @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -52,6 +53,7 @@ #include #include #include +#include #include #include #include diff --git a/src/test/jtx/domain.h b/src/test/jtx/domain.h new file mode 100644 index 0000000000..4af270c1d0 --- /dev/null +++ b/src/test/jtx/domain.h @@ -0,0 +1,45 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2025 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include + +namespace ripple { +namespace test { +namespace jtx { + +/** Set the domain on a JTx. */ +class domain +{ +private: + uint256 v_; + +public: + explicit domain(uint256 const& v) : v_(v) + { + } + + void + operator()(Env&, JTx& jt) const; +}; + +} // namespace jtx +} // namespace test +} // namespace ripple diff --git a/src/test/jtx/impl/domain.cpp b/src/test/jtx/impl/domain.cpp new file mode 100644 index 0000000000..51adb4ce98 --- /dev/null +++ b/src/test/jtx/impl/domain.cpp @@ -0,0 +1,36 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2025 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. 
+ + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include + +#include + +namespace ripple { +namespace test { +namespace jtx { + +void +domain::operator()(Env&, JTx& jt) const +{ + jt[sfDomainID.jsonName] = to_string(v_); +} + +} // namespace jtx +} // namespace test +} // namespace ripple diff --git a/src/test/jtx/impl/paths.cpp b/src/test/jtx/impl/paths.cpp index 2a45909eb9..f230305469 100644 --- a/src/test/jtx/impl/paths.cpp +++ b/src/test/jtx/impl/paths.cpp @@ -23,6 +23,8 @@ #include +#include + namespace ripple { namespace test { namespace jtx { @@ -34,6 +36,18 @@ paths::operator()(Env& env, JTx& jt) const auto const from = env.lookup(jv[jss::Account].asString()); auto const to = env.lookup(jv[jss::Destination].asString()); auto const amount = amountFromJson(sfAmount, jv[jss::Amount]); + + std::optional domain; + if (jv.isMember(sfDomainID.jsonName)) + { + if (!jv[sfDomainID.jsonName].isString()) + return; + uint256 num; + auto const s = jv[sfDomainID.jsonName].asString(); + if (num.parseHex(s)) + domain = num; + } + Pathfinder pf( std::make_shared( env.current(), env.app().journal("RippleLineCache")), @@ -43,6 +57,7 @@ paths::operator()(Env& env, JTx& jt) const in_.account, amount, std::nullopt, + domain, env.app()); if (!pf.findPaths(depth_)) return; diff --git a/src/test/jtx/impl/permissioned_dex.cpp b/src/test/jtx/impl/permissioned_dex.cpp new file mode 100644 index 0000000000..04497ebbdc --- /dev/null +++ b/src/test/jtx/impl/permissioned_dex.cpp @@ -0,0 +1,85 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2025 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include + +#include +#include + +#include + +namespace ripple { +namespace test { +namespace jtx { + +uint256 +setupDomain( + jtx::Env& env, + std::vector const& accounts, + jtx::Account const& domainOwner, + std::string const& credType) +{ + using namespace jtx; + env.fund(XRP(100000), domainOwner); + env.close(); + + pdomain::Credentials credentials{{domainOwner, credType}}; + env(pdomain::setTx(domainOwner, credentials)); + + auto const objects = pdomain::getObjects(domainOwner, env); + auto const domainID = objects.begin()->first; + + for (auto const& account : accounts) + { + env(credentials::create(account, domainOwner, credType)); + env.close(); + env(credentials::accept(account, domainOwner, credType)); + env.close(); + } + return domainID; +} + +PermissionedDEX::PermissionedDEX(Env& env) + : gw("permdex-gateway") + , domainOwner("permdex-domainOwner") + , alice("permdex-alice") + , bob("permdex-bob") + , carol("permdex-carol") + , USD(gw["USD"]) + , credType("permdex-abcde") +{ + // Fund accounts + env.fund(XRP(100000), alice, bob, carol, gw); + env.close(); + + domainID = setupDomain(env, {alice, bob, carol, gw}, domainOwner, credType); + + for (auto const& account : {alice, bob, carol, domainOwner}) + { + env.trust(USD(1000), account); + env.close(); + + env(pay(gw, account, USD(100))); + env.close(); + } +} + +} // namespace jtx +} // namespace test +} // namespace ripple diff --git a/src/test/jtx/permissioned_dex.h b/src/test/jtx/permissioned_dex.h new file mode 100644 index 0000000000..fb32e1c1be --- /dev/null +++ b/src/test/jtx/permissioned_dex.h @@ -0,0 +1,51 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2025 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#pragma once + +#include +namespace ripple { +namespace test { +namespace jtx { + +uint256 +setupDomain( + jtx::Env& env, + std::vector const& accounts, + jtx::Account const& domainOwner = jtx::Account("domainOwner"), + std::string const& credType = "Cred"); + +class PermissionedDEX +{ +public: + Account gw; + Account domainOwner; + Account alice; + Account bob; + Account carol; + IOU USD; + uint256 domainID; + std::string credType; + + PermissionedDEX(Env& env); +}; + +} // namespace jtx +} // namespace test +} // namespace ripple diff --git a/src/test/ledger/BookDirs_test.cpp b/src/test/ledger/BookDirs_test.cpp index ed7ca91083..28d9d2c102 100644 --- a/src/test/ledger/BookDirs_test.cpp +++ b/src/test/ledger/BookDirs_test.cpp @@ -37,7 +37,7 @@ struct BookDirs_test : public beast::unit_test::suite env.close(); { - Book book(xrpIssue(), USD.issue()); + Book book(xrpIssue(), USD.issue(), std::nullopt); { auto d = BookDirs(*env.current(), book); BEAST_EXPECT(std::begin(d) == std::end(d)); @@ -53,14 +53,16 @@ struct BookDirs_test : public beast::unit_test::suite env(offer("alice", Account("alice")["USD"](50), XRP(10))); auto d = BookDirs( *env.current(), - Book(Account("alice")["USD"].issue(), xrpIssue())); + Book( + Account("alice")["USD"].issue(), xrpIssue(), std::nullopt)); BEAST_EXPECT(std::distance(d.begin(), d.end()) == 1); } { env(offer("alice", gw["CNY"](50), XRP(10))); - auto d = - BookDirs(*env.current(), Book(gw["CNY"].issue(), xrpIssue())); + auto d = BookDirs( + *env.current(), + Book(gw["CNY"].issue(), xrpIssue(), std::nullopt)); BEAST_EXPECT(std::distance(d.begin(), d.end()) == 1); } @@ -70,7 +72,7 @@ struct BookDirs_test : public beast::unit_test::suite env(offer("alice", USD(50), Account("bob")["CNY"](10))); auto d = BookDirs( *env.current(), - Book(USD.issue(), Account("bob")["CNY"].issue())); + Book(USD.issue(), Account("bob")["CNY"].issue(), std::nullopt)); BEAST_EXPECT(std::distance(d.begin(), d.end()) == 1); } @@ -80,7 +82,8 @@ struct BookDirs_test : public beast::unit_test::suite for (auto k = 0; k < 80; ++k) env(offer("alice", AUD(i), XRP(j))); - auto d = BookDirs(*env.current(), Book(AUD.issue(), xrpIssue())); + auto d = BookDirs( + *env.current(), Book(AUD.issue(), xrpIssue(), std::nullopt)); BEAST_EXPECT(std::distance(d.begin(), d.end()) == 240); auto i = 1, j = 3, k = 0; for (auto const& e : d) @@ -101,7 +104,8 @@ struct BookDirs_test : public beast::unit_test::suite { using namespace jtx; auto const sa = supported_amendments(); - test_bookdir(sa - featureFlowCross); + test_bookdir(sa - featureFlowCross - featurePermissionedDEX); + test_bookdir(sa - featurePermissionedDEX); test_bookdir(sa); } }; diff --git a/src/test/ledger/Directory_test.cpp b/src/test/ledger/Directory_test.cpp index 825d7ff340..7aa6f149b8 100644 --- a/src/test/ledger/Directory_test.cpp +++ b/src/test/ledger/Directory_test.cpp @@ -132,7 +132,8 @@ struct Directory_test : public beast::unit_test::suite // Now check the orderbook: it should be in the order we placed // the offers. 
- auto book = BookDirs(*env.current(), Book({xrpIssue(), USD.issue()})); + auto book = BookDirs( + *env.current(), Book({xrpIssue(), USD.issue(), std::nullopt})); int count = 1; for (auto const& offer : book) @@ -291,7 +292,8 @@ struct Directory_test : public beast::unit_test::suite // should have no entries and be empty: { Sandbox sb(env.closed().get(), tapNONE); - uint256 const bookBase = getBookBase({xrpIssue(), USD.issue()}); + uint256 const bookBase = + getBookBase({xrpIssue(), USD.issue(), std::nullopt}); BEAST_EXPECT(dirIsEmpty(sb, keylet::page(bookBase))); BEAST_EXPECT(!sb.succ(bookBase, getQualityNext(bookBase))); diff --git a/src/test/ledger/Invariants_test.cpp b/src/test/ledger/Invariants_test.cpp index 7ceb76504d..76fcb34c7f 100644 --- a/src/test/ledger/Invariants_test.cpp +++ b/src/test/ledger/Invariants_test.cpp @@ -1046,6 +1046,30 @@ class Invariants_test : public beast::unit_test::suite }); } + void + createPermissionedDomain( + ApplyContext& ac, + std::shared_ptr& sle, + test::jtx::Account const& A1, + test::jtx::Account const& A2) + { + sle->setAccountID(sfOwner, A1); + sle->setFieldU32(sfSequence, 10); + + STArray credentials(sfAcceptedCredentials, 2); + for (std::size_t n = 0; n < 2; ++n) + { + auto cred = STObject::makeInnerObject(sfCredential); + cred.setAccountID(sfIssuer, A2); + auto credType = "cred_type" + std::to_string(n); + cred.setFieldVL( + sfCredentialType, Slice(credType.c_str(), credType.size())); + credentials.push_back(std::move(cred)); + } + sle->setFieldArray(sfAcceptedCredentials, credentials); + ac.view().insert(sle); + }; + void testPermissionedDomainInvariants() { @@ -1153,36 +1177,15 @@ class Invariants_test : public beast::unit_test::suite STTx{ttPERMISSIONED_DOMAIN_SET, [](STObject& tx) {}}, {tecINVARIANT_FAILED, tecINVARIANT_FAILED}); - auto const createPD = [](ApplyContext& ac, - std::shared_ptr& sle, - Account const& A1, - Account const& A2) { - sle->setAccountID(sfOwner, A1); - sle->setFieldU32(sfSequence, 10); - - STArray credentials(sfAcceptedCredentials, 2); - for (std::size_t n = 0; n < 2; ++n) - { - auto cred = STObject::makeInnerObject(sfCredential); - cred.setAccountID(sfIssuer, A2); - auto credType = "cred_type" + std::to_string(n); - cred.setFieldVL( - sfCredentialType, Slice(credType.c_str(), credType.size())); - credentials.push_back(std::move(cred)); - } - sle->setFieldArray(sfAcceptedCredentials, credentials); - ac.view().insert(sle); - }; - testcase << "PermissionedDomain Set 1"; doInvariantCheck( {{"permissioned domain with no rules."}}, - [createPD](Account const& A1, Account const& A2, ApplyContext& ac) { + [&](Account const& A1, Account const& A2, ApplyContext& ac) { Keylet const pdKeylet = keylet::permissionedDomain(A1.id(), 10); auto slePd = std::make_shared(pdKeylet); // create PD - createPD(ac, slePd, A1, A2); + createPermissionedDomain(ac, slePd, A1, A2); // update PD with empty rules { @@ -1201,12 +1204,12 @@ class Invariants_test : public beast::unit_test::suite doInvariantCheck( {{"permissioned domain bad credentials size " + std::to_string(tooBig)}}, - [createPD](Account const& A1, Account const& A2, ApplyContext& ac) { + [&](Account const& A1, Account const& A2, ApplyContext& ac) { Keylet const pdKeylet = keylet::permissionedDomain(A1.id(), 10); auto slePd = std::make_shared(pdKeylet); // create PD - createPD(ac, slePd, A1, A2); + createPermissionedDomain(ac, slePd, A1, A2); // update PD { @@ -1236,12 +1239,12 @@ class Invariants_test : public beast::unit_test::suite testcase << "PermissionedDomain Set 3"; 
doInvariantCheck( {{"permissioned domain credentials aren't sorted"}}, - [createPD](Account const& A1, Account const& A2, ApplyContext& ac) { + [&](Account const& A1, Account const& A2, ApplyContext& ac) { Keylet const pdKeylet = keylet::permissionedDomain(A1.id(), 10); auto slePd = std::make_shared(pdKeylet); // create PD - createPD(ac, slePd, A1, A2); + createPermissionedDomain(ac, slePd, A1, A2); // update PD { @@ -1271,12 +1274,12 @@ class Invariants_test : public beast::unit_test::suite testcase << "PermissionedDomain Set 4"; doInvariantCheck( {{"permissioned domain credentials aren't unique"}}, - [createPD](Account const& A1, Account const& A2, ApplyContext& ac) { + [&](Account const& A1, Account const& A2, ApplyContext& ac) { Keylet const pdKeylet = keylet::permissionedDomain(A1.id(), 10); auto slePd = std::make_shared(pdKeylet); // create PD - createPD(ac, slePd, A1, A2); + createPermissionedDomain(ac, slePd, A1, A2); // update PD { @@ -1300,6 +1303,175 @@ class Invariants_test : public beast::unit_test::suite {tecINVARIANT_FAILED, tecINVARIANT_FAILED}); } + void + testPermissionedDEX() + { + using namespace test::jtx; + testcase << "PermissionedDEX"; + + doInvariantCheck( + {{"domain doesn't exist"}}, + [](Account const& A1, Account const&, ApplyContext& ac) { + Keylet const offerKey = keylet::offer(A1.id(), 10); + auto sleOffer = std::make_shared(offerKey); + sleOffer->setAccountID(sfAccount, A1); + sleOffer->setFieldAmount(sfTakerPays, A1["USD"](10)); + sleOffer->setFieldAmount(sfTakerGets, XRP(1)); + ac.view().insert(sleOffer); + return true; + }, + XRPAmount{}, + STTx{ + ttOFFER_CREATE, + [](STObject& tx) { + tx.setFieldH256( + sfDomainID, + uint256{ + "F10D0CC9A0F9A3CBF585B80BE09A186483668FDBDD39AA7E33" + "70F3649CE134E5"}); + Account const A1{"A1"}; + tx.setFieldAmount(sfTakerPays, A1["USD"](10)); + tx.setFieldAmount(sfTakerGets, XRP(1)); + }}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}); + + // missing domain ID in offer object + doInvariantCheck( + {{"hybrid offer is malformed"}}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + Keylet const pdKeylet = keylet::permissionedDomain(A1.id(), 10); + auto slePd = std::make_shared(pdKeylet); + createPermissionedDomain(ac, slePd, A1, A2); + + Keylet const offerKey = keylet::offer(A2.id(), 10); + auto sleOffer = std::make_shared(offerKey); + sleOffer->setAccountID(sfAccount, A2); + sleOffer->setFieldAmount(sfTakerPays, A1["USD"](10)); + sleOffer->setFieldAmount(sfTakerGets, XRP(1)); + sleOffer->setFlag(lsfHybrid); + + STArray bookArr; + bookArr.push_back(STObject::makeInnerObject(sfBook)); + sleOffer->setFieldArray(sfAdditionalBooks, bookArr); + ac.view().insert(sleOffer); + return true; + }, + XRPAmount{}, + STTx{ttOFFER_CREATE, [&](STObject& tx) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}); + + // more than one entry in sfAdditionalBooks + doInvariantCheck( + {{"hybrid offer is malformed"}}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + Keylet const pdKeylet = keylet::permissionedDomain(A1.id(), 10); + auto slePd = std::make_shared(pdKeylet); + createPermissionedDomain(ac, slePd, A1, A2); + + Keylet const offerKey = keylet::offer(A2.id(), 10); + auto sleOffer = std::make_shared(offerKey); + sleOffer->setAccountID(sfAccount, A2); + sleOffer->setFieldAmount(sfTakerPays, A1["USD"](10)); + sleOffer->setFieldAmount(sfTakerGets, XRP(1)); + sleOffer->setFlag(lsfHybrid); + sleOffer->setFieldH256(sfDomainID, pdKeylet.key); + + STArray bookArr; + 
bookArr.push_back(STObject::makeInnerObject(sfBook)); + bookArr.push_back(STObject::makeInnerObject(sfBook)); + sleOffer->setFieldArray(sfAdditionalBooks, bookArr); + ac.view().insert(sleOffer); + return true; + }, + XRPAmount{}, + STTx{ttOFFER_CREATE, [&](STObject& tx) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}); + + // hybrid offer missing sfAdditionalBooks + doInvariantCheck( + {{"hybrid offer is malformed"}}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + Keylet const pdKeylet = keylet::permissionedDomain(A1.id(), 10); + auto slePd = std::make_shared(pdKeylet); + createPermissionedDomain(ac, slePd, A1, A2); + + Keylet const offerKey = keylet::offer(A2.id(), 10); + auto sleOffer = std::make_shared(offerKey); + sleOffer->setAccountID(sfAccount, A2); + sleOffer->setFieldAmount(sfTakerPays, A1["USD"](10)); + sleOffer->setFieldAmount(sfTakerGets, XRP(1)); + sleOffer->setFlag(lsfHybrid); + sleOffer->setFieldH256(sfDomainID, pdKeylet.key); + ac.view().insert(sleOffer); + return true; + }, + XRPAmount{}, + STTx{ttOFFER_CREATE, [&](STObject& tx) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}); + + doInvariantCheck( + {{"transaction consumed wrong domains"}}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + Keylet const pdKeylet = keylet::permissionedDomain(A1.id(), 10); + auto slePd = std::make_shared(pdKeylet); + createPermissionedDomain(ac, slePd, A1, A2); + + Keylet const badDomainKeylet = + keylet::permissionedDomain(A1.id(), 20); + auto sleBadPd = std::make_shared(badDomainKeylet); + createPermissionedDomain(ac, sleBadPd, A1, A2); + + Keylet const offerKey = keylet::offer(A2.id(), 10); + auto sleOffer = std::make_shared(offerKey); + sleOffer->setAccountID(sfAccount, A2); + sleOffer->setFieldAmount(sfTakerPays, A1["USD"](10)); + sleOffer->setFieldAmount(sfTakerGets, XRP(1)); + sleOffer->setFieldH256(sfDomainID, pdKeylet.key); + ac.view().insert(sleOffer); + return true; + }, + XRPAmount{}, + STTx{ + ttOFFER_CREATE, + [&](STObject& tx) { + Account const A1{"A1"}; + Keylet const badDomainKey = + keylet::permissionedDomain(A1.id(), 20); + tx.setFieldH256(sfDomainID, badDomainKey.key); + tx.setFieldAmount(sfTakerPays, A1["USD"](10)); + tx.setFieldAmount(sfTakerGets, XRP(1)); + }}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}); + + doInvariantCheck( + {{"domain transaction affected regular offers"}}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + Keylet const pdKeylet = keylet::permissionedDomain(A1.id(), 10); + auto slePd = std::make_shared(pdKeylet); + createPermissionedDomain(ac, slePd, A1, A2); + + Keylet const offerKey = keylet::offer(A2.id(), 10); + auto sleOffer = std::make_shared(offerKey); + sleOffer->setAccountID(sfAccount, A2); + sleOffer->setFieldAmount(sfTakerPays, A1["USD"](10)); + sleOffer->setFieldAmount(sfTakerGets, XRP(1)); + ac.view().insert(sleOffer); + return true; + }, + XRPAmount{}, + STTx{ + ttOFFER_CREATE, + [&](STObject& tx) { + Account const A1{"A1"}; + Keylet const domainKey = + keylet::permissionedDomain(A1.id(), 10); + tx.setFieldH256(sfDomainID, domainKey.key); + tx.setFieldAmount(sfTakerPays, A1["USD"](10)); + tx.setFieldAmount(sfTakerGets, XRP(1)); + }}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}); + } + public: void run() override @@ -1318,6 +1490,7 @@ public: testValidNewAccountRoot(); testNFTokenPageInvariants(); testPermissionedDomainInvariants(); + testPermissionedDEX(); } }; diff --git a/src/test/ledger/PaymentSandbox_test.cpp b/src/test/ledger/PaymentSandbox_test.cpp index 
303e700f40..8bb0666e06 100644 --- a/src/test/ledger/PaymentSandbox_test.cpp +++ b/src/test/ledger/PaymentSandbox_test.cpp @@ -421,7 +421,8 @@ public: }; using namespace jtx; auto const sa = supported_amendments(); - testAll(sa - featureFlowCross); + testAll(sa - featureFlowCross - featurePermissionedDEX); + testAll(sa - featurePermissionedDEX); testAll(sa); } }; diff --git a/src/test/protocol/Issue_test.cpp b/src/test/protocol/Issue_test.cpp index 53ebf5be24..35f3a3bd8c 100644 --- a/src/test/protocol/Issue_test.cpp +++ b/src/test/protocol/Issue_test.cpp @@ -22,7 +22,10 @@ #include #include +#include + #include +#include #include #include #include @@ -46,6 +49,8 @@ namespace ripple { class Issue_test : public beast::unit_test::suite { public: + using Domain = uint256; + // Comparison, hash tests for uint60 (via base_uint) template void @@ -239,6 +244,120 @@ public: } } + template + void + testIssueDomainSet() + { + Currency const c1(1); + AccountID const i1(1); + Currency const c2(2); + AccountID const i2(2); + Issue const a1(c1, i1); + Issue const a2(c2, i2); + uint256 const domain1{1}; + uint256 const domain2{2}; + + Set c; + + c.insert(std::make_pair(a1, domain1)); + if (!BEAST_EXPECT(c.size() == 1)) + return; + c.insert(std::make_pair(a2, domain1)); + if (!BEAST_EXPECT(c.size() == 2)) + return; + c.insert(std::make_pair(a2, domain2)); + if (!BEAST_EXPECT(c.size() == 3)) + return; + + if (!BEAST_EXPECT(c.erase(std::make_pair(Issue(c1, i2), domain1)) == 0)) + return; + if (!BEAST_EXPECT(c.erase(std::make_pair(a1, domain1)) == 1)) + return; + if (!BEAST_EXPECT(c.erase(std::make_pair(a2, domain1)) == 1)) + return; + if (!BEAST_EXPECT(c.erase(std::make_pair(a2, domain2)) == 1)) + return; + if (!BEAST_EXPECT(c.empty())) + return; + } + + template + void + testIssueDomainMap() + { + Currency const c1(1); + AccountID const i1(1); + Currency const c2(2); + AccountID const i2(2); + Issue const a1(c1, i1); + Issue const a2(c2, i2); + uint256 const domain1{1}; + uint256 const domain2{2}; + + Map c; + + c.insert(std::make_pair(std::make_pair(a1, domain1), 1)); + if (!BEAST_EXPECT(c.size() == 1)) + return; + c.insert(std::make_pair(std::make_pair(a2, domain1), 2)); + if (!BEAST_EXPECT(c.size() == 2)) + return; + c.insert(std::make_pair(std::make_pair(a2, domain2), 2)); + if (!BEAST_EXPECT(c.size() == 3)) + return; + + if (!BEAST_EXPECT(c.erase(std::make_pair(Issue(c1, i2), domain1)) == 0)) + return; + if (!BEAST_EXPECT(c.erase(std::make_pair(a1, domain1)) == 1)) + return; + if (!BEAST_EXPECT(c.erase(std::make_pair(a2, domain1)) == 1)) + return; + if (!BEAST_EXPECT(c.erase(std::make_pair(a2, domain2)) == 1)) + return; + if (!BEAST_EXPECT(c.empty())) + return; + } + + void + testIssueDomainSets() + { + testcase("std::set >"); + testIssueDomainSet>>(); + + testcase("std::set >"); + testIssueDomainSet>>(); + + testcase("hash_set >"); + testIssueDomainSet>>(); + + testcase("hash_set >"); + testIssueDomainSet>>(); + } + + void + testIssueDomainMaps() + { + testcase("std::map , int>"); + testIssueDomainMap, int>>(); + + testcase("std::map , int>"); + testIssueDomainMap, int>>(); + +#if RIPPLE_ASSETS_ENABLE_STD_HASH + testcase("hash_map , int>"); + testIssueDomainMap, int>>(); + + testcase("hash_map , int>"); + testIssueDomainMap, int>>(); + + testcase("hardened_hash_map , int>"); + testIssueDomainMap, int>>(); + + testcase("hardened_hash_map , int>"); + testIssueDomainMap, int>>(); +#endif + } + void testIssueSets() { @@ -306,15 +425,88 @@ public: Issue a2(c1, i2); Issue a3(c2, i2); Issue a4(c3, i2); + 
uint256 const domain1{1}; + uint256 const domain2{2}; - BEAST_EXPECT(Book(a1, a2) != Book(a2, a3)); - BEAST_EXPECT(Book(a1, a2) < Book(a2, a3)); - BEAST_EXPECT(Book(a1, a2) <= Book(a2, a3)); - BEAST_EXPECT(Book(a2, a3) <= Book(a2, a3)); - BEAST_EXPECT(Book(a2, a3) == Book(a2, a3)); - BEAST_EXPECT(Book(a2, a3) >= Book(a2, a3)); - BEAST_EXPECT(Book(a3, a4) >= Book(a2, a3)); - BEAST_EXPECT(Book(a3, a4) > Book(a2, a3)); + // Books without domains + BEAST_EXPECT(Book(a1, a2, std::nullopt) != Book(a2, a3, std::nullopt)); + BEAST_EXPECT(Book(a1, a2, std::nullopt) < Book(a2, a3, std::nullopt)); + BEAST_EXPECT(Book(a1, a2, std::nullopt) <= Book(a2, a3, std::nullopt)); + BEAST_EXPECT(Book(a2, a3, std::nullopt) <= Book(a2, a3, std::nullopt)); + BEAST_EXPECT(Book(a2, a3, std::nullopt) == Book(a2, a3, std::nullopt)); + BEAST_EXPECT(Book(a2, a3, std::nullopt) >= Book(a2, a3, std::nullopt)); + BEAST_EXPECT(Book(a3, a4, std::nullopt) >= Book(a2, a3, std::nullopt)); + BEAST_EXPECT(Book(a3, a4, std::nullopt) > Book(a2, a3, std::nullopt)); + + // test domain books + { + // Books with different domains + BEAST_EXPECT(Book(a2, a3, domain1) != Book(a2, a3, domain2)); + BEAST_EXPECT(Book(a2, a3, domain1) < Book(a2, a3, domain2)); + BEAST_EXPECT(Book(a2, a3, domain2) > Book(a2, a3, domain1)); + + // One Book has a domain, the other does not + BEAST_EXPECT(Book(a2, a3, domain1) != Book(a2, a3, std::nullopt)); + BEAST_EXPECT(Book(a2, a3, std::nullopt) < Book(a2, a3, domain1)); + BEAST_EXPECT(Book(a2, a3, domain1) > Book(a2, a3, std::nullopt)); + + // Both Books have the same domain + BEAST_EXPECT(Book(a2, a3, domain1) == Book(a2, a3, domain1)); + BEAST_EXPECT(Book(a2, a3, domain2) == Book(a2, a3, domain2)); + BEAST_EXPECT( + Book(a2, a3, std::nullopt) == Book(a2, a3, std::nullopt)); + + // Both Books have no domain + BEAST_EXPECT( + Book(a2, a3, std::nullopt) == Book(a2, a3, std::nullopt)); + + // Testing comparisons with >= and <= + + // When comparing books with domain1 vs domain2 + BEAST_EXPECT(Book(a2, a3, domain1) <= Book(a2, a3, domain2)); + BEAST_EXPECT(Book(a2, a3, domain2) >= Book(a2, a3, domain1)); + BEAST_EXPECT(Book(a2, a3, domain1) >= Book(a2, a3, domain1)); + BEAST_EXPECT(Book(a2, a3, domain2) <= Book(a2, a3, domain2)); + + // One Book has domain1 and the other has no domain + BEAST_EXPECT(Book(a2, a3, domain1) > Book(a2, a3, std::nullopt)); + BEAST_EXPECT(Book(a2, a3, std::nullopt) < Book(a2, a3, domain1)); + + // One Book has domain2 and the other has no domain + BEAST_EXPECT(Book(a2, a3, domain2) > Book(a2, a3, std::nullopt)); + BEAST_EXPECT(Book(a2, a3, std::nullopt) < Book(a2, a3, domain2)); + + // Comparing two Books with no domains + BEAST_EXPECT( + Book(a2, a3, std::nullopt) <= Book(a2, a3, std::nullopt)); + BEAST_EXPECT( + Book(a2, a3, std::nullopt) >= Book(a2, a3, std::nullopt)); + + // Test case where domain1 is less than domain2 + BEAST_EXPECT(Book(a2, a3, domain1) <= Book(a2, a3, domain2)); + BEAST_EXPECT(Book(a2, a3, domain2) >= Book(a2, a3, domain1)); + + // Test case where domain2 is equal to domain1 + BEAST_EXPECT(Book(a2, a3, domain1) >= Book(a2, a3, domain1)); + BEAST_EXPECT(Book(a2, a3, domain1) <= Book(a2, a3, domain1)); + + // More test cases involving a4 (with domain2) + + // Comparing Book with domain2 (a4) to a Book with domain1 + BEAST_EXPECT(Book(a2, a3, domain1) < Book(a3, a4, domain2)); + BEAST_EXPECT(Book(a3, a4, domain2) > Book(a2, a3, domain1)); + + // Comparing Book with domain2 (a4) to a Book with no domain + BEAST_EXPECT(Book(a3, a4, domain2) > Book(a2, a3, 
std::nullopt)); + BEAST_EXPECT(Book(a2, a3, std::nullopt) < Book(a3, a4, domain2)); + + // Comparing Book with domain2 (a4) to a Book with the same domain + BEAST_EXPECT(Book(a3, a4, domain2) == Book(a3, a4, domain2)); + + // Comparing Book with domain2 (a4) to a Book with domain1 + BEAST_EXPECT(Book(a2, a3, domain1) < Book(a3, a4, domain2)); + BEAST_EXPECT(Book(a3, a4, domain2) > Book(a2, a3, domain1)); + } std::hash hash; @@ -336,18 +528,99 @@ public: // log << std::hex << hash (Book (a3, a4)); // log << std::hex << hash (Book (a3, a4)); - BEAST_EXPECT(hash(Book(a1, a2)) == hash(Book(a1, a2))); - BEAST_EXPECT(hash(Book(a1, a3)) == hash(Book(a1, a3))); - BEAST_EXPECT(hash(Book(a1, a4)) == hash(Book(a1, a4))); - BEAST_EXPECT(hash(Book(a2, a3)) == hash(Book(a2, a3))); - BEAST_EXPECT(hash(Book(a2, a4)) == hash(Book(a2, a4))); - BEAST_EXPECT(hash(Book(a3, a4)) == hash(Book(a3, a4))); + BEAST_EXPECT( + hash(Book(a1, a2, std::nullopt)) == + hash(Book(a1, a2, std::nullopt))); + BEAST_EXPECT( + hash(Book(a1, a3, std::nullopt)) == + hash(Book(a1, a3, std::nullopt))); + BEAST_EXPECT( + hash(Book(a1, a4, std::nullopt)) == + hash(Book(a1, a4, std::nullopt))); + BEAST_EXPECT( + hash(Book(a2, a3, std::nullopt)) == + hash(Book(a2, a3, std::nullopt))); + BEAST_EXPECT( + hash(Book(a2, a4, std::nullopt)) == + hash(Book(a2, a4, std::nullopt))); + BEAST_EXPECT( + hash(Book(a3, a4, std::nullopt)) == + hash(Book(a3, a4, std::nullopt))); - BEAST_EXPECT(hash(Book(a1, a2)) != hash(Book(a1, a3))); - BEAST_EXPECT(hash(Book(a1, a2)) != hash(Book(a1, a4))); - BEAST_EXPECT(hash(Book(a1, a2)) != hash(Book(a2, a3))); - BEAST_EXPECT(hash(Book(a1, a2)) != hash(Book(a2, a4))); - BEAST_EXPECT(hash(Book(a1, a2)) != hash(Book(a3, a4))); + BEAST_EXPECT( + hash(Book(a1, a2, std::nullopt)) != + hash(Book(a1, a3, std::nullopt))); + BEAST_EXPECT( + hash(Book(a1, a2, std::nullopt)) != + hash(Book(a1, a4, std::nullopt))); + BEAST_EXPECT( + hash(Book(a1, a2, std::nullopt)) != + hash(Book(a2, a3, std::nullopt))); + BEAST_EXPECT( + hash(Book(a1, a2, std::nullopt)) != + hash(Book(a2, a4, std::nullopt))); + BEAST_EXPECT( + hash(Book(a1, a2, std::nullopt)) != + hash(Book(a3, a4, std::nullopt))); + + // Books with domain + BEAST_EXPECT( + hash(Book(a1, a2, domain1)) == hash(Book(a1, a2, domain1))); + BEAST_EXPECT( + hash(Book(a1, a3, domain1)) == hash(Book(a1, a3, domain1))); + BEAST_EXPECT( + hash(Book(a1, a4, domain1)) == hash(Book(a1, a4, domain1))); + BEAST_EXPECT( + hash(Book(a2, a3, domain1)) == hash(Book(a2, a3, domain1))); + BEAST_EXPECT( + hash(Book(a2, a4, domain1)) == hash(Book(a2, a4, domain1))); + BEAST_EXPECT( + hash(Book(a3, a4, domain1)) == hash(Book(a3, a4, domain1))); + BEAST_EXPECT( + hash(Book(a1, a2, std::nullopt)) == + hash(Book(a1, a2, std::nullopt))); + + // Comparing Books with domain1 vs no domain + BEAST_EXPECT( + hash(Book(a1, a2, std::nullopt)) != hash(Book(a1, a2, domain1))); + BEAST_EXPECT( + hash(Book(a1, a3, std::nullopt)) != hash(Book(a1, a3, domain1))); + BEAST_EXPECT( + hash(Book(a1, a4, std::nullopt)) != hash(Book(a1, a4, domain1))); + BEAST_EXPECT( + hash(Book(a2, a3, std::nullopt)) != hash(Book(a2, a3, domain1))); + BEAST_EXPECT( + hash(Book(a2, a4, std::nullopt)) != hash(Book(a2, a4, domain1))); + BEAST_EXPECT( + hash(Book(a3, a4, std::nullopt)) != hash(Book(a3, a4, domain1))); + + // Books with domain1 but different Issues + BEAST_EXPECT( + hash(Book(a1, a2, domain1)) != hash(Book(a1, a3, domain1))); + BEAST_EXPECT( + hash(Book(a1, a2, domain1)) != hash(Book(a1, a4, domain1))); + BEAST_EXPECT( + 
hash(Book(a2, a3, domain1)) != hash(Book(a2, a4, domain1))); + BEAST_EXPECT( + hash(Book(a1, a2, domain1)) != hash(Book(a2, a3, domain1))); + BEAST_EXPECT( + hash(Book(a2, a4, domain1)) != hash(Book(a3, a4, domain1))); + BEAST_EXPECT( + hash(Book(a3, a4, domain1)) != hash(Book(a1, a4, domain1))); + + // Books with domain1 and domain2 + BEAST_EXPECT( + hash(Book(a1, a2, domain1)) != hash(Book(a1, a2, domain2))); + BEAST_EXPECT( + hash(Book(a1, a3, domain1)) != hash(Book(a1, a3, domain2))); + BEAST_EXPECT( + hash(Book(a1, a4, domain1)) != hash(Book(a1, a4, domain2))); + BEAST_EXPECT( + hash(Book(a2, a3, domain1)) != hash(Book(a2, a3, domain2))); + BEAST_EXPECT( + hash(Book(a2, a4, domain1)) != hash(Book(a2, a4, domain2))); + BEAST_EXPECT( + hash(Book(a3, a4, domain1)) != hash(Book(a3, a4, domain2))); } //-------------------------------------------------------------------------- @@ -362,8 +635,16 @@ public: AccountID const i2(2); Issue const a1(c1, i1); Issue const a2(c2, i2); - Book const b1(a1, a2); - Book const b2(a2, a1); + Book const b1(a1, a2, std::nullopt); + Book const b2(a2, a1, std::nullopt); + + uint256 const domain1{1}; + uint256 const domain2{2}; + + Book const b1_d1(a1, a2, domain1); + Book const b2_d1(a2, a1, domain1); + Book const b1_d2(a1, a2, domain2); + Book const b2_d2(a2, a1, domain2); { Set c; @@ -375,11 +656,11 @@ public: if (!BEAST_EXPECT(c.size() == 2)) return; - if (!BEAST_EXPECT(c.erase(Book(a1, a1)) == 0)) + if (!BEAST_EXPECT(c.erase(Book(a1, a1, std::nullopt)) == 0)) return; - if (!BEAST_EXPECT(c.erase(Book(a1, a2)) == 1)) + if (!BEAST_EXPECT(c.erase(Book(a1, a2, std::nullopt)) == 1)) return; - if (!BEAST_EXPECT(c.erase(Book(a2, a1)) == 1)) + if (!BEAST_EXPECT(c.erase(Book(a2, a1, std::nullopt)) == 1)) return; if (!BEAST_EXPECT(c.empty())) return; @@ -395,11 +676,11 @@ public: if (!BEAST_EXPECT(c.size() == 2)) return; - if (!BEAST_EXPECT(c.erase(Book(a1, a1)) == 0)) + if (!BEAST_EXPECT(c.erase(Book(a1, a1, std::nullopt)) == 0)) return; - if (!BEAST_EXPECT(c.erase(Book(a1, a2)) == 1)) + if (!BEAST_EXPECT(c.erase(Book(a1, a2, std::nullopt)) == 1)) return; - if (!BEAST_EXPECT(c.erase(Book(a2, a1)) == 1)) + if (!BEAST_EXPECT(c.erase(Book(a2, a1, std::nullopt)) == 1)) return; if (!BEAST_EXPECT(c.empty())) return; @@ -413,6 +694,66 @@ public: return; #endif } + + { + Set c; + + c.insert(b1_d1); + if (!BEAST_EXPECT(c.size() == 1)) + return; + c.insert(b2_d1); + if (!BEAST_EXPECT(c.size() == 2)) + return; + c.insert(b1_d2); + if (!BEAST_EXPECT(c.size() == 3)) + return; + c.insert(b2_d2); + if (!BEAST_EXPECT(c.size() == 4)) + return; + + // Try removing non-existent elements + if (!BEAST_EXPECT(c.erase(Book(a2, a2, domain1)) == 0)) + return; + + if (!BEAST_EXPECT(c.erase(Book(a1, a2, domain1)) == 1)) + return; + if (!BEAST_EXPECT(c.erase(Book(a2, a1, domain1)) == 1)) + return; + if (!BEAST_EXPECT(c.size() == 2)) + return; + + if (!BEAST_EXPECT(c.erase(Book(a1, a2, domain2)) == 1)) + return; + if (!BEAST_EXPECT(c.erase(Book(a2, a1, domain2)) == 1)) + return; + if (!BEAST_EXPECT(c.empty())) + return; + } + + { + Set c; + + c.insert(b1); + c.insert(b2); + c.insert(b1_d1); + c.insert(b2_d1); + if (!BEAST_EXPECT(c.size() == 4)) + return; + + if (!BEAST_EXPECT(c.erase(Book(a1, a2, std::nullopt)) == 1)) + return; + if (!BEAST_EXPECT(c.erase(Book(a2, a1, std::nullopt)) == 1)) + return; + if (!BEAST_EXPECT(c.size() == 2)) + return; + + if (!BEAST_EXPECT(c.erase(Book(a1, a2, domain1)) == 1)) + return; + if (!BEAST_EXPECT(c.erase(Book(a2, a1, domain1)) == 1)) + return; + if 
(!BEAST_EXPECT(c.empty())) + return; + } } template @@ -425,8 +766,16 @@ public: AccountID const i2(2); Issue const a1(c1, i1); Issue const a2(c2, i2); - Book const b1(a1, a2); - Book const b2(a2, a1); + Book const b1(a1, a2, std::nullopt); + Book const b2(a2, a1, std::nullopt); + + uint256 const domain1{1}; + uint256 const domain2{2}; + + Book const b1_d1(a1, a2, domain1); + Book const b2_d1(a2, a1, domain1); + Book const b1_d2(a1, a2, domain2); + Book const b2_d2(a2, a1, domain2); // typename Map::value_type value_type; // std::pair value_type; @@ -443,11 +792,11 @@ public: if (!BEAST_EXPECT(c.size() == 2)) return; - if (!BEAST_EXPECT(c.erase(Book(a1, a1)) == 0)) + if (!BEAST_EXPECT(c.erase(Book(a1, a1, std::nullopt)) == 0)) return; - if (!BEAST_EXPECT(c.erase(Book(a1, a2)) == 1)) + if (!BEAST_EXPECT(c.erase(Book(a1, a2, std::nullopt)) == 1)) return; - if (!BEAST_EXPECT(c.erase(Book(a2, a1)) == 1)) + if (!BEAST_EXPECT(c.erase(Book(a2, a1, std::nullopt)) == 1)) return; if (!BEAST_EXPECT(c.empty())) return; @@ -465,11 +814,77 @@ public: if (!BEAST_EXPECT(c.size() == 2)) return; - if (!BEAST_EXPECT(c.erase(Book(a1, a1)) == 0)) + if (!BEAST_EXPECT(c.erase(Book(a1, a1, std::nullopt)) == 0)) return; - if (!BEAST_EXPECT(c.erase(Book(a1, a2)) == 1)) + if (!BEAST_EXPECT(c.erase(Book(a1, a2, std::nullopt)) == 1)) return; - if (!BEAST_EXPECT(c.erase(Book(a2, a1)) == 1)) + if (!BEAST_EXPECT(c.erase(Book(a2, a1, std::nullopt)) == 1)) + return; + if (!BEAST_EXPECT(c.empty())) + return; + } + + { + Map c; + + c.insert(std::make_pair(b1_d1, 10)); + if (!BEAST_EXPECT(c.size() == 1)) + return; + c.insert(std::make_pair(b2_d1, 20)); + if (!BEAST_EXPECT(c.size() == 2)) + return; + c.insert(std::make_pair(b1_d2, 30)); + if (!BEAST_EXPECT(c.size() == 3)) + return; + c.insert(std::make_pair(b2_d2, 40)); + if (!BEAST_EXPECT(c.size() == 4)) + return; + + // Try removing non-existent elements + if (!BEAST_EXPECT(c.erase(Book(a2, a2, domain1)) == 0)) + return; + + if (!BEAST_EXPECT(c.erase(Book(a1, a2, domain1)) == 1)) + return; + if (!BEAST_EXPECT(c.erase(Book(a2, a1, domain1)) == 1)) + return; + if (!BEAST_EXPECT(c.size() == 2)) + return; + + if (!BEAST_EXPECT(c.erase(Book(a1, a2, domain2)) == 1)) + return; + if (!BEAST_EXPECT(c.erase(Book(a2, a1, domain2)) == 1)) + return; + if (!BEAST_EXPECT(c.empty())) + return; + } + + { + Map c; + + c.insert(std::make_pair(b1, 1)); + c.insert(std::make_pair(b2, 2)); + c.insert(std::make_pair(b1_d1, 3)); + c.insert(std::make_pair(b2_d1, 4)); + if (!BEAST_EXPECT(c.size() == 4)) + return; + + // Try removing non-existent elements + if (!BEAST_EXPECT(c.erase(Book(a1, a1, domain1)) == 0)) + return; + if (!BEAST_EXPECT(c.erase(Book(a2, a2, domain2)) == 0)) + return; + + if (!BEAST_EXPECT(c.erase(Book(a1, a2, std::nullopt)) == 1)) + return; + if (!BEAST_EXPECT(c.erase(Book(a2, a1, std::nullopt)) == 1)) + return; + if (!BEAST_EXPECT(c.size() == 2)) + return; + + if (!BEAST_EXPECT(c.erase(Book(a1, a2, domain1)) == 1)) + return; + if (!BEAST_EXPECT(c.erase(Book(a2, a1, domain1)) == 1)) return; if (!BEAST_EXPECT(c.empty())) return; @@ -556,6 +971,10 @@ public: testBookSets(); testBookMaps(); + + // --- + testIssueDomainSets(); + testIssueDomainMaps(); } }; diff --git a/src/test/rpc/BookChanges_test.cpp b/src/test/rpc/BookChanges_test.cpp index 95997538d7..1f059c2bf7 100644 --- a/src/test/rpc/BookChanges_test.cpp +++ b/src/test/rpc/BookChanges_test.cpp @@ -18,6 +18,10 @@ //============================================================================== #include +#include + +#include 
"xrpl/beast/unit_test/suite.h" +#include "xrpl/protocol/jss.h" namespace ripple { namespace test { @@ -83,14 +87,59 @@ public: // == 3); } + void + testDomainOffer() + { + testcase("Domain Offer"); + using namespace jtx; + + FeatureBitset const all{ + jtx::supported_amendments() | featurePermissionedDomains | + featureCredentials | featurePermissionedDEX}; + + Env env(*this, all); + PermissionedDEX permDex(env); + auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = + permDex; + + auto wsc = makeWSClient(env.app().config()); + + env(offer(alice, XRP(10), USD(10)), domain(domainID)); + env.close(); + + env(pay(bob, carol, USD(10)), + path(~USD), + sendmax(XRP(10)), + domain(domainID)); + env.close(); + + std::string const txHash{ + env.tx()->getJson(JsonOptions::none)[jss::hash].asString()}; + + Json::Value const txResult = env.rpc("tx", txHash)[jss::result]; + auto const ledgerIndex = txResult[jss::ledger_index].asInt(); + + Json::Value jvParams; + jvParams[jss::ledger_index] = ledgerIndex; + + auto jv = wsc->invoke("book_changes", jvParams); + auto jrr = jv[jss::result]; + + BEAST_EXPECT(jrr[jss::changes].size() == 1); + BEAST_EXPECT( + jrr[jss::changes][0u][jss::domain].asString() == + to_string(domainID)); + } + void run() override { testConventionalLedgerInputStrings(); testLedgerInputDefaultBehavior(); - // Note: Other aspects of the book_changes rpc are fertile grounds for - // unit-testing purposes. It can be included in future work + testDomainOffer(); + // Note: Other aspects of the book_changes rpc are fertile grounds + // for unit-testing purposes. It can be included in future work } }; diff --git a/src/test/rpc/Book_test.cpp b/src/test/rpc/Book_test.cpp index 79e3f940f8..0ec36eca53 100644 --- a/src/test/rpc/Book_test.cpp +++ b/src/test/rpc/Book_test.cpp @@ -22,6 +22,8 @@ #include #include +#include +#include #include namespace ripple { @@ -30,10 +32,14 @@ namespace test { class Book_test : public beast::unit_test::suite { std::string - getBookDir(jtx::Env& env, Issue const& in, Issue const& out) + getBookDir( + jtx::Env& env, + Issue const& in, + Issue const& out, + std::optional const& domain = std::nullopt) { std::string dir; - auto uBookBase = getBookBase({in, out}); + auto uBookBase = getBookBase({in, out, domain}); auto uBookEnd = getQualityNext(uBookBase); auto view = env.closed(); auto key = view->succ(uBookBase, uBookEnd); @@ -1657,6 +1663,19 @@ public: "Unneeded field 'taker_gets.issuer' " "for XRP currency specification."); } + { + Json::Value jvParams; + jvParams[jss::ledger_index] = "validated"; + jvParams[jss::taker_pays][jss::currency] = "USD"; + jvParams[jss::taker_pays][jss::issuer] = gw.human(); + jvParams[jss::taker_gets][jss::currency] = "EUR"; + jvParams[jss::taker_gets][jss::issuer] = gw.human(); + jvParams[jss::domain] = "badString"; + auto const jrr = env.rpc( + "json", "book_offers", to_string(jvParams))[jss::result]; + BEAST_EXPECT(jrr[jss::error] == "domainMalformed"); + BEAST_EXPECT(jrr[jss::error_message] == "Unable to parse domain."); + } } void @@ -1711,6 +1730,273 @@ public: (asAdmin ? 
RPC::Tuning::bookOffers.rdefault : 0u)); } + void + testTrackDomainOffer() + { + testcase("TrackDomainOffer"); + using namespace jtx; + + FeatureBitset const all{ + jtx::supported_amendments() | featurePermissionedDomains | + featureCredentials | featurePermissionedDEX}; + + Env env(*this, all); + PermissionedDEX permDex(env); + auto const alice = permDex.alice; + auto const bob = permDex.bob; + auto const carol = permDex.carol; + auto const domainID = permDex.domainID; + auto const gw = permDex.gw; + auto const USD = permDex.USD; + + auto wsc = makeWSClient(env.app().config()); + + env(offer(alice, XRP(10), USD(10)), domain(domainID)); + env.close(); + + auto checkBookOffers = [&](Json::Value const& jrr) { + BEAST_EXPECT(jrr[jss::offers].isArray()); + BEAST_EXPECT(jrr[jss::offers].size() == 1); + auto const jrOffer = jrr[jss::offers][0u]; + BEAST_EXPECT(jrOffer[sfAccount.fieldName] == alice.human()); + BEAST_EXPECT( + jrOffer[sfBookDirectory.fieldName] == + getBookDir(env, XRP, USD.issue(), domainID)); + BEAST_EXPECT(jrOffer[sfBookNode.fieldName] == "0"); + BEAST_EXPECT(jrOffer[jss::Flags] == 0); + BEAST_EXPECT(jrOffer[sfLedgerEntryType.fieldName] == jss::Offer); + BEAST_EXPECT(jrOffer[sfOwnerNode.fieldName] == "0"); + BEAST_EXPECT( + jrOffer[jss::TakerGets] == + USD(10).value().getJson(JsonOptions::none)); + BEAST_EXPECT( + jrOffer[jss::TakerPays] == + XRP(10).value().getJson(JsonOptions::none)); + BEAST_EXPECT( + jrOffer[sfDomainID.jsonName].asString() == to_string(domainID)); + }; + + // book_offers: open book doesn't return offer + { + Json::Value jvParams; + jvParams[jss::taker] = env.master.human(); + jvParams[jss::taker_pays][jss::currency] = "XRP"; + jvParams[jss::ledger_index] = "validated"; + jvParams[jss::taker_gets][jss::currency] = "USD"; + jvParams[jss::taker_gets][jss::issuer] = gw.human(); + + auto jv = wsc->invoke("book_offers", jvParams); + auto jrr = jv[jss::result]; + BEAST_EXPECT(jrr[jss::offers].isArray()); + BEAST_EXPECT(jrr[jss::offers].size() == 0); + } + + auto checkSubBooks = [&](Json::Value const& jv) { + BEAST_EXPECT( + jv[jss::result].isMember(jss::offers) && + jv[jss::result][jss::offers].size() == 1); + BEAST_EXPECT( + jv[jss::result][jss::offers][0u][jss::TakerGets] == + USD(10).value().getJson(JsonOptions::none)); + BEAST_EXPECT( + jv[jss::result][jss::offers][0u][jss::TakerPays] == + XRP(10).value().getJson(JsonOptions::none)); + BEAST_EXPECT( + jv[jss::result][jss::offers][0u][sfDomainID.jsonName] + .asString() == to_string(domainID)); + }; + + // book_offers: requesting domain book returns hybrid offer + { + Json::Value jvParams; + jvParams[jss::taker] = env.master.human(); + jvParams[jss::taker_pays][jss::currency] = "XRP"; + jvParams[jss::ledger_index] = "validated"; + jvParams[jss::taker_gets][jss::currency] = "USD"; + jvParams[jss::taker_gets][jss::issuer] = gw.human(); + jvParams[jss::domain] = to_string(domainID); + + auto jv = wsc->invoke("book_offers", jvParams); + auto jrr = jv[jss::result]; + checkBookOffers(jrr); + } + + // subscribe to domain book should return domain offer + { + Json::Value books; + books[jss::books] = Json::arrayValue; + { + auto& j = books[jss::books].append(Json::objectValue); + j[jss::snapshot] = true; + j[jss::taker_pays][jss::currency] = "XRP"; + j[jss::taker_gets][jss::currency] = "USD"; + j[jss::taker_gets][jss::issuer] = gw.human(); + j[jss::domain] = to_string(domainID); + } + + auto jv = wsc->invoke("subscribe", books); + if (!BEAST_EXPECT(jv[jss::status] == "success")) + return; + checkSubBooks(jv); + } + + // 
subscribe to open book should not return domain offer + { + Json::Value books; + books[jss::books] = Json::arrayValue; + { + auto& j = books[jss::books].append(Json::objectValue); + j[jss::snapshot] = true; + j[jss::taker_pays][jss::currency] = "XRP"; + j[jss::taker_gets][jss::currency] = "USD"; + j[jss::taker_gets][jss::issuer] = gw.human(); + } + + auto jv = wsc->invoke("subscribe", books); + if (!BEAST_EXPECT(jv[jss::status] == "success")) + return; + BEAST_EXPECT( + jv[jss::result].isMember(jss::offers) && + jv[jss::result][jss::offers].size() == 0); + } + } + + void + testTrackHybridOffer() + { + testcase("TrackHybridOffer"); + using namespace jtx; + + FeatureBitset const all{ + jtx::supported_amendments() | featurePermissionedDomains | + featureCredentials | featurePermissionedDEX}; + + Env env(*this, all); + PermissionedDEX permDex(env); + auto const alice = permDex.alice; + auto const bob = permDex.bob; + auto const carol = permDex.carol; + auto const domainID = permDex.domainID; + auto const gw = permDex.gw; + auto const USD = permDex.USD; + + auto wsc = makeWSClient(env.app().config()); + + env(offer(alice, XRP(10), USD(10)), + domain(domainID), + txflags(tfHybrid)); + env.close(); + + auto checkBookOffers = [&](Json::Value const& jrr) { + BEAST_EXPECT(jrr[jss::offers].isArray()); + BEAST_EXPECT(jrr[jss::offers].size() == 1); + auto const jrOffer = jrr[jss::offers][0u]; + BEAST_EXPECT(jrOffer[sfAccount.fieldName] == alice.human()); + BEAST_EXPECT( + jrOffer[sfBookDirectory.fieldName] == + getBookDir(env, XRP, USD.issue(), domainID)); + BEAST_EXPECT(jrOffer[sfBookNode.fieldName] == "0"); + BEAST_EXPECT(jrOffer[jss::Flags] == lsfHybrid); + BEAST_EXPECT(jrOffer[sfLedgerEntryType.fieldName] == jss::Offer); + BEAST_EXPECT(jrOffer[sfOwnerNode.fieldName] == "0"); + BEAST_EXPECT( + jrOffer[jss::TakerGets] == + USD(10).value().getJson(JsonOptions::none)); + BEAST_EXPECT( + jrOffer[jss::TakerPays] == + XRP(10).value().getJson(JsonOptions::none)); + BEAST_EXPECT( + jrOffer[sfDomainID.jsonName].asString() == to_string(domainID)); + BEAST_EXPECT(jrOffer[sfAdditionalBooks.jsonName].size() == 1); + }; + + // book_offers: open book returns hybrid offer + { + Json::Value jvParams; + jvParams[jss::taker] = env.master.human(); + jvParams[jss::taker_pays][jss::currency] = "XRP"; + jvParams[jss::ledger_index] = "validated"; + jvParams[jss::taker_gets][jss::currency] = "USD"; + jvParams[jss::taker_gets][jss::issuer] = gw.human(); + + auto jv = wsc->invoke("book_offers", jvParams); + auto jrr = jv[jss::result]; + checkBookOffers(jrr); + } + + auto checkSubBooks = [&](Json::Value const& jv) { + BEAST_EXPECT( + jv[jss::result].isMember(jss::offers) && + jv[jss::result][jss::offers].size() == 1); + BEAST_EXPECT( + jv[jss::result][jss::offers][0u][jss::TakerGets] == + USD(10).value().getJson(JsonOptions::none)); + BEAST_EXPECT( + jv[jss::result][jss::offers][0u][jss::TakerPays] == + XRP(10).value().getJson(JsonOptions::none)); + BEAST_EXPECT( + jv[jss::result][jss::offers][0u][sfDomainID.jsonName] + .asString() == to_string(domainID)); + }; + + // book_offers: requesting domain book returns hybrid offer + { + Json::Value jvParams; + jvParams[jss::taker] = env.master.human(); + jvParams[jss::taker_pays][jss::currency] = "XRP"; + jvParams[jss::ledger_index] = "validated"; + jvParams[jss::taker_gets][jss::currency] = "USD"; + jvParams[jss::taker_gets][jss::issuer] = gw.human(); + jvParams[jss::domain] = to_string(domainID); + + auto jv = wsc->invoke("book_offers", jvParams); + auto jrr = jv[jss::result]; + 
checkBookOffers(jrr); + } + + // subscribe to domain book should return hybrid offer + { + Json::Value books; + books[jss::books] = Json::arrayValue; + { + auto& j = books[jss::books].append(Json::objectValue); + j[jss::snapshot] = true; + j[jss::taker_pays][jss::currency] = "XRP"; + j[jss::taker_gets][jss::currency] = "USD"; + j[jss::taker_gets][jss::issuer] = gw.human(); + j[jss::domain] = to_string(domainID); + } + + auto jv = wsc->invoke("subscribe", books); + if (!BEAST_EXPECT(jv[jss::status] == "success")) + return; + checkSubBooks(jv); + + // RPC unsubscribe + auto unsubJv = wsc->invoke("unsubscribe", books); + if (wsc->version() == 2) + BEAST_EXPECT(unsubJv[jss::status] == "success"); + } + + // subscribe to open book should return hybrid offer + { + Json::Value books; + books[jss::books] = Json::arrayValue; + { + auto& j = books[jss::books].append(Json::objectValue); + j[jss::snapshot] = true; + j[jss::taker_pays][jss::currency] = "XRP"; + j[jss::taker_gets][jss::currency] = "USD"; + j[jss::taker_gets][jss::issuer] = gw.human(); + } + + auto jv = wsc->invoke("subscribe", books); + if (!BEAST_EXPECT(jv[jss::status] == "success")) + return; + checkSubBooks(jv); + } + } + void run() override { @@ -1728,6 +2014,8 @@ public: testBookOfferErrors(); testBookOfferLimits(true); testBookOfferLimits(false); + testTrackDomainOffer(); + testTrackHybridOffer(); } }; diff --git a/src/test/rpc/GatewayBalances_test.cpp b/src/test/rpc/GatewayBalances_test.cpp index 249d4f892f..7e9273d25e 100644 --- a/src/test/rpc/GatewayBalances_test.cpp +++ b/src/test/rpc/GatewayBalances_test.cpp @@ -252,7 +252,10 @@ public: { using namespace jtx; auto const sa = supported_amendments(); - for (auto feature : {sa - featureFlowCross, sa}) + for (auto feature : + {sa - featureFlowCross - featurePermissionedDEX, + sa - featurePermissionedDEX, + sa}) { testGWB(feature); testGWBApiVersions(feature); diff --git a/src/test/rpc/JSONRPC_test.cpp b/src/test/rpc/JSONRPC_test.cpp index 22c7dfd1dc..1612d1b455 100644 --- a/src/test/rpc/JSONRPC_test.cpp +++ b/src/test/rpc/JSONRPC_test.cpp @@ -2041,6 +2041,28 @@ static constexpr TxnTestData txnTestArray[] = { "Cannot specify differing 'Amount' and 'DeliverMax'", "Cannot specify differing 'Amount' and 'DeliverMax'", "Cannot specify differing 'Amount' and 'DeliverMax'"}}}, + {"Payment cannot specify bad DomainID.", + __LINE__, + R"({ + "command": "doesnt_matter", + "account": "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", + "secret": "masterpassphrase", + "debug_signing": 0, + "tx_json": { + "Account": "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", + "Amount": "1000000000", + "Destination": "rnUy2SHTrB9DubsPmkJZUXTf5FcNDGrYEA", + "Fee": 50, + "Sequence": 0, + "SigningPubKey": "", + "TransactionType": "Payment", + "DomainID": "invalid", + } +})", + {{"Unable to parse 'DomainID'.", + "Unable to parse 'DomainID'.", + "Unable to parse 'DomainID'.", + "Unable to parse 'DomainID'."}}}, {"Minimal delegated transaction.", __LINE__, diff --git a/src/test/rpc/NoRipple_test.cpp b/src/test/rpc/NoRipple_test.cpp index 5c41f25128..42c86b34bb 100644 --- a/src/test/rpc/NoRipple_test.cpp +++ b/src/test/rpc/NoRipple_test.cpp @@ -294,7 +294,8 @@ public: }; using namespace jtx; auto const sa = supported_amendments(); - withFeatsTests(sa - featureFlowCross); + withFeatsTests(sa - featureFlowCross - featurePermissionedDEX); + withFeatsTests(sa - featurePermissionedDEX); withFeatsTests(sa); } }; diff --git a/src/test/rpc/Subscribe_test.cpp b/src/test/rpc/Subscribe_test.cpp index 3d1b425422..32296c5d0a 100644 --- 
a/src/test/rpc/Subscribe_test.cpp +++ b/src/test/rpc/Subscribe_test.cpp @@ -1300,6 +1300,60 @@ public: } } + void + testSubBookChanges() + { + testcase("SubBookChanges"); + using namespace jtx; + using namespace std::chrono_literals; + FeatureBitset const all{ + jtx::supported_amendments() | featurePermissionedDomains | + featureCredentials | featurePermissionedDEX}; + + Env env(*this, all); + PermissionedDEX permDex(env); + auto const alice = permDex.alice; + auto const bob = permDex.bob; + auto const carol = permDex.carol; + auto const domainID = permDex.domainID; + auto const gw = permDex.gw; + auto const USD = permDex.USD; + + auto wsc = makeWSClient(env.app().config()); + + Json::Value streams; + streams[jss::streams] = Json::arrayValue; + streams[jss::streams][0u] = "book_changes"; + + auto jv = wsc->invoke("subscribe", streams); + if (!BEAST_EXPECT(jv[jss::status] == "success")) + return; + env(offer(alice, XRP(10), USD(10)), + domain(domainID), + txflags(tfHybrid)); + env.close(); + + env(pay(bob, carol, USD(5)), + path(~USD), + sendmax(XRP(5)), + domain(domainID)); + env.close(); + + BEAST_EXPECT(wsc->findMsg(5s, [&](auto const& jv) { + if (jv[jss::changes].size() != 1) + return false; + + auto const jrOffer = jv[jss::changes][0u]; + return (jv[jss::changes][0u][jss::domain]).asString() == + strHex(domainID) && + jrOffer[jss::currency_a].asString() == "XRP_drops" && + jrOffer[jss::volume_a].asString() == "5000000" && + jrOffer[jss::currency_b].asString() == + "rHUKYAZyUFn8PCZWbPfwHfbVQXTYrYKkHb/USD" && + jrOffer[jss::volume_b].asString() == "5"; + })); + } + void run() override { @@ -1318,6 +1372,7 @@ public: testSubErrors(false); testSubByUrl(); testHistoryTxStream(); + testSubBookChanges(); } }; diff --git a/src/xrpld/app/ledger/OrderBookDB.cpp b/src/xrpld/app/ledger/OrderBookDB.cpp index b8a7b54008..433a993772 100644 --- a/src/xrpld/app/ledger/OrderBookDB.cpp +++ b/src/xrpld/app/ledger/OrderBookDB.cpp @@ -89,6 +89,8 @@ OrderBookDB::update(std::shared_ptr const& ledger) decltype(allBooks_) allBooks; decltype(xrpBooks_) xrpBooks; + decltype(domainBooks_) domainBooks; + decltype(xrpDomainBooks_) xrpDomainBooks; allBooks.reserve(allBooks_.size()); xrpBooks.reserve(xrpBooks_.size()); @@ -120,10 +122,16 @@ OrderBookDB::update(std::shared_ptr const& ledger) book.in.account = sle->getFieldH160(sfTakerPaysIssuer); book.out.currency = sle->getFieldH160(sfTakerGetsCurrency); book.out.account = sle->getFieldH160(sfTakerGetsIssuer); + book.domain = (*sle)[~sfDomainID]; - allBooks[book.in].insert(book.out); + if (book.domain) + domainBooks_[{book.in, *book.domain}].insert(book.out); + else + allBooks[book.in].insert(book.out); - if (isXRP(book.out)) + if (book.domain && isXRP(book.out)) + xrpDomainBooks.insert({book.in, *book.domain}); + else if (isXRP(book.out)) xrpBooks.insert(book.in); ++cnt; @@ -160,6 +168,8 @@ OrderBookDB::update(std::shared_ptr const& ledger) std::lock_guard sl(mLock); allBooks_.swap(allBooks); xrpBooks_.swap(xrpBooks); + domainBooks_.swap(domainBooks); + xrpDomainBooks_.swap(xrpDomainBooks); } app_.getLedgerMaster().newOrderBookDB(); @@ -172,47 +182,77 @@ OrderBookDB::addOrderBook(Book const& book) std::lock_guard sl(mLock); - allBooks_[book.in].insert(book.out); + if (book.domain) + domainBooks_[{book.in, *book.domain}].insert(book.out); + else + allBooks_[book.in].insert(book.out); - if (toXRP) + if (book.domain && toXRP) + xrpDomainBooks_.insert({book.in, *book.domain}); + else if (toXRP) xrpBooks_.insert(book.in); } // return list of all orderbooks that want this 
issuerID and currencyID std::vector -OrderBookDB::getBooksByTakerPays(Issue const& issue) +OrderBookDB::getBooksByTakerPays( + Issue const& issue, + std::optional const& domain) { std::vector ret; { std::lock_guard sl(mLock); - if (auto it = allBooks_.find(issue); it != allBooks_.end()) - { - ret.reserve(it->second.size()); + auto getBooks = [&](auto const& container, auto const& key) { + if (auto it = container.find(key); it != container.end()) + { + auto const& books = it->second; + ret.reserve(books.size()); - for (auto const& gets : it->second) - ret.push_back(Book(issue, gets)); - } + for (auto const& gets : books) + ret.emplace_back(issue, gets, domain); + } + }; + + if (!domain) + getBooks(allBooks_, issue); + else + getBooks(domainBooks_, std::make_pair(issue, *domain)); } return ret; } int -OrderBookDB::getBookSize(Issue const& issue) +OrderBookDB::getBookSize( + Issue const& issue, + std::optional const& domain) { std::lock_guard sl(mLock); - if (auto it = allBooks_.find(issue); it != allBooks_.end()) - return static_cast(it->second.size()); + + if (!domain) + { + if (auto it = allBooks_.find(issue); it != allBooks_.end()) + return static_cast(it->second.size()); + } + else + { + if (auto it = domainBooks_.find({issue, *domain}); + it != domainBooks_.end()) + return static_cast(it->second.size()); + } + return 0; } bool -OrderBookDB::isBookToXRP(Issue const& issue) +OrderBookDB::isBookToXRP(Issue const& issue, std::optional domain) { std::lock_guard sl(mLock); - return xrpBooks_.count(issue) > 0; + if (domain) + return xrpDomainBooks_.contains({issue, *domain}); + return xrpBooks_.contains(issue); } BookListeners::pointer @@ -228,7 +268,8 @@ OrderBookDB::makeBookListeners(Book const& book) mListeners[book] = ret; XRPL_ASSERT( getBookListeners(book) == ret, - "ripple::OrderBookDB::makeBookListeners : result roundtrip lookup"); + "ripple::OrderBookDB::makeBookListeners : result roundtrip " + "lookup"); } return ret; @@ -278,7 +319,8 @@ OrderBookDB::processTxn( { auto listeners = getBookListeners( {data->getFieldAmount(sfTakerGets).issue(), - data->getFieldAmount(sfTakerPays).issue()}); + data->getFieldAmount(sfTakerPays).issue(), + (*data)[~sfDomainID]}); if (listeners) listeners->publish(jvObj, havePublished); } diff --git a/src/xrpld/app/ledger/OrderBookDB.h b/src/xrpld/app/ledger/OrderBookDB.h index bc36f8a301..89c20b7074 100644 --- a/src/xrpld/app/ledger/OrderBookDB.h +++ b/src/xrpld/app/ledger/OrderBookDB.h @@ -25,8 +25,10 @@ #include #include +#include #include +#include namespace ripple { @@ -46,15 +48,19 @@ public: /** @return a list of all orderbooks that want this issuerID and currencyID. */ std::vector - getBooksByTakerPays(Issue const&); + getBooksByTakerPays( + Issue const&, + std::optional const& domain = std::nullopt); /** @return a count of all orderbooks that want this issuerID and currencyID. 
*/ int - getBookSize(Issue const&); + getBookSize( + Issue const&, + std::optional const& domain = std::nullopt); bool - isBookToXRP(Issue const&); + isBookToXRP(Issue const&, std::optional domain = std::nullopt); BookListeners::pointer getBookListeners(Book const&); @@ -74,9 +80,15 @@ private: // Maps order books by "issue in" to "issue out": hardened_hash_map> allBooks_; + hardened_hash_map, hardened_hash_set> + domainBooks_; + // does an order book to XRP exist hash_set xrpBooks_; + // does an order book to XRP exist + hash_set> xrpDomainBooks_; + std::recursive_mutex mLock; using BookToListenersMap = hash_map; diff --git a/src/xrpld/app/misc/PermissionedDEXHelpers.cpp b/src/xrpld/app/misc/PermissionedDEXHelpers.cpp new file mode 100644 index 0000000000..4251ac1519 --- /dev/null +++ b/src/xrpld/app/misc/PermissionedDEXHelpers.cpp @@ -0,0 +1,88 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2025 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include + +namespace ripple { +namespace permissioned_dex { + +bool +accountInDomain( + ReadView const& view, + AccountID const& account, + Domain const& domainID) +{ + auto const sleDomain = view.read(keylet::permissionedDomain(domainID)); + if (!sleDomain) + return false; + + // domain owner is in the domain + if (sleDomain->getAccountID(sfOwner) == account) + return true; + + auto const& credentials = sleDomain->getFieldArray(sfAcceptedCredentials); + + bool const inDomain = std::any_of( + credentials.begin(), credentials.end(), [&](auto const& credential) { + auto const sleCred = view.read(keylet::credential( + account, credential[sfIssuer], credential[sfCredentialType])); + if (!sleCred || !sleCred->isFlag(lsfAccepted)) + return false; + + return !credentials::checkExpired( + sleCred, view.info().parentCloseTime); + }); + + return inDomain; +} + +bool +offerInDomain( + ReadView const& view, + uint256 const& offerID, + Domain const& domainID, + beast::Journal j) +{ + auto const sleOffer = view.read(keylet::offer(offerID)); + + // The following are defensive checks that should never happen, since this + // function is used to check against the order book offers, which should not + // have any of the following wrong behavior + if (!sleOffer) + return false; // LCOV_EXCL_LINE + if (!sleOffer->isFieldPresent(sfDomainID)) + return false; // LCOV_EXCL_LINE + if (sleOffer->getFieldH256(sfDomainID) != domainID) + return false; // LCOV_EXCL_LINE + + if (sleOffer->isFlag(lsfHybrid) && + !sleOffer->isFieldPresent(sfAdditionalBooks)) + { + JLOG(j.error()) << "Hybrid offer " << offerID + << " missing AdditionalBooks field"; + return false; // LCOV_EXCL_LINE + } 
+ + return accountInDomain(view, sleOffer->getAccountID(sfAccount), domainID); +} + +} // namespace permissioned_dex + +} // namespace ripple diff --git a/src/xrpld/app/misc/PermissionedDEXHelpers.h b/src/xrpld/app/misc/PermissionedDEXHelpers.h new file mode 100644 index 0000000000..1b3a0323fd --- /dev/null +++ b/src/xrpld/app/misc/PermissionedDEXHelpers.h @@ -0,0 +1,43 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2025 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once +#include + +namespace ripple { +namespace permissioned_dex { + +// Check if an account is in a permissioned domain +[[nodiscard]] bool +accountInDomain( + ReadView const& view, + AccountID const& account, + Domain const& domainID); + +// Check if an offer is in the permissioned domain +[[nodiscard]] bool +offerInDomain( + ReadView const& view, + uint256 const& offerID, + Domain const& domainID, + beast::Journal j); + +} // namespace permissioned_dex + +} // namespace ripple diff --git a/src/xrpld/app/paths/Flow.cpp b/src/xrpld/app/paths/Flow.cpp index 08f8ec3f25..3b14b8b968 100644 --- a/src/xrpld/app/paths/Flow.cpp +++ b/src/xrpld/app/paths/Flow.cpp @@ -64,6 +64,7 @@ flow( OfferCrossing offerCrossing, std::optional const& limitQuality, std::optional const& sendMax, + std::optional const& domainID, beast::Journal j, path::detail::FlowDebugInfo* flowDebugInfo) { @@ -98,6 +99,7 @@ flow( ownerPaysTransferFee, offerCrossing, ammContext, + domainID, j); if (toStrandsTer != tesSUCCESS) diff --git a/src/xrpld/app/paths/Flow.h b/src/xrpld/app/paths/Flow.h index 048b8785f1..659f180484 100644 --- a/src/xrpld/app/paths/Flow.h +++ b/src/xrpld/app/paths/Flow.h @@ -66,6 +66,7 @@ flow( OfferCrossing offerCrossing, std::optional const& limitQuality, std::optional const& sendMax, + std::optional const& domainID, beast::Journal j, path::detail::FlowDebugInfo* flowDebugInfo = nullptr); diff --git a/src/xrpld/app/paths/PathRequest.cpp b/src/xrpld/app/paths/PathRequest.cpp index ed090d25aa..8a88e774d0 100644 --- a/src/xrpld/app/paths/PathRequest.cpp +++ b/src/xrpld/app/paths/PathRequest.cpp @@ -438,6 +438,21 @@ PathRequest::parseJson(Json::Value const& jvParams) if (jvParams.isMember(jss::id)) jvId = jvParams[jss::id]; + if (jvParams.isMember(jss::domain)) + { + uint256 num; + if (!jvParams[jss::domain].isString() || + !num.parseHex(jvParams[jss::domain].asString())) + { + jvStatus = rpcError(rpcDOMAIN_MALFORMED); + return PFR_PJ_INVALID; + } + else + { + domain = num; + } + } + return PFR_PJ_NOCHANGE; } @@ -484,6 +499,7 @@ PathRequest::getPathFinder( std::nullopt, dst_amount, saSendMax, + domain, app_); if (pathfinder->findPaths(level, 
continueCallback)) pathfinder->computePathRanks(max_paths_, continueCallback); @@ -581,6 +597,7 @@ PathRequest::findPaths( *raDstAccount, // --> Account to deliver to. *raSrcAccount, // --> Account sending from. ps, // --> Path set. + domain, // --> Domain. app_.logs(), &rcInput); @@ -601,6 +618,7 @@ PathRequest::findPaths( *raDstAccount, // --> Account to deliver to. *raSrcAccount, // --> Account sending from. ps, // --> Path set. + domain, // --> Domain. app_.logs()); if (rc.result() != tesSUCCESS) diff --git a/src/xrpld/app/paths/PathRequest.h b/src/xrpld/app/paths/PathRequest.h index e480c2b812..aea0e564fb 100644 --- a/src/xrpld/app/paths/PathRequest.h +++ b/src/xrpld/app/paths/PathRequest.h @@ -25,6 +25,7 @@ #include #include +#include #include #include @@ -156,6 +157,8 @@ private: std::set sciSourceCurrencies; std::map mContext; + std::optional domain; + bool convert_all_; std::recursive_mutex mIndexLock; diff --git a/src/xrpld/app/paths/Pathfinder.cpp b/src/xrpld/app/paths/Pathfinder.cpp index e02c3ed089..74a33ec917 100644 --- a/src/xrpld/app/paths/Pathfinder.cpp +++ b/src/xrpld/app/paths/Pathfinder.cpp @@ -166,6 +166,7 @@ Pathfinder::Pathfinder( std::optional const& uSrcIssuer, STAmount const& saDstAmount, std::optional const& srcAmount, + std::optional const& domain, Application& app) : mSrcAccount(uSrcAccount) , mDstAccount(uDstAccount) @@ -184,6 +185,7 @@ Pathfinder::Pathfinder( 0, true))) , convert_all_(convertAllCheck(mDstAmount)) + , mDomain(domain) , mLedger(cache->getLedger()) , mRLCache(cache) , app_(app) @@ -372,6 +374,7 @@ Pathfinder::getPathLiquidity( mDstAccount, mSrcAccount, pathSet, + mDomain, app_.logs(), &rcInput); // If we can't get even the minimum liquidity requested, we're done. @@ -392,6 +395,7 @@ Pathfinder::getPathLiquidity( mDstAccount, mSrcAccount, pathSet, + mDomain, app_.logs(), &rcInput); @@ -431,6 +435,7 @@ Pathfinder::computePathRanks( mDstAccount, mSrcAccount, STPathSet(), + mDomain, app_.logs(), &rcInput); @@ -741,7 +746,7 @@ Pathfinder::getPathsOut( if (!bFrozen) { - count = app_.getOrderBookDB().getBookSize(issue); + count = app_.getOrderBookDB().getBookSize(issue, mDomain); if (auto const lines = mRLCache->getRippleLines(account, direction)) { @@ -1128,7 +1133,8 @@ Pathfinder::addLink( { // to XRP only if (!bOnXRP && - app_.getOrderBookDB().isBookToXRP({uEndCurrency, uEndIssuer})) + app_.getOrderBookDB().isBookToXRP( + {uEndCurrency, uEndIssuer}, mDomain)) { STPathElement pathElement( STPathElement::typeCurrency, @@ -1142,7 +1148,7 @@ Pathfinder::addLink( { bool bDestOnly = (addFlags & afOB_LAST) != 0; auto books = app_.getOrderBookDB().getBooksByTakerPays( - {uEndCurrency, uEndIssuer}); + {uEndCurrency, uEndIssuer}, mDomain); JLOG(j_.trace()) << books.size() << " books found from this currency/issuer"; diff --git a/src/xrpld/app/paths/Pathfinder.h b/src/xrpld/app/paths/Pathfinder.h index 973fda8855..ea3928dff4 100644 --- a/src/xrpld/app/paths/Pathfinder.h +++ b/src/xrpld/app/paths/Pathfinder.h @@ -48,6 +48,7 @@ public: std::optional const& uSrcIssuer, STAmount const& dstAmount, std::optional const& srcAmount, + std::optional const& domain, Application& app); Pathfinder(Pathfinder const&) = delete; Pathfinder& @@ -205,6 +206,7 @@ private: been removed. 
*/ STAmount mRemainingAmount; bool convert_all_; + std::optional mDomain; std::shared_ptr mLedger; std::unique_ptr m_loadEvent; diff --git a/src/xrpld/app/paths/RippleCalc.cpp b/src/xrpld/app/paths/RippleCalc.cpp index c783bb8e9f..4e472e07c8 100644 --- a/src/xrpld/app/paths/RippleCalc.cpp +++ b/src/xrpld/app/paths/RippleCalc.cpp @@ -53,6 +53,8 @@ RippleCalc::rippleCalculate( // A set of paths that are included in the transaction that we'll // explore for liquidity. STPathSet const& spsPaths, + + std::optional const& domainID, Logs& l, Input const* const pInputs) { @@ -110,6 +112,7 @@ RippleCalc::rippleCalculate( OfferCrossing::no, limitQuality, sendMax, + domainID, j, nullptr); } diff --git a/src/xrpld/app/paths/RippleCalc.h b/src/xrpld/app/paths/RippleCalc.h index 45f68725cc..09de7334e8 100644 --- a/src/xrpld/app/paths/RippleCalc.h +++ b/src/xrpld/app/paths/RippleCalc.h @@ -111,6 +111,8 @@ public: // A set of paths that are included in the transaction that we'll // explore for liquidity. STPathSet const& spsPaths, + + std::optional const& domainID, Logs& l, Input const* const pInputs = nullptr); diff --git a/src/xrpld/app/paths/detail/BookStep.cpp b/src/xrpld/app/paths/detail/BookStep.cpp index 4024ca190d..8d20a9900c 100644 --- a/src/xrpld/app/paths/detail/BookStep.cpp +++ b/src/xrpld/app/paths/detail/BookStep.cpp @@ -93,7 +93,7 @@ protected: public: BookStep(StrandContext const& ctx, Issue const& in, Issue const& out) : maxOffersToConsume_(getMaxOffersToConsume(ctx)) - , book_(in, out) + , book_(in, out, ctx.domainID) , strandSrc_(ctx.strandSrc) , strandDst_(ctx.strandDst) , prevStep_(ctx.prevStep) @@ -837,6 +837,10 @@ BookStep::forEachOffer( // At any payment engine iteration, AMM offer can only be consumed once. auto tryAMM = [&](std::optional const& lobQuality) -> bool { + // amm doesn't support domain yet + if (book_.domain) + return true; + // If offer crossing then use either LOB quality or nullopt // to prevent AMM being blocked by a lower quality LOB. 
auto const qualityThreshold = [&]() -> std::optional { diff --git a/src/xrpld/app/paths/detail/PaySteps.cpp b/src/xrpld/app/paths/detail/PaySteps.cpp index 99f212d548..aa9e21e182 100644 --- a/src/xrpld/app/paths/detail/PaySteps.cpp +++ b/src/xrpld/app/paths/detail/PaySteps.cpp @@ -142,6 +142,7 @@ toStrand( bool ownerPaysTransferFee, OfferCrossing offerCrossing, AMMContext& ammContext, + std::optional const& domainID, beast::Journal j) { if (isXRP(src) || isXRP(dst) || !isConsistent(deliver) || @@ -279,6 +280,7 @@ toStrand( seenDirectIssues, seenBookOuts, ammContext, + domainID, j}; }; @@ -476,6 +478,7 @@ toStrands( bool ownerPaysTransferFee, OfferCrossing offerCrossing, AMMContext& ammContext, + std::optional const& domainID, beast::Journal j) { std::vector result; @@ -502,6 +505,7 @@ toStrands( ownerPaysTransferFee, offerCrossing, ammContext, + domainID, j); auto const ter = sp.first; auto& strand = sp.second; @@ -546,6 +550,7 @@ toStrands( ownerPaysTransferFee, offerCrossing, ammContext, + domainID, j); auto ter = sp.first; auto& strand = sp.second; @@ -592,6 +597,7 @@ StrandContext::StrandContext( std::array, 2>& seenDirectIssues_, boost::container::flat_set& seenBookOuts_, AMMContext& ammContext_, + std::optional const& domainID_, beast::Journal j_) : view(view_) , strandSrc(strandSrc_) @@ -608,6 +614,7 @@ StrandContext::StrandContext( , seenDirectIssues(seenDirectIssues_) , seenBookOuts(seenBookOuts_) , ammContext(ammContext_) + , domainID(domainID_) , j(j_) { } diff --git a/src/xrpld/app/paths/detail/Steps.h b/src/xrpld/app/paths/detail/Steps.h index bb9abf6545..0fcdc85fe1 100644 --- a/src/xrpld/app/paths/detail/Steps.h +++ b/src/xrpld/app/paths/detail/Steps.h @@ -23,6 +23,7 @@ #include #include +#include #include #include #include @@ -388,6 +389,7 @@ normalizePath( owner @param offerCrossing false -> payment; true -> offer crossing @param ammContext counts iterations with AMM offers + @param domainID the domain that order books will use @param j Journal for logging messages @return Error code and constructed Strand */ @@ -403,6 +405,7 @@ toStrand( bool ownerPaysTransferFee, OfferCrossing offerCrossing, AMMContext& ammContext, + std::optional const& domainID, beast::Journal j); /** @@ -427,6 +430,7 @@ toStrand( owner @param offerCrossing false -> payment; true -> offer crossing @param ammContext counts iterations with AMM offers + @param domainID the domain that order books will use @param j Journal for logging messages @return error code and collection of strands */ @@ -443,6 +447,7 @@ toStrands( bool ownerPaysTransferFee, OfferCrossing offerCrossing, AMMContext& ammContext, + std::optional const& domainID, beast::Journal j); /// @cond INTERNAL @@ -553,6 +558,7 @@ struct StrandContext */ boost::container::flat_set& seenBookOuts; AMMContext& ammContext; + std::optional domainID; // the domain the order book will use beast::Journal const j; /** StrandContext constructor. 
*/ @@ -574,6 +580,7 @@ struct StrandContext boost::container::flat_set& seenBookOuts_, ///< For detecting book loops AMMContext& ammContext_, + std::optional const& domainID, beast::Journal j_); ///< Journal for logging }; diff --git a/src/xrpld/app/tx/detail/AMMCreate.cpp b/src/xrpld/app/tx/detail/AMMCreate.cpp index 95cb5bf2e6..f0ccc6f298 100644 --- a/src/xrpld/app/tx/detail/AMMCreate.cpp +++ b/src/xrpld/app/tx/detail/AMMCreate.cpp @@ -329,7 +329,7 @@ applyCreate( << amount2; auto addOrderBook = [&](Issue const& issueIn, Issue const& issueOut, std::uint64_t uRate) { - Book const book{issueIn, issueOut}; + Book const book{issueIn, issueOut, std::nullopt}; auto const dir = keylet::quality(keylet::book(book), uRate); if (auto const bookExisted = static_cast(sb.read(dir)); !bookExisted) diff --git a/src/xrpld/app/tx/detail/CashCheck.cpp b/src/xrpld/app/tx/detail/CashCheck.cpp index cccda83a68..0f1d08689c 100644 --- a/src/xrpld/app/tx/detail/CashCheck.cpp +++ b/src/xrpld/app/tx/detail/CashCheck.cpp @@ -451,6 +451,7 @@ CashCheck::doApply() OfferCrossing::no, std::nullopt, sleCheck->getFieldAmount(sfSendMax), + std::nullopt, // check does not support domain viewJ); if (result.result() != tesSUCCESS) diff --git a/src/xrpld/app/tx/detail/CreateOffer.cpp b/src/xrpld/app/tx/detail/CreateOffer.cpp index d9bd57ec3c..7ccecd7a47 100644 --- a/src/xrpld/app/tx/detail/CreateOffer.cpp +++ b/src/xrpld/app/tx/detail/CreateOffer.cpp @@ -18,16 +18,20 @@ //============================================================================== #include +#include #include #include #include +#include #include #include #include +#include +#include +#include namespace ripple { - TxConsequences CreateOffer::makeTxConsequences(PreflightContext const& ctx) { @@ -42,6 +46,16 @@ CreateOffer::makeTxConsequences(PreflightContext const& ctx) NotTEC CreateOffer::preflight(PreflightContext const& ctx) { + if (ctx.tx.isFieldPresent(sfDomainID) && + !ctx.rules.enabled(featurePermissionedDEX)) + return temDISABLED; + + // Permissioned offers should use the PE (which must be enabled by + // featureFlowCross amendment) + if (ctx.rules.enabled(featurePermissionedDEX) && + !ctx.rules.enabled(featureFlowCross)) + return temDISABLED; + if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) return ret; @@ -56,6 +70,12 @@ CreateOffer::preflight(PreflightContext const& ctx) return temINVALID_FLAG; } + if (!ctx.rules.enabled(featurePermissionedDEX) && tx.isFlag(tfHybrid)) + return temINVALID_FLAG; + + if (tx.isFlag(tfHybrid) && !tx.isFieldPresent(sfDomainID)) + return temINVALID_FLAG; + bool const bImmediateOrCancel(uTxFlags & tfImmediateOrCancel); bool const bFillOrKill(uTxFlags & tfFillOrKill); @@ -198,6 +218,15 @@ CreateOffer::preclaim(PreclaimContext const& ctx) return result; } + // if domain is specified, make sure that domain exists and the offer create + // is part of the domain + if (ctx.tx.isFieldPresent(sfDomainID)) + { + if (!permissioned_dex::accountInDomain( + ctx.view, id, ctx.tx[sfDomainID])) + return tecNO_PERMISSION; + } + return tesSUCCESS; } @@ -367,7 +396,7 @@ CreateOffer::bridged_cross( OfferStream offers_direct( view, view_cancel, - Book(taker.issue_in(), taker.issue_out()), + Book(taker.issue_in(), taker.issue_out(), std::nullopt), when, stepCounter_, j_); @@ -375,7 +404,7 @@ CreateOffer::bridged_cross( OfferStream offers_leg1( view, view_cancel, - Book(taker.issue_in(), xrpIssue()), + Book(taker.issue_in(), xrpIssue(), std::nullopt), when, stepCounter_, j_); @@ -383,7 +412,7 @@ CreateOffer::bridged_cross( OfferStream 
offers_leg2( view, view_cancel, - Book(xrpIssue(), taker.issue_out()), + Book(xrpIssue(), taker.issue_out(), std::nullopt), when, stepCounter_, j_); @@ -551,7 +580,7 @@ CreateOffer::direct_cross( OfferStream offers( view, view_cancel, - Book(taker.issue_in(), taker.issue_out()), + Book(taker.issue_in(), taker.issue_out(), std::nullopt), when, stepCounter_, j_); @@ -708,7 +737,8 @@ std::pair CreateOffer::flowCross( PaymentSandbox& psb, PaymentSandbox& psbCancel, - Amounts const& takerAmount) + Amounts const& takerAmount, + std::optional const& domainID) { try { @@ -805,6 +835,7 @@ CreateOffer::flowCross( offerCrossing, threshold, sendMax, + domainID, j_); // If stale offers were found remove them. @@ -907,13 +938,18 @@ CreateOffer::flowCross( } std::pair -CreateOffer::cross(Sandbox& sb, Sandbox& sbCancel, Amounts const& takerAmount) +CreateOffer::cross( + Sandbox& sb, + Sandbox& sbCancel, + Amounts const& takerAmount, + std::optional const& domainID) { if (sb.rules().enabled(featureFlowCross)) { PaymentSandbox psbFlow{&sb}; PaymentSandbox psbCancelFlow{&sbCancel}; - auto const ret = flowCross(psbFlow, psbCancelFlow, takerAmount); + auto const ret = + flowCross(psbFlow, psbCancelFlow, takerAmount, domainID); psbFlow.apply(sb); psbCancelFlow.apply(sbCancel); return ret; @@ -950,6 +986,54 @@ CreateOffer::preCompute() return Transactor::preCompute(); } +TER +CreateOffer::applyHybrid( + Sandbox& sb, + std::shared_ptr sleOffer, + Keylet const& offerKey, + STAmount const& saTakerPays, + STAmount const& saTakerGets, + std::function)> const& setDir) +{ + if (!sleOffer->isFieldPresent(sfDomainID)) + return tecINTERNAL; // LCOV_EXCL_LINE + + // set hybrid flag + sleOffer->setFlag(lsfHybrid); + + // if offer is hybrid, need to also place into open offer dir + Book const book{saTakerPays.issue(), saTakerGets.issue(), std::nullopt}; + + auto dir = + keylet::quality(keylet::book(book), getRate(saTakerGets, saTakerPays)); + bool const bookExists = sb.exists(dir); + + auto const bookNode = sb.dirAppend(dir, offerKey, [&](SLE::ref sle) { + // don't set domainID on the directory object since this directory is + // for open book + setDir(sle, std::nullopt); + }); + + if (!bookNode) + { + JLOG(j_.debug()) + << "final result: failed to add hybrid offer to open book"; + return tecDIR_FULL; // LCOV_EXCL_LINE + } + + STArray bookArr(sfAdditionalBooks, 1); + auto bookInfo = STObject::makeInnerObject(sfBook); + bookInfo.setFieldH256(sfBookDirectory, dir.key); + bookInfo.setFieldU64(sfBookNode, *bookNode); + bookArr.push_back(std::move(bookInfo)); + + if (!bookExists) + ctx_.app.getOrderBookDB().addOrderBook(book); + + sleOffer->setFieldArray(sfAdditionalBooks, bookArr); + return tesSUCCESS; +} + std::pair CreateOffer::applyGuts(Sandbox& sb, Sandbox& sbCancel) { @@ -961,9 +1045,11 @@ CreateOffer::applyGuts(Sandbox& sb, Sandbox& sbCancel) bool const bImmediateOrCancel(uTxFlags & tfImmediateOrCancel); bool const bFillOrKill(uTxFlags & tfFillOrKill); bool const bSell(uTxFlags & tfSell); + bool const bHybrid(uTxFlags & tfHybrid); auto saTakerPays = ctx_.tx[sfTakerPays]; auto saTakerGets = ctx_.tx[sfTakerGets]; + auto const domainID = ctx_.tx[~sfDomainID]; auto const cancelSequence = ctx_.tx[~sfOfferSequence]; @@ -1080,7 +1166,8 @@ CreateOffer::applyGuts(Sandbox& sb, Sandbox& sbCancel) stream << " out: " << format_amount(takerAmount.out); } - std::tie(result, place_offer) = cross(sb, sbCancel, takerAmount); + std::tie(result, place_offer) = + cross(sb, sbCancel, takerAmount, domainID); // We expect the implementation of 
cross to succeed // or give a tec. @@ -1222,21 +1309,39 @@ CreateOffer::applyGuts(Sandbox& sb, Sandbox& sbCancel) adjustOwnerCount(sb, sleCreator, 1, viewJ); JLOG(j_.trace()) << "adding to book: " << to_string(saTakerPays.issue()) - << " : " << to_string(saTakerGets.issue()); + << " : " << to_string(saTakerGets.issue()) + << (domainID ? (" : " + to_string(*domainID)) : ""); - Book const book{saTakerPays.issue(), saTakerGets.issue()}; + Book const book{saTakerPays.issue(), saTakerGets.issue(), domainID}; // Add offer to order book, using the original rate // before any crossing occured. + // + // Regular offer - BookDirectory points to open directory + // + // Domain offer (w/o hyrbid) - BookDirectory points to domain + // directory + // + // Hybrid domain offer - BookDirectory points to domain directory, + // and AdditionalBooks field stores one entry that points to the open + // directory auto dir = keylet::quality(keylet::book(book), uRate); bool const bookExisted = static_cast(sb.peek(dir)); - auto const bookNode = sb.dirAppend(dir, offer_index, [&](SLE::ref sle) { + auto setBookDir = [&](SLE::ref sle, + std::optional const& maybeDomain) { sle->setFieldH160(sfTakerPaysCurrency, saTakerPays.issue().currency); sle->setFieldH160(sfTakerPaysIssuer, saTakerPays.issue().account); sle->setFieldH160(sfTakerGetsCurrency, saTakerGets.issue().currency); sle->setFieldH160(sfTakerGetsIssuer, saTakerGets.issue().account); sle->setFieldU64(sfExchangeRate, uRate); + if (maybeDomain) + sle->setFieldH256(sfDomainID, *maybeDomain); + }; + + auto const bookNode = sb.dirAppend(dir, offer_index, [&](SLE::ref sle) { + // sets domainID on book directory if it's a domain offer + setBookDir(sle, domainID); }); if (!bookNode) @@ -1259,6 +1364,18 @@ CreateOffer::applyGuts(Sandbox& sb, Sandbox& sbCancel) sleOffer->setFlag(lsfPassive); if (bSell) sleOffer->setFlag(lsfSell); + if (domainID) + sleOffer->setFieldH256(sfDomainID, *domainID); + + // if it's a hybrid offer, set hybrid flag, and create an open dir + if (bHybrid) + { + auto const res = applyHybrid( + sb, sleOffer, offer_index, saTakerPays, saTakerGets, setBookDir); + if (res != tesSUCCESS) + return {res, true}; // LCOV_EXCL_LINE + } + sb.insert(sleOffer); if (!bookExisted) diff --git a/src/xrpld/app/tx/detail/CreateOffer.h b/src/xrpld/app/tx/detail/CreateOffer.h index 35808c78fe..9b35062d8a 100644 --- a/src/xrpld/app/tx/detail/CreateOffer.h +++ b/src/xrpld/app/tx/detail/CreateOffer.h @@ -121,18 +121,32 @@ private: flowCross( PaymentSandbox& psb, PaymentSandbox& psbCancel, - Amounts const& takerAmount); + Amounts const& takerAmount, + std::optional const& domainID); // Temporary // This is a central location that invokes both versions of cross // so the results can be compared. Eventually this layer will be // removed once flowCross is determined to be stable. 
std::pair - cross(Sandbox& sb, Sandbox& sbCancel, Amounts const& takerAmount); + cross( + Sandbox& sb, + Sandbox& sbCancel, + Amounts const& takerAmount, + std::optional const& domainID); static std::string format_amount(STAmount const& amount); + TER + applyHybrid( + Sandbox& sb, + std::shared_ptr sleOffer, + Keylet const& offer_index, + STAmount const& saTakerPays, + STAmount const& saTakerGets, + std::function)> const& setDir); + private: // What kind of offer we are placing CrossType cross_type_; diff --git a/src/xrpld/app/tx/detail/InvariantCheck.cpp b/src/xrpld/app/tx/detail/InvariantCheck.cpp index aa1464ec2a..729f69a03b 100644 --- a/src/xrpld/app/tx/detail/InvariantCheck.cpp +++ b/src/xrpld/app/tx/detail/InvariantCheck.cpp @@ -1580,4 +1580,87 @@ ValidPermissionedDomain::finalize( (sleStatus_[1] ? check(*sleStatus_[1], j) : true); } +void +ValidPermissionedDEX::visitEntry( + bool, + std::shared_ptr const& before, + std::shared_ptr const& after) +{ + if (after && after->getType() == ltDIR_NODE) + { + if (after->isFieldPresent(sfDomainID)) + domains_.insert(after->getFieldH256(sfDomainID)); + } + + if (after && after->getType() == ltOFFER) + { + if (after->isFieldPresent(sfDomainID)) + domains_.insert(after->getFieldH256(sfDomainID)); + else + regularOffers_ = true; + + // if a hybrid offer is missing domain or additional book, there's + // something wrong + if (after->isFlag(lsfHybrid) && + (!after->isFieldPresent(sfDomainID) || + !after->isFieldPresent(sfAdditionalBooks) || + after->getFieldArray(sfAdditionalBooks).size() > 1)) + badHybrids_ = true; + } +} + +bool +ValidPermissionedDEX::finalize( + STTx const& tx, + TER const result, + XRPAmount const, + ReadView const& view, + beast::Journal const& j) +{ + auto const txType = tx.getTxnType(); + if ((txType != ttPAYMENT && txType != ttOFFER_CREATE) || + result != tesSUCCESS) + return true; + + // For each offercreate transaction, check if + // permissioned offers are valid + if (txType == ttOFFER_CREATE && badHybrids_) + { + JLOG(j.fatal()) << "Invariant failed: hybrid offer is malformed"; + return false; + } + + if (!tx.isFieldPresent(sfDomainID)) + return true; + + auto const domain = tx.getFieldH256(sfDomainID); + + if (!view.exists(keylet::permissionedDomain(domain))) + { + JLOG(j.fatal()) << "Invariant failed: domain doesn't exist"; + return false; + } + + // for both payment and offercreate, there shouldn't be another domain + // that's different from the domain specified + for (auto const& d : domains_) + { + if (d != domain) + { + JLOG(j.fatal()) << "Invariant failed: transaction" + " consumed wrong domains"; + return false; + } + } + + if (regularOffers_) + { + JLOG(j.fatal()) << "Invariant failed: domain transaction" + " affected regular offers"; + return false; + } + + return true; +} + } // namespace ripple diff --git a/src/xrpld/app/tx/detail/InvariantCheck.h b/src/xrpld/app/tx/detail/InvariantCheck.h index 6819780114..fdde8427fb 100644 --- a/src/xrpld/app/tx/detail/InvariantCheck.h +++ b/src/xrpld/app/tx/detail/InvariantCheck.h @@ -28,6 +28,7 @@ #include #include +#include namespace ripple { @@ -618,6 +619,28 @@ public: beast::Journal const&); }; +class ValidPermissionedDEX +{ + bool regularOffers_ = false; + bool badHybrids_ = false; + hash_set domains_; + +public: + void + visitEntry( + bool, + std::shared_ptr const&, + std::shared_ptr const&); + + bool + finalize( + STTx const&, + TER const, + XRPAmount const, + ReadView const&, + beast::Journal const&); +}; + // additional invariant checks can be declared above and 
then added to this // tuple using InvariantChecks = std::tuple< @@ -637,7 +660,8 @@ using InvariantChecks = std::tuple< NFTokenCountTracking, ValidClawback, ValidMPTIssuance, - ValidPermissionedDomain>; + ValidPermissionedDomain, + ValidPermissionedDEX>; /** * @brief get a tuple of all invariant checks diff --git a/src/xrpld/app/tx/detail/OfferStream.cpp b/src/xrpld/app/tx/detail/OfferStream.cpp index 7640cca206..55993f5c5f 100644 --- a/src/xrpld/app/tx/detail/OfferStream.cpp +++ b/src/xrpld/app/tx/detail/OfferStream.cpp @@ -17,10 +17,13 @@ */ //============================================================================== +#include #include +#include #include #include +#include namespace ripple { @@ -288,6 +291,17 @@ TOfferStreamBase::step() continue; } + if (entry->isFieldPresent(sfDomainID) && + !permissioned_dex::offerInDomain( + view_, entry->key(), entry->getFieldH256(sfDomainID), j_)) + { + JLOG(j_.trace()) + << "Removing offer no longer in domain " << entry->key(); + permRmOffer(entry->key()); + offer_ = TOffer{}; + continue; + } + // Calculate owner funds ownerFunds_ = accountFundsHelper( view_, diff --git a/src/xrpld/app/tx/detail/Payment.cpp b/src/xrpld/app/tx/detail/Payment.cpp index a97e472841..b597af570a 100644 --- a/src/xrpld/app/tx/detail/Payment.cpp +++ b/src/xrpld/app/tx/detail/Payment.cpp @@ -19,6 +19,7 @@ #include #include +#include #include #include #include @@ -71,6 +72,10 @@ Payment::preflight(PreflightContext const& ctx) !ctx.rules.enabled(featureCredentials)) return temDISABLED; + if (ctx.tx.isFieldPresent(sfDomainID) && + !ctx.rules.enabled(featurePermissionedDEX)) + return temDISABLED; + if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) return ret; @@ -357,6 +362,17 @@ Payment::preclaim(PreclaimContext const& ctx) !isTesSuccess(err)) return err; + if (ctx.tx.isFieldPresent(sfDomainID)) + { + if (!permissioned_dex::accountInDomain( + ctx.view, ctx.tx[sfAccount], ctx.tx[sfDomainID])) + return tecNO_PERMISSION; + + if (!permissioned_dex::accountInDomain( + ctx.view, ctx.tx[sfDestination], ctx.tx[sfDomainID])) + return tecNO_PERMISSION; + } + return tesSUCCESS; } @@ -458,6 +474,7 @@ Payment::doApply() dstAccountID, account_, ctx_.tx.getFieldPathSet(sfPaths), + ctx_.tx[~sfDomainID], ctx_.app.logs(), &rcInput); // VFALCO NOTE We might not need to apply, depending diff --git a/src/xrpld/app/tx/detail/XChainBridge.cpp b/src/xrpld/app/tx/detail/XChainBridge.cpp index 5fa03557e5..6ca049ee66 100644 --- a/src/xrpld/app/tx/detail/XChainBridge.cpp +++ b/src/xrpld/app/tx/detail/XChainBridge.cpp @@ -511,6 +511,7 @@ transferHelper( /*offer crossing*/ OfferCrossing::no, /*limit quality*/ std::nullopt, /*sendmax*/ std::nullopt, + /*domain id*/ std::nullopt, j); if (auto const r = result.result(); diff --git a/src/xrpld/ledger/detail/View.cpp b/src/xrpld/ledger/detail/View.cpp index e9499a287a..3978d26e56 100644 --- a/src/xrpld/ledger/detail/View.cpp +++ b/src/xrpld/ledger/detail/View.cpp @@ -37,7 +37,6 @@ #include #include -#include #include #include @@ -1484,6 +1483,27 @@ offerDelete(ApplyView& view, std::shared_ptr const& sle, beast::Journal j) return tefBAD_LEDGER; } + if (sle->isFieldPresent(sfAdditionalBooks)) + { + XRPL_ASSERT( + sle->isFlag(lsfHybrid) && sle->isFieldPresent(sfDomainID), + "ripple::offerDelete : should be a hybrid domain offer"); + + auto const& additionalBookDirs = sle->getFieldArray(sfAdditionalBooks); + + for (auto const& bookDir : additionalBookDirs) + { + auto const& dirIndex = bookDir.getFieldH256(sfBookDirectory); + auto const& dirNode = 
bookDir.getFieldU64(sfBookNode); + + if (!view.dirRemove( + keylet::page(dirIndex), dirNode, offerIndex, false)) + { + return tefBAD_LEDGER; // LCOV_EXCL_LINE + } + } + } + adjustOwnerCount(view, view.peek(keylet::account(owner)), -1, j); view.erase(sle); diff --git a/src/xrpld/rpc/BookChanges.h b/src/xrpld/rpc/BookChanges.h index c87fa0ccf4..9d94e80b82 100644 --- a/src/xrpld/rpc/BookChanges.h +++ b/src/xrpld/rpc/BookChanges.h @@ -49,13 +49,13 @@ computeBookChanges(std::shared_ptr const& lpAccepted) std::map< std::string, std::tuple< - STAmount, // side A volume - STAmount, // side B volume - STAmount, // high rate - STAmount, // low rate - STAmount, // open rate - STAmount // close rate - >> + STAmount, // side A volume + STAmount, // side B volume + STAmount, // high rate + STAmount, // low rate + STAmount, // open rate + STAmount, // close rate + std::optional>> // optional: domain id tally; for (auto& tx : lpAccepted->txs) @@ -148,6 +148,8 @@ computeBookChanges(std::shared_ptr const& lpAccepted) else ss << p << "|" << g; + std::optional domain = finalFields[~sfDomainID]; + std::string key{ss.str()}; if (tally.find(key) == tally.end()) @@ -157,8 +159,8 @@ computeBookChanges(std::shared_ptr const& lpAccepted) rate, // high rate, // low rate, // open - rate // close - }; + rate, // close + domain}; else { // increment volume @@ -173,7 +175,8 @@ computeBookChanges(std::shared_ptr const& lpAccepted) if (std::get<3>(entry) > rate) // low std::get<3>(entry) = rate; - std::get<5>(entry) = rate; // close + std::get<5>(entry) = rate; // close + std::get<6>(entry) = domain; // domain } } } @@ -211,6 +214,10 @@ computeBookChanges(std::shared_ptr const& lpAccepted) inner[jss::low] = to_string(std::get<3>(entry.second).iou()); inner[jss::open] = to_string(std::get<4>(entry.second).iou()); inner[jss::close] = to_string(std::get<5>(entry.second).iou()); + + std::optional const domain = std::get<6>(entry.second); + if (domain) + inner[jss::domain] = to_string(*domain); } return jvObj; diff --git a/src/xrpld/rpc/detail/TransactionSign.cpp b/src/xrpld/rpc/detail/TransactionSign.cpp index a4454c6e8a..175fd84c9b 100644 --- a/src/xrpld/rpc/detail/TransactionSign.cpp +++ b/src/xrpld/rpc/detail/TransactionSign.cpp @@ -40,6 +40,7 @@ #include #include +#include namespace ripple { namespace RPC { @@ -222,6 +223,22 @@ checkPayment( rpcINVALID_PARAMS, "Cannot specify both 'tx_json.Paths' and 'build_path'"); + std::optional domain; + if (tx_json.isMember(sfDomainID.jsonName)) + { + uint256 num; + if (!tx_json[sfDomainID.jsonName].isString() || + !num.parseHex(tx_json[sfDomainID.jsonName].asString())) + { + return RPC::make_error( + rpcDOMAIN_MALFORMED, "Unable to parse 'DomainID'."); + } + else + { + domain = num; + } + } + if (!tx_json.isMember(jss::Paths) && params.isMember(jss::build_path)) { STAmount sendMax; @@ -260,6 +277,7 @@ checkPayment( sendMax.issue().account, amount, std::nullopt, + domain, app); if (pf.findPaths(app.config().PATH_SEARCH_OLD)) { diff --git a/src/xrpld/rpc/handlers/BookOffers.cpp b/src/xrpld/rpc/handlers/BookOffers.cpp index bede01b927..df4712209c 100644 --- a/src/xrpld/rpc/handlers/BookOffers.cpp +++ b/src/xrpld/rpc/handlers/BookOffers.cpp @@ -172,6 +172,22 @@ doBookOffers(RPC::JsonContext& context) return RPC::invalid_field_error(jss::taker); } + std::optional domain; + if (context.params.isMember(jss::domain)) + { + uint256 num; + if (!context.params[jss::domain].isString() || + !num.parseHex(context.params[jss::domain].asString())) + { + return RPC::make_error( + rpcDOMAIN_MALFORMED, 
"Unable to parse domain."); + } + else + { + domain = num; + } + } + if (pay_currency == get_currency && pay_issuer == get_issuer) { JLOG(context.j.info()) << "taker_gets same as taker_pays."; @@ -190,7 +206,7 @@ doBookOffers(RPC::JsonContext& context) context.netOps.getBookPage( lpLedger, - {{pay_currency, pay_issuer}, {get_currency, get_issuer}}, + {{pay_currency, pay_issuer}, {get_currency, get_issuer}, domain}, takerID ? *takerID : beast::zero, bProof, limit, diff --git a/src/xrpld/rpc/handlers/Subscribe.cpp b/src/xrpld/rpc/handlers/Subscribe.cpp index deac6e18ad..e71d973b7b 100644 --- a/src/xrpld/rpc/handlers/Subscribe.cpp +++ b/src/xrpld/rpc/handlers/Subscribe.cpp @@ -305,6 +305,20 @@ doSubscribe(RPC::JsonContext& context) return rpcError(rpcBAD_ISSUER); } + if (j.isMember(jss::domain)) + { + uint256 domain; + if (!j[jss::domain].isString() || + !domain.parseHex(j[jss::domain].asString())) + { + return rpcError(rpcDOMAIN_MALFORMED); + } + else + { + book.domain = domain; + } + } + if (!isConsistent(book)) { JLOG(context.j.warn()) << "Bad market: " << book; diff --git a/src/xrpld/rpc/handlers/Unsubscribe.cpp b/src/xrpld/rpc/handlers/Unsubscribe.cpp index c890de593a..f512840c86 100644 --- a/src/xrpld/rpc/handlers/Unsubscribe.cpp +++ b/src/xrpld/rpc/handlers/Unsubscribe.cpp @@ -230,6 +230,20 @@ doUnsubscribe(RPC::JsonContext& context) return rpcError(rpcBAD_MARKET); } + if (jv.isMember(jss::domain)) + { + uint256 domain; + if (!jv[jss::domain].isString() || + !domain.parseHex(jv[jss::domain].asString())) + { + return rpcError(rpcDOMAIN_MALFORMED); + } + else + { + book.domain = domain; + } + } + context.netOps.unsubBook(ispSub->getSeq(), book); // both_sides is deprecated. From 621df422a79a6b24f0b35eac2ce719a660b0945f Mon Sep 17 00:00:00 2001 From: Gregory Tsipenyuk Date: Mon, 2 Jun 2025 09:52:10 -0400 Subject: [PATCH 043/244] fix: Add AMMv1_3 amendment (#5203) * Add AMM bid/create/deposit/swap/withdraw/vote invariants: - Deposit, Withdrawal invariants: `sqrt(asset1Balance * asset2Balance) >= LPTokens`. - Bid: `sqrt(asset1Balance * asset2Balance) > LPTokens` and the pool balances don't change. - Create: `sqrt(asset1Balance * assetBalance2) == LPTokens`. - Swap: `asset1BalanceAfter * asset2BalanceAfter >= asset1BalanceBefore * asset2BalanceBefore` and `LPTokens` don't change. - Vote: `LPTokens` and pool balances don't change. - All AMM and swap transactions: amounts and tokens are greater than zero, except on withdrawal if all tokens are withdrawn. * Add AMM deposit and withdraw rounding to ensure AMM invariant: - On deposit, tokens out are rounded downward and deposit amount is rounded upward. - On withdrawal, tokens in are rounded upward and withdrawal amount is rounded downward. * Add Order Book Offer invariant to verify consumed amounts. Consumed amounts are less than the offer. * Fix Bid validation. `AuthAccount` can't have duplicate accounts or the submitter account. 
--- include/xrpl/protocol/IOUAmount.h | 6 + include/xrpl/protocol/Rules.h | 3 + include/xrpl/protocol/detail/features.macro | 1 + src/libxrpl/protocol/Rules.cpp | 8 + src/test/app/AMMClawback_test.cpp | 543 ++++++--- src/test/app/AMMExtended_test.cpp | 14 +- src/test/app/AMM_test.cpp | 1186 +++++++++++++++---- src/test/jtx/AMM.h | 11 +- src/test/jtx/AMMTest.h | 14 + src/test/jtx/Env.h | 6 + src/test/jtx/impl/AMM.cpp | 17 +- src/test/jtx/impl/AMMTest.cpp | 25 +- src/test/rpc/AMMInfo_test.cpp | 186 +-- src/xrpld/app/misc/AMMHelpers.h | 151 ++- src/xrpld/app/misc/detail/AMMHelpers.cpp | 206 +++- src/xrpld/app/tx/detail/AMMBid.cpp | 19 +- src/xrpld/app/tx/detail/AMMDeposit.cpp | 156 ++- src/xrpld/app/tx/detail/AMMWithdraw.cpp | 143 ++- src/xrpld/app/tx/detail/AMMWithdraw.h | 2 +- src/xrpld/app/tx/detail/InvariantCheck.cpp | 307 +++++ src/xrpld/app/tx/detail/InvariantCheck.h | 66 +- src/xrpld/app/tx/detail/Offer.h | 19 +- 22 files changed, 2515 insertions(+), 574 deletions(-) diff --git a/include/xrpl/protocol/IOUAmount.h b/include/xrpl/protocol/IOUAmount.h index a27069e37b..93fba4150d 100644 --- a/include/xrpl/protocol/IOUAmount.h +++ b/include/xrpl/protocol/IOUAmount.h @@ -98,6 +98,12 @@ public: static IOUAmount minPositiveAmount(); + + friend std::ostream& + operator<<(std::ostream& os, IOUAmount const& x) + { + return os << to_string(x); + } }; inline IOUAmount::IOUAmount(beast::Zero) diff --git a/include/xrpl/protocol/Rules.h b/include/xrpl/protocol/Rules.h index 6b22d01afe..efdaf803fd 100644 --- a/include/xrpl/protocol/Rules.h +++ b/include/xrpl/protocol/Rules.h @@ -28,6 +28,9 @@ namespace ripple { +bool +isFeatureEnabled(uint256 const& feature); + class DigestAwareReadView; /** Rules controlling protocol behavior. */ diff --git a/include/xrpl/protocol/detail/features.macro b/include/xrpl/protocol/detail/features.macro index 61828d4758..f458b57219 100644 --- a/include/xrpl/protocol/detail/features.macro +++ b/include/xrpl/protocol/detail/features.macro @@ -32,6 +32,7 @@ // If you add an amendment here, then do not forget to increment `numFeatures` // in include/xrpl/protocol/Feature.h. 
+XRPL_FIX (AMMv1_3, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(PermissionedDEX, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(Batch, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(SingleAssetVault, Supported::no, VoteBehavior::DefaultNo) diff --git a/src/libxrpl/protocol/Rules.cpp b/src/libxrpl/protocol/Rules.cpp index 3d1c718e65..b472b9b0f1 100644 --- a/src/libxrpl/protocol/Rules.cpp +++ b/src/libxrpl/protocol/Rules.cpp @@ -161,4 +161,12 @@ Rules::operator!=(Rules const& other) const { return !(*this == other); } + +bool +isFeatureEnabled(uint256 const& feature) +{ + auto const& rules = getCurrentTransactionRules(); + return rules && rules->enabled(feature); +} + } // namespace ripple diff --git a/src/test/app/AMMClawback_test.cpp b/src/test/app/AMMClawback_test.cpp index 878c570a12..77e908d5fe 100644 --- a/src/test/app/AMMClawback_test.cpp +++ b/src/test/app/AMMClawback_test.cpp @@ -581,8 +581,12 @@ class AMMClawback_test : public jtx::AMMTest AMM amm(env, alice, EUR(5000), USD(4000), ter(tesSUCCESS)); env.close(); - BEAST_EXPECT(amm.expectBalances( - USD(4000), EUR(5000), IOUAmount{4472135954999580, -12})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectBalances( + USD(4000), EUR(5000), IOUAmount{4472135954999580, -12})); + else + BEAST_EXPECT(amm.expectBalances( + USD(4000), EUR(5000), IOUAmount{4472135954999579, -12})); // gw clawback 1000 USD from the AMM pool env(amm::ammClawback(gw, alice, USD, EUR, USD(1000)), @@ -601,12 +605,20 @@ class AMMClawback_test : public jtx::AMMTest // 1000 USD and 1250 EUR was withdrawn from the AMM pool, so the // current balance is 3000 USD and 3750 EUR. - BEAST_EXPECT(amm.expectBalances( - USD(3000), EUR(3750), IOUAmount{3354101966249685, -12})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectBalances( + USD(3000), EUR(3750), IOUAmount{3354101966249685, -12})); + else + BEAST_EXPECT(amm.expectBalances( + USD(3000), EUR(3750), IOUAmount{3354101966249684, -12})); // Alice has 3/4 of its initial lptokens Left. - BEAST_EXPECT( - amm.expectLPTokens(alice, IOUAmount{3354101966249685, -12})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectLPTokens( + alice, IOUAmount{3354101966249685, -12})); + else + BEAST_EXPECT(amm.expectLPTokens( + alice, IOUAmount{3354101966249684, -12})); // gw clawback another 500 USD from the AMM pool. env(amm::ammClawback(gw, alice, USD, EUR, USD(500)), @@ -617,14 +629,21 @@ class AMMClawback_test : public jtx::AMMTest // AMM pool. env.require(balance(alice, gw["USD"](2000))); - BEAST_EXPECT(amm.expectBalances( - STAmount{USD, UINT64_C(2500000000000001), -12}, - STAmount{EUR, UINT64_C(3125000000000001), -12}, - IOUAmount{2795084971874738, -12})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectBalances( + STAmount{USD, UINT64_C(2500000000000001), -12}, + STAmount{EUR, UINT64_C(3125000000000001), -12}, + IOUAmount{2795084971874738, -12})); + else + BEAST_EXPECT(amm.expectBalances( + USD(2500), EUR(3125), IOUAmount{2795084971874737, -12})); - BEAST_EXPECT( - env.balance(alice, EUR) == - STAmount(EUR, UINT64_C(2874999999999999), -12)); + if (!features[fixAMMv1_3]) + BEAST_EXPECT( + env.balance(alice, EUR) == + STAmount(EUR, UINT64_C(2874999999999999), -12)); + else + BEAST_EXPECT(env.balance(alice, EUR) == EUR(2875)); // gw clawback small amount, 1 USD. env(amm::ammClawback(gw, alice, USD, EUR, USD(1)), ter(tesSUCCESS)); @@ -633,14 +652,21 @@ class AMMClawback_test : public jtx::AMMTest // Another 1 USD / 1.25 EUR was withdrawn. 
env.require(balance(alice, gw["USD"](2000))); - BEAST_EXPECT(amm.expectBalances( - STAmount{USD, UINT64_C(2499000000000002), -12}, - STAmount{EUR, UINT64_C(3123750000000002), -12}, - IOUAmount{2793966937885989, -12})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectBalances( + STAmount{USD, UINT64_C(2499000000000002), -12}, + STAmount{EUR, UINT64_C(3123750000000002), -12}, + IOUAmount{2793966937885989, -12})); + else + BEAST_EXPECT(amm.expectBalances( + USD(2499), EUR(3123.75), IOUAmount{2793966937885987, -12})); - BEAST_EXPECT( - env.balance(alice, EUR) == - STAmount(EUR, UINT64_C(2876249999999998), -12)); + if (!features[fixAMMv1_3]) + BEAST_EXPECT( + env.balance(alice, EUR) == + STAmount(EUR, UINT64_C(2'876'249999999998), -12)); + else + BEAST_EXPECT(env.balance(alice, EUR) == EUR(2876.25)); // gw clawback 4000 USD, exceeding the current balance. We // will clawback all. @@ -713,14 +739,26 @@ class AMMClawback_test : public jtx::AMMTest // gw2 creates AMM pool of XRP/EUR, alice and bob deposit XRP/EUR. AMM amm2(env, gw2, XRP(3000), EUR(1000), ter(tesSUCCESS)); - BEAST_EXPECT(amm2.expectBalances( - EUR(1000), XRP(3000), IOUAmount{1732050807568878, -9})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm2.expectBalances( + EUR(1000), XRP(3000), IOUAmount{1732050807568878, -9})); + else + BEAST_EXPECT(amm2.expectBalances( + EUR(1000), XRP(3000), IOUAmount{1732050807568877, -9})); amm2.deposit(alice, EUR(1000), XRP(3000)); - BEAST_EXPECT(amm2.expectBalances( - EUR(2000), XRP(6000), IOUAmount{3464101615137756, -9})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm2.expectBalances( + EUR(2000), XRP(6000), IOUAmount{3464101615137756, -9})); + else + BEAST_EXPECT(amm2.expectBalances( + EUR(2000), XRP(6000), IOUAmount{3464101615137754, -9})); amm2.deposit(bob, EUR(1000), XRP(3000)); - BEAST_EXPECT(amm2.expectBalances( - EUR(3000), XRP(9000), IOUAmount{5196152422706634, -9})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm2.expectBalances( + EUR(3000), XRP(9000), IOUAmount{5196152422706634, -9})); + else + BEAST_EXPECT(amm2.expectBalances( + EUR(3000), XRP(9000), IOUAmount{5196152422706631, -9})); env.close(); auto aliceXrpBalance = env.balance(alice, XRP); @@ -743,10 +781,18 @@ class AMMClawback_test : public jtx::AMMTest BEAST_EXPECT( expectLedgerEntryRoot(env, alice, aliceXrpBalance + XRP(1000))); - BEAST_EXPECT(amm.expectBalances( - USD(2500), XRP(5000), IOUAmount{3535533905932738, -9})); - BEAST_EXPECT( - amm.expectLPTokens(alice, IOUAmount{7071067811865480, -10})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectBalances( + USD(2500), XRP(5000), IOUAmount{3535533905932738, -9})); + else + BEAST_EXPECT(amm.expectBalances( + USD(2500), XRP(5000), IOUAmount{3535533905932737, -9})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectLPTokens( + alice, IOUAmount{7071067811865480, -10})); + else + BEAST_EXPECT(amm.expectLPTokens( + alice, IOUAmount{7071067811865474, -10})); BEAST_EXPECT( amm.expectLPTokens(bob, IOUAmount{1414213562373095, -9})); @@ -760,14 +806,26 @@ class AMMClawback_test : public jtx::AMMTest // Bob gets 20 XRP back. 
BEAST_EXPECT( expectLedgerEntryRoot(env, bob, bobXrpBalance + XRP(20))); - BEAST_EXPECT(amm.expectBalances( - STAmount{USD, UINT64_C(2490000000000001), -12}, - XRP(4980), - IOUAmount{3521391770309008, -9})); - BEAST_EXPECT( - amm.expectLPTokens(alice, IOUAmount{7071067811865480, -10})); - BEAST_EXPECT( - amm.expectLPTokens(bob, IOUAmount{1400071426749365, -9})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectBalances( + STAmount{USD, UINT64_C(2490000000000001), -12}, + XRP(4980), + IOUAmount{3521391770309008, -9})); + else + BEAST_EXPECT(amm.expectBalances( + USD(2'490), XRP(4980), IOUAmount{3521391770309006, -9})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectLPTokens( + alice, IOUAmount{7071067811865480, -10})); + else + BEAST_EXPECT(amm.expectLPTokens( + alice, IOUAmount{7071067811865474, -10})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT( + amm.expectLPTokens(bob, IOUAmount{1400071426749365, -9})); + else + BEAST_EXPECT( + amm.expectLPTokens(bob, IOUAmount{1400071426749364, -9})); // gw2 clawback 200 EUR from amm2. env(amm::ammClawback(gw2, alice, EUR, XRP, EUR(200)), @@ -780,12 +838,24 @@ class AMMClawback_test : public jtx::AMMTest // Alice gets 600 XRP back. BEAST_EXPECT(expectLedgerEntryRoot( env, alice, aliceXrpBalance + XRP(1000) + XRP(600))); - BEAST_EXPECT(amm2.expectBalances( - EUR(2800), XRP(8400), IOUAmount{4849742261192859, -9})); - BEAST_EXPECT( - amm2.expectLPTokens(alice, IOUAmount{1385640646055103, -9})); - BEAST_EXPECT( - amm2.expectLPTokens(bob, IOUAmount{1732050807568878, -9})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm2.expectBalances( + EUR(2800), XRP(8400), IOUAmount{4849742261192859, -9})); + else + BEAST_EXPECT(amm2.expectBalances( + EUR(2800), XRP(8400), IOUAmount{4849742261192856, -9})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm2.expectLPTokens( + alice, IOUAmount{1385640646055103, -9})); + else + BEAST_EXPECT(amm2.expectLPTokens( + alice, IOUAmount{1385640646055102, -9})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT( + amm2.expectLPTokens(bob, IOUAmount{1732050807568878, -9})); + else + BEAST_EXPECT( + amm2.expectLPTokens(bob, IOUAmount{1732050807568877, -9})); // gw claw back 1000 USD from alice in amm, which exceeds alice's // balance. This will clawback all the remaining LP tokens of alice @@ -798,17 +868,34 @@ class AMMClawback_test : public jtx::AMMTest env.require(balance(bob, gw["USD"](4000))); // Alice gets 1000 XRP back. 
- BEAST_EXPECT(expectLedgerEntryRoot( - env, - alice, - aliceXrpBalance + XRP(1000) + XRP(600) + XRP(1000))); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(expectLedgerEntryRoot( + env, + alice, + aliceXrpBalance + XRP(1000) + XRP(600) + XRP(1000))); + else + BEAST_EXPECT(expectLedgerEntryRoot( + env, + alice, + aliceXrpBalance + XRP(1000) + XRP(600) + XRP(1000) - + XRPAmount{1})); BEAST_EXPECT(amm.expectLPTokens(alice, IOUAmount(0))); - BEAST_EXPECT( - amm.expectLPTokens(bob, IOUAmount{1400071426749365, -9})); - BEAST_EXPECT(amm.expectBalances( - STAmount{USD, UINT64_C(1990000000000001), -12}, - XRP(3980), - IOUAmount{2814284989122460, -9})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT( + amm.expectLPTokens(bob, IOUAmount{1400071426749365, -9})); + else + BEAST_EXPECT( + amm.expectLPTokens(bob, IOUAmount{1400071426749364, -9})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectBalances( + STAmount{USD, UINT64_C(1990000000000001), -12}, + XRP(3980), + IOUAmount{2814284989122460, -9})); + else + BEAST_EXPECT(amm.expectBalances( + USD(1'990), + XRPAmount{3'980'000'001}, + IOUAmount{2814284989122459, -9})); // gw clawback 1000 USD from bob in amm, which also exceeds bob's // balance in amm. All bob's lptoken in amm will be consumed, which @@ -820,10 +907,17 @@ class AMMClawback_test : public jtx::AMMTest env.require(balance(alice, gw["USD"](5000))); env.require(balance(bob, gw["USD"](4000))); - BEAST_EXPECT(expectLedgerEntryRoot( - env, - alice, - aliceXrpBalance + XRP(1000) + XRP(600) + XRP(1000))); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(expectLedgerEntryRoot( + env, + alice, + aliceXrpBalance + XRP(1000) + XRP(600) + XRP(1000))); + else + BEAST_EXPECT(expectLedgerEntryRoot( + env, + alice, + aliceXrpBalance + XRP(1000) + XRP(600) + XRP(1000) - + XRPAmount{1})); BEAST_EXPECT(expectLedgerEntryRoot( env, bob, bobXrpBalance + XRP(20) + XRP(1980))); @@ -843,21 +937,32 @@ class AMMClawback_test : public jtx::AMMTest // Alice gets another 2400 XRP back, bob's XRP balance remains the // same. - BEAST_EXPECT(expectLedgerEntryRoot( - env, - alice, - aliceXrpBalance + XRP(1000) + XRP(600) + XRP(1000) + - XRP(2400))); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(expectLedgerEntryRoot( + env, + alice, + aliceXrpBalance + XRP(1000) + XRP(600) + XRP(1000) + + XRP(2400))); + else + BEAST_EXPECT(expectLedgerEntryRoot( + env, + alice, + aliceXrpBalance + XRP(1000) + XRP(600) + XRP(1000) + + XRP(2400) - XRPAmount{1})); BEAST_EXPECT(expectLedgerEntryRoot( env, bob, bobXrpBalance + XRP(20) + XRP(1980))); // Alice now does not have any lptoken in amm2 BEAST_EXPECT(amm2.expectLPTokens(alice, IOUAmount(0))); - BEAST_EXPECT(amm2.expectBalances( - EUR(2000), XRP(6000), IOUAmount{3464101615137756, -9})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm2.expectBalances( + EUR(2000), XRP(6000), IOUAmount{3464101615137756, -9})); + else + BEAST_EXPECT(amm2.expectBalances( + EUR(2000), XRP(6000), IOUAmount{3464101615137754, -9})); - // gw2 claw back 2000 EUR from bib in amm2, which exceeds bob's + // gw2 claw back 2000 EUR from bob in amm2, which exceeds bob's // balance. All bob's lptokens will be consumed, which corresponds // to 1000EUR / 3000 XRP. env(amm::ammClawback(gw2, bob, EUR, XRP, EUR(2000)), @@ -869,11 +974,18 @@ class AMMClawback_test : public jtx::AMMTest // Bob gets another 3000 XRP back. Alice's XRP balance remains the // same. 
- BEAST_EXPECT(expectLedgerEntryRoot( - env, - alice, - aliceXrpBalance + XRP(1000) + XRP(600) + XRP(1000) + - XRP(2400))); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(expectLedgerEntryRoot( + env, + alice, + aliceXrpBalance + XRP(1000) + XRP(600) + XRP(1000) + + XRP(2400))); + else + BEAST_EXPECT(expectLedgerEntryRoot( + env, + alice, + aliceXrpBalance + XRP(1000) + XRP(600) + XRP(1000) + + XRP(2400) - XRPAmount{1})); BEAST_EXPECT(expectLedgerEntryRoot( env, bob, bobXrpBalance + XRP(20) + XRP(1980) + XRP(3000))); @@ -881,8 +993,12 @@ class AMMClawback_test : public jtx::AMMTest BEAST_EXPECT(amm2.expectLPTokens(alice, IOUAmount(0))); BEAST_EXPECT(amm2.expectLPTokens(bob, IOUAmount(0))); - BEAST_EXPECT(amm2.expectBalances( - EUR(1000), XRP(3000), IOUAmount{1732050807568878, -9})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm2.expectBalances( + EUR(1000), XRP(3000), IOUAmount{1732050807568878, -9})); + else + BEAST_EXPECT(amm2.expectBalances( + EUR(1000), XRP(3000), IOUAmount{1732050807568877, -9})); } } @@ -940,21 +1056,45 @@ class AMMClawback_test : public jtx::AMMTest AMM amm(env, alice, EUR(5000), USD(4000), ter(tesSUCCESS)); env.close(); - BEAST_EXPECT(amm.expectBalances( - USD(4000), EUR(5000), IOUAmount{4472135954999580, -12})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectBalances( + USD(4000), EUR(5000), IOUAmount{4472135954999580, -12})); + else + BEAST_EXPECT(amm.expectBalances( + USD(4000), EUR(5000), IOUAmount{4472135954999579, -12})); amm.deposit(bob, USD(2000), EUR(2500)); - BEAST_EXPECT(amm.expectBalances( - USD(6000), EUR(7500), IOUAmount{6708203932499370, -12})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectBalances( + USD(6000), EUR(7500), IOUAmount{6708203932499370, -12})); + else + BEAST_EXPECT(amm.expectBalances( + USD(6000), EUR(7500), IOUAmount{6708203932499368, -12})); amm.deposit(carol, USD(1000), EUR(1250)); - BEAST_EXPECT(amm.expectBalances( - USD(7000), EUR(8750), IOUAmount{7826237921249265, -12})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectBalances( + USD(7000), EUR(8750), IOUAmount{7826237921249265, -12})); + else + BEAST_EXPECT(amm.expectBalances( + USD(7000), EUR(8750), IOUAmount{7826237921249262, -12})); - BEAST_EXPECT( - amm.expectLPTokens(alice, IOUAmount{4472135954999580, -12})); - BEAST_EXPECT( - amm.expectLPTokens(bob, IOUAmount{2236067977499790, -12})); - BEAST_EXPECT( - amm.expectLPTokens(carol, IOUAmount{1118033988749895, -12})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectLPTokens( + alice, IOUAmount{4472135954999580, -12})); + else + BEAST_EXPECT(amm.expectLPTokens( + alice, IOUAmount{4472135954999579, -12})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT( + amm.expectLPTokens(bob, IOUAmount{2236067977499790, -12})); + else + BEAST_EXPECT( + amm.expectLPTokens(bob, IOUAmount{2236067977499789, -12})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectLPTokens( + carol, IOUAmount{1118033988749895, -12})); + else + BEAST_EXPECT(amm.expectLPTokens( + carol, IOUAmount{1118033988749894, -12})); env.require(balance(alice, gw["USD"](2000))); env.require(balance(alice, gw2["EUR"](1000))); @@ -968,16 +1108,30 @@ class AMMClawback_test : public jtx::AMMTest ter(tesSUCCESS)); env.close(); - BEAST_EXPECT(amm.expectBalances( - STAmount{USD, UINT64_C(4999999999999999), -12}, - STAmount{EUR, UINT64_C(6249999999999999), -12}, - IOUAmount{5590169943749475, -12})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectBalances( + STAmount{USD, UINT64_C(4999999999999999), -12}, + STAmount{EUR, UINT64_C(6249999999999999), 
-12}, + IOUAmount{5590169943749475, -12})); + else + BEAST_EXPECT(amm.expectBalances( + STAmount{USD, UINT64_C(5000000000000001), -12}, + STAmount{EUR, UINT64_C(6250000000000001), -12}, + IOUAmount{5590169943749473, -12})); - BEAST_EXPECT( - amm.expectLPTokens(alice, IOUAmount{4472135954999580, -12})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectLPTokens( + alice, IOUAmount{4472135954999580, -12})); + else + BEAST_EXPECT(amm.expectLPTokens( + alice, IOUAmount{4472135954999579, -12})); BEAST_EXPECT(amm.expectLPTokens(bob, IOUAmount(0))); - BEAST_EXPECT( - amm.expectLPTokens(carol, IOUAmount{1118033988749895, -12})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectLPTokens( + carol, IOUAmount{1118033988749895, -12})); + else + BEAST_EXPECT(amm.expectLPTokens( + carol, IOUAmount{1118033988749894, -12})); // Bob will get 2500 EUR back. env.require(balance(alice, gw["USD"](2000))); @@ -986,9 +1140,14 @@ class AMMClawback_test : public jtx::AMMTest env.balance(bob, USD) == STAmount(USD, UINT64_C(3000000000000000), -12)); - BEAST_EXPECT( - env.balance(bob, EUR) == - STAmount(EUR, UINT64_C(5000000000000001), -12)); + if (!features[fixAMMv1_3]) + BEAST_EXPECT( + env.balance(bob, EUR) == + STAmount(EUR, UINT64_C(5000000000000001), -12)); + else + BEAST_EXPECT( + env.balance(bob, EUR) == + STAmount(EUR, UINT64_C(4999999999999999), -12)); env.require(balance(carol, gw["USD"](3000))); env.require(balance(carol, gw2["EUR"](2750))); @@ -996,13 +1155,23 @@ class AMMClawback_test : public jtx::AMMTest env(amm::ammClawback(gw2, carol, EUR, USD, std::nullopt), ter(tesSUCCESS)); env.close(); - BEAST_EXPECT(amm.expectBalances( - STAmount{USD, UINT64_C(3999999999999999), -12}, - STAmount{EUR, UINT64_C(4999999999999999), -12}, - IOUAmount{4472135954999580, -12})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectBalances( + STAmount{USD, UINT64_C(3999999999999999), -12}, + STAmount{EUR, UINT64_C(4999999999999999), -12}, + IOUAmount{4472135954999580, -12})); + else + BEAST_EXPECT(amm.expectBalances( + STAmount{USD, UINT64_C(4000000000000001), -12}, + STAmount{EUR, UINT64_C(5000000000000002), -12}, + IOUAmount{4472135954999579, -12})); - BEAST_EXPECT( - amm.expectLPTokens(alice, IOUAmount{4472135954999580, -12})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectLPTokens( + alice, IOUAmount{4472135954999580, -12})); + else + BEAST_EXPECT(amm.expectLPTokens( + alice, IOUAmount{4472135954999579, -12})); BEAST_EXPECT(amm.expectLPTokens(bob, IOUAmount(0))); BEAST_EXPECT(amm.expectLPTokens(carol, IOUAmount(0))); @@ -1041,14 +1210,26 @@ class AMMClawback_test : public jtx::AMMTest // gw creates AMM pool of XRP/USD, alice and bob deposit XRP/USD. 
AMM amm(env, gw, XRP(2000), USD(10000), ter(tesSUCCESS)); - BEAST_EXPECT(amm.expectBalances( - USD(10000), XRP(2000), IOUAmount{4472135954999580, -9})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectBalances( + USD(10000), XRP(2000), IOUAmount{4472135954999580, -9})); + else + BEAST_EXPECT(amm.expectBalances( + USD(10000), XRP(2000), IOUAmount{4472135954999579, -9})); amm.deposit(alice, USD(1000), XRP(200)); - BEAST_EXPECT(amm.expectBalances( - USD(11000), XRP(2200), IOUAmount{4919349550499538, -9})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectBalances( + USD(11000), XRP(2200), IOUAmount{4919349550499538, -9})); + else + BEAST_EXPECT(amm.expectBalances( + USD(11000), XRP(2200), IOUAmount{4919349550499536, -9})); amm.deposit(bob, USD(2000), XRP(400)); - BEAST_EXPECT(amm.expectBalances( - USD(13000), XRP(2600), IOUAmount{5813776741499453, -9})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectBalances( + USD(13000), XRP(2600), IOUAmount{5813776741499453, -9})); + else + BEAST_EXPECT(amm.expectBalances( + USD(13000), XRP(2600), IOUAmount{5813776741499451, -9})); env.close(); auto aliceXrpBalance = env.balance(alice, XRP); @@ -1058,18 +1239,34 @@ class AMMClawback_test : public jtx::AMMTest env(amm::ammClawback(gw, alice, USD, XRP, std::nullopt), ter(tesSUCCESS)); env.close(); - BEAST_EXPECT(amm.expectBalances( - USD(12000), XRP(2400), IOUAmount{5366563145999495, -9})); - BEAST_EXPECT( - expectLedgerEntryRoot(env, alice, aliceXrpBalance + XRP(200))); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectBalances( + USD(12000), XRP(2400), IOUAmount{5366563145999495, -9})); + else + BEAST_EXPECT(amm.expectBalances( + USD(12000), + XRPAmount(2400000001), + IOUAmount{5366563145999494, -9})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(expectLedgerEntryRoot( + env, alice, aliceXrpBalance + XRP(200))); + else + BEAST_EXPECT(expectLedgerEntryRoot( + env, alice, aliceXrpBalance + XRP(200) - XRPAmount{1})); BEAST_EXPECT(amm.expectLPTokens(alice, IOUAmount(0))); // gw clawback all bob's USD in amm. (2000 USD / 400 XRP) env(amm::ammClawback(gw, bob, USD, XRP, std::nullopt), ter(tesSUCCESS)); env.close(); - BEAST_EXPECT(amm.expectBalances( - USD(10000), XRP(2000), IOUAmount{4472135954999580, -9})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectBalances( + USD(10000), XRP(2000), IOUAmount{4472135954999580, -9})); + else + BEAST_EXPECT(amm.expectBalances( + USD(10000), + XRPAmount(2000000001), + IOUAmount{4472135954999579, -9})); BEAST_EXPECT( expectLedgerEntryRoot(env, bob, bobXrpBalance + XRP(400))); BEAST_EXPECT(amm.expectLPTokens(alice, IOUAmount(0))); @@ -1125,10 +1322,12 @@ class AMMClawback_test : public jtx::AMMTest amm.deposit(bob, USD(4000), EUR(1000)); BEAST_EXPECT( amm.expectBalances(USD(12000), EUR(3000), IOUAmount(6000))); - amm.deposit(carol, USD(2000), EUR(500)); + if (!features[fixAMMv1_3]) + amm.deposit(carol, USD(2000), EUR(500)); + else + amm.deposit(carol, USD(2000.25), EUR(500)); BEAST_EXPECT( amm.expectBalances(USD(14000), EUR(3500), IOUAmount(7000))); - // gw clawback 1000 USD from carol. 
env(amm::ammClawback(gw, carol, USD, EUR, USD(1000)), ter(tesSUCCESS)); env.close(); @@ -1142,7 +1341,12 @@ class AMMClawback_test : public jtx::AMMTest BEAST_EXPECT(env.balance(alice, EUR) == EUR(8000)); BEAST_EXPECT(env.balance(bob, USD) == USD(5000)); BEAST_EXPECT(env.balance(bob, EUR) == EUR(8000)); - BEAST_EXPECT(env.balance(carol, USD) == USD(6000)); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(env.balance(carol, USD) == USD(6000)); + else + BEAST_EXPECT( + env.balance(carol, USD) == + STAmount(USD, UINT64_C(5999'999999999999), -12)); // 250 EUR goes back to carol. BEAST_EXPECT(env.balance(carol, EUR) == EUR(7750)); @@ -1164,7 +1368,12 @@ class AMMClawback_test : public jtx::AMMTest BEAST_EXPECT(env.balance(bob, USD) == USD(5000)); // 250 EUR did not go back to bob because tfClawTwoAssets is set. BEAST_EXPECT(env.balance(bob, EUR) == EUR(8000)); - BEAST_EXPECT(env.balance(carol, USD) == USD(6000)); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(env.balance(carol, USD) == USD(6000)); + else + BEAST_EXPECT( + env.balance(carol, USD) == + STAmount(USD, UINT64_C(5999'999999999999), -12)); BEAST_EXPECT(env.balance(carol, EUR) == EUR(7750)); // gw clawback all USD from alice and set tfClawTwoAssets. @@ -1181,7 +1390,12 @@ class AMMClawback_test : public jtx::AMMTest BEAST_EXPECT(env.balance(alice, EUR) == EUR(8000)); BEAST_EXPECT(env.balance(bob, USD) == USD(5000)); BEAST_EXPECT(env.balance(bob, EUR) == EUR(8000)); - BEAST_EXPECT(env.balance(carol, USD) == USD(6000)); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(env.balance(carol, USD) == USD(6000)); + else + BEAST_EXPECT( + env.balance(carol, USD) == + STAmount(USD, UINT64_C(5999'999999999999), -12)); BEAST_EXPECT(env.balance(carol, EUR) == EUR(7750)); } @@ -1366,12 +1580,21 @@ class AMMClawback_test : public jtx::AMMTest // gw2 claws back 1000 EUR from gw. env(amm::ammClawback(gw2, gw, EUR, USD, EUR(1000)), ter(tesSUCCESS)); env.close(); - BEAST_EXPECT(amm.expectBalances( - USD(4500), - STAmount(EUR, UINT64_C(9000000000000001), -12), - IOUAmount{6363961030678928, -12})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectBalances( + USD(4500), + STAmount(EUR, UINT64_C(9000000000000001), -12), + IOUAmount{6363961030678928, -12})); + else + BEAST_EXPECT(amm.expectBalances( + USD(4500), EUR(9000), IOUAmount{6363961030678928, -12})); - BEAST_EXPECT(amm.expectLPTokens(gw, IOUAmount{7071067811865480, -13})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT( + amm.expectLPTokens(gw, IOUAmount{7071067811865480, -13})); + else + BEAST_EXPECT( + amm.expectLPTokens(gw, IOUAmount{7071067811865475, -13})); BEAST_EXPECT(amm.expectLPTokens(gw2, IOUAmount{1414213562373095, -12})); BEAST_EXPECT( amm.expectLPTokens(alice, IOUAmount{4242640687119285, -12})); @@ -1384,12 +1607,21 @@ class AMMClawback_test : public jtx::AMMTest // gw2 claws back 4000 EUR from alice. 
env(amm::ammClawback(gw2, alice, EUR, USD, EUR(4000)), ter(tesSUCCESS)); env.close(); - BEAST_EXPECT(amm.expectBalances( - USD(2500), - STAmount(EUR, UINT64_C(5000000000000001), -12), - IOUAmount{3535533905932738, -12})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectBalances( + USD(2500), + STAmount(EUR, UINT64_C(5000000000000001), -12), + IOUAmount{3535533905932738, -12})); + else + BEAST_EXPECT(amm.expectBalances( + USD(2500), EUR(5000), IOUAmount{3535533905932738, -12})); - BEAST_EXPECT(amm.expectLPTokens(gw, IOUAmount{7071067811865480, -13})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT( + amm.expectLPTokens(gw, IOUAmount{7071067811865480, -13})); + else + BEAST_EXPECT( + amm.expectLPTokens(gw, IOUAmount{7071067811865475, -13})); BEAST_EXPECT(amm.expectLPTokens(gw2, IOUAmount{1414213562373095, -12})); BEAST_EXPECT( amm.expectLPTokens(alice, IOUAmount{1414213562373095, -12})); @@ -1653,7 +1885,10 @@ class AMMClawback_test : public jtx::AMMTest amm.deposit(bob, USD(4000), EUR(1000)); BEAST_EXPECT( amm.expectBalances(USD(12000), EUR(3000), IOUAmount(6000))); - amm.deposit(carol, USD(2000), EUR(500)); + if (!features[fixAMMv1_3]) + amm.deposit(carol, USD(2000), EUR(500)); + else + amm.deposit(carol, USD(2000.25), EUR(500)); BEAST_EXPECT( amm.expectBalances(USD(14000), EUR(3500), IOUAmount(7000))); @@ -1675,7 +1910,12 @@ class AMMClawback_test : public jtx::AMMTest BEAST_EXPECT(env.balance(alice, EUR) == EUR(8000)); BEAST_EXPECT(env.balance(bob, USD) == USD(5000)); BEAST_EXPECT(env.balance(bob, EUR) == EUR(8000)); - BEAST_EXPECT(env.balance(carol, USD) == USD(6000)); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(env.balance(carol, USD) == USD(6000)); + else + BEAST_EXPECT( + env.balance(carol, USD) == + STAmount(USD, UINT64_C(5999'999999999999), -12)); // 250 EUR goes back to carol. BEAST_EXPECT(env.balance(carol, EUR) == EUR(7750)); @@ -1697,7 +1937,12 @@ class AMMClawback_test : public jtx::AMMTest BEAST_EXPECT(env.balance(bob, USD) == USD(5000)); // 250 EUR did not go back to bob because tfClawTwoAssets is set. BEAST_EXPECT(env.balance(bob, EUR) == EUR(8000)); - BEAST_EXPECT(env.balance(carol, USD) == USD(6000)); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(env.balance(carol, USD) == USD(6000)); + else + BEAST_EXPECT( + env.balance(carol, USD) == + STAmount(USD, UINT64_C(5999'999999999999), -12)); BEAST_EXPECT(env.balance(carol, EUR) == EUR(7750)); // gw clawback all USD from alice and set tfClawTwoAssets. 
@@ -1715,7 +1960,12 @@ class AMMClawback_test : public jtx::AMMTest BEAST_EXPECT(env.balance(alice, EUR) == EUR(8000)); BEAST_EXPECT(env.balance(bob, USD) == USD(5000)); BEAST_EXPECT(env.balance(bob, EUR) == EUR(8000)); - BEAST_EXPECT(env.balance(carol, USD) == USD(6000)); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(env.balance(carol, USD) == USD(6000)); + else + BEAST_EXPECT( + env.balance(carol, USD) == + STAmount(USD, UINT64_C(5999'999999999999), -12)); BEAST_EXPECT(env.balance(carol, EUR) == EUR(7750)); } } @@ -1763,13 +2013,23 @@ class AMMClawback_test : public jtx::AMMTest env(amm::ammClawback(gw, alice, USD, XRP, USD(400)), ter(tesSUCCESS)); env.close(); - BEAST_EXPECT(amm.expectBalances( - STAmount(USD, UINT64_C(5656854249492380), -13), - XRP(70.710678), - IOUAmount(200000))); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectBalances( + STAmount(USD, UINT64_C(5656854249492380), -13), + XRP(70.710678), + IOUAmount(200000))); + else + BEAST_EXPECT(amm.expectBalances( + STAmount(USD, UINT64_C(565'685424949238), -12), + XRP(70.710679), + IOUAmount(200000))); BEAST_EXPECT(amm.expectLPTokens(alice, IOUAmount(0))); - BEAST_EXPECT(expectLedgerEntryRoot( - env, alice, aliceXrpBalance + XRP(29.289322))); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(expectLedgerEntryRoot( + env, alice, aliceXrpBalance + XRP(29.289322))); + else + BEAST_EXPECT(expectLedgerEntryRoot( + env, alice, aliceXrpBalance + XRP(29.289321))); } void @@ -1780,13 +2040,18 @@ class AMMClawback_test : public jtx::AMMTest testFeatureDisabled(all - featureAMMClawback); testAMMClawbackSpecificAmount(all); testAMMClawbackExceedBalance(all); + testAMMClawbackExceedBalance(all - fixAMMv1_3); testAMMClawbackAll(all); + testAMMClawbackAll(all - fixAMMv1_3); testAMMClawbackSameIssuerAssets(all); + testAMMClawbackSameIssuerAssets(all - fixAMMv1_3); testAMMClawbackSameCurrency(all); testAMMClawbackIssuesEachOther(all); testNotHoldingLptoken(all); testAssetFrozen(all); + testAssetFrozen(all - fixAMMv1_3); testSingleDepositAndClawback(all); + testSingleDepositAndClawback(all - fixAMMv1_3); } }; BEAST_DEFINE_TESTSUITE(AMMClawback, app, ripple); diff --git a/src/test/app/AMMExtended_test.cpp b/src/test/app/AMMExtended_test.cpp index f9750eaa53..3d959a6a09 100644 --- a/src/test/app/AMMExtended_test.cpp +++ b/src/test/app/AMMExtended_test.cpp @@ -1451,7 +1451,7 @@ private: using namespace jtx; FeatureBitset const all{supported_amendments()}; testRmFundedOffer(all); - testRmFundedOffer(all - fixAMMv1_1); + testRmFundedOffer(all - fixAMMv1_1 - fixAMMv1_3); testEnforceNoRipple(all); testFillModes(all); testOfferCrossWithXRP(all); @@ -1465,7 +1465,7 @@ private: testOfferCreateThenCross(all); testSellFlagExceedLimit(all); testGatewayCrossCurrency(all); - testGatewayCrossCurrency(all - fixAMMv1_1); + testGatewayCrossCurrency(all - fixAMMv1_1 - fixAMMv1_3); testBridgedCross(all); testSellWithFillOrKill(all); testTransferRateOffer(all); @@ -1473,7 +1473,7 @@ private: testBadPathAssert(all); testSellFlagBasic(all); testDirectToDirectPath(all); - testDirectToDirectPath(all - fixAMMv1_1); + testDirectToDirectPath(all - fixAMMv1_1 - fixAMMv1_3); testRequireAuth(all); testMissingAuth(all); } @@ -4063,9 +4063,9 @@ private: testBookStep(all); testBookStep(all | ownerPaysFee); testTransferRate(all | ownerPaysFee); - testTransferRate((all - fixAMMv1_1) | ownerPaysFee); + testTransferRate((all - fixAMMv1_1 - fixAMMv1_3) | ownerPaysFee); testTransferRateNoOwnerFee(all); - testTransferRateNoOwnerFee(all - fixAMMv1_1); + testTransferRateNoOwnerFee(all - 
fixAMMv1_1 - fixAMMv1_3); testLimitQuality(); testXRPPathLoop(); } @@ -4076,7 +4076,7 @@ private: using namespace jtx; FeatureBitset const all{supported_amendments()}; testStepLimit(all); - testStepLimit(all - fixAMMv1_1); + testStepLimit(all - fixAMMv1_1 - fixAMMv1_3); } void @@ -4085,7 +4085,7 @@ private: using namespace jtx; FeatureBitset const all{supported_amendments()}; test_convert_all_of_an_asset(all); - test_convert_all_of_an_asset(all - fixAMMv1_1); + test_convert_all_of_an_asset(all - fixAMMv1_1 - fixAMMv1_3); } void diff --git a/src/test/app/AMM_test.cpp b/src/test/app/AMM_test.cpp index e0b3dc1ec7..7211d37730 100644 --- a/src/test/app/AMM_test.cpp +++ b/src/test/app/AMM_test.cpp @@ -27,6 +27,7 @@ #include #include +#include #include #include @@ -836,21 +837,6 @@ private: std::nullopt, ter(tecAMM_FAILED)); - // Tiny deposit - ammAlice.deposit( - carol, - IOUAmount{1, -4}, - std::nullopt, - std::nullopt, - ter(temBAD_AMOUNT)); - ammAlice.deposit( - carol, - STAmount{USD, 1, -12}, - std::nullopt, - std::nullopt, - std::nullopt, - ter(tecAMM_INVALID_TOKENS)); - // Deposit non-empty AMM ammAlice.deposit( carol, @@ -861,6 +847,34 @@ private: ter(tecAMM_NOT_EMPTY)); }); + // Tiny deposit + testAMM( + [&](AMM& ammAlice, Env& env) { + auto const enabledv1_3 = + env.current()->rules().enabled(fixAMMv1_3); + auto const err = + !enabledv1_3 ? ter(temBAD_AMOUNT) : ter(tesSUCCESS); + // Pre-amendment XRP deposit side is rounded to 0 + // and deposit fails. + // Post-amendment XRP deposit side is rounded to 1 + // and deposit succeeds. + ammAlice.deposit( + carol, IOUAmount{1, -4}, std::nullopt, std::nullopt, err); + // Pre/post-amendment LPTokens is rounded to 0 and deposit + // fails with tecAMM_INVALID_TOKENS. + ammAlice.deposit( + carol, + STAmount{USD, 1, -12}, + std::nullopt, + std::nullopt, + std::nullopt, + ter(tecAMM_INVALID_TOKENS)); + }, + std::nullopt, + 0, + std::nullopt, + {features, features - fixAMMv1_3}); + // Invalid AMM testAMM([&](AMM& ammAlice, Env& env) { ammAlice.withdrawAll(alice); @@ -1316,6 +1330,53 @@ private: std::nullopt, ter(tecAMM_FAILED)); }); + + // Equal deposit, tokens rounded to 0 + testAMM([&](AMM& amm, Env& env) { + amm.deposit(DepositArg{ + .tokens = IOUAmount{1, -12}, + .err = ter(tecAMM_INVALID_TOKENS)}); + }); + + // Equal deposit limit, tokens rounded to 0 + testAMM( + [&](AMM& amm, Env& env) { + amm.deposit(DepositArg{ + .asset1In = STAmount{USD, 1, -15}, + .asset2In = XRPAmount{1}, + .err = ter(tecAMM_INVALID_TOKENS)}); + }, + {.pool = {{USD(1'000'000), XRP(1'000'000)}}, + .features = {features - fixAMMv1_3}}); + testAMM([&](AMM& amm, Env& env) { + amm.deposit(DepositArg{ + .asset1In = STAmount{USD, 1, -15}, + .asset2In = XRPAmount{1}, + .err = ter(tecAMM_INVALID_TOKENS)}); + }); + + // Single deposit by asset, tokens rounded to 0 + testAMM([&](AMM& amm, Env& env) { + amm.deposit(DepositArg{ + .asset1In = STAmount{USD, 1, -15}, + .err = ter(tecAMM_INVALID_TOKENS)}); + }); + + // Single deposit by tokens, tokens rounded to 0 + testAMM([&](AMM& amm, Env& env) { + amm.deposit(DepositArg{ + .tokens = IOUAmount{1, -10}, + .asset1In = STAmount{USD, 1, -15}, + .err = ter(tecAMM_INVALID_TOKENS)}); + }); + + // Single deposit with eprice, tokens rounded to 0 + testAMM([&](AMM& amm, Env& env) { + amm.deposit(DepositArg{ + .asset1In = STAmount{USD, 1, -15}, + .maxEP = STAmount{USD, 1, -1}, + .err = ter(tecAMM_INVALID_TOKENS)}); + }); } void @@ -1324,6 +1385,7 @@ private: testcase("Deposit"); using namespace jtx; + auto const all = supported_amendments(); // 
Equal deposit: 1000000 tokens, 10% of the current pool testAMM([&](AMM& ammAlice, Env& env) { @@ -1529,8 +1591,9 @@ private: }); // Issuer create/deposit + for (auto const& feat : {all, all - fixAMMv1_3}) { - Env env(*this); + Env env(*this, feat); env.fund(XRP(30000), gw); AMM ammGw(env, gw, XRP(10'000), USD(10'000)); BEAST_EXPECT( @@ -1624,6 +1687,7 @@ private: testcase("Invalid Withdraw"); using namespace jtx; + auto const all = supported_amendments(); testAMM( [&](AMM& ammAlice, Env& env) { @@ -1918,16 +1982,6 @@ private: ammAlice.withdraw( carol, 10'000, std::nullopt, std::nullopt, ter(tecAMM_BALANCE)); - // Withdraw entire one side of the pool. - // Equal withdraw but due to XRP precision limit, - // this results in full withdraw of XRP pool only, - // while leaving a tiny amount in USD pool. - ammAlice.withdraw( - alice, - IOUAmount{9'999'999'9999, -4}, - std::nullopt, - std::nullopt, - ter(tecAMM_BALANCE)); // Withdrawing from one side. // XRP by tokens ammAlice.withdraw( @@ -1959,6 +2013,57 @@ private: ter(tecAMM_BALANCE)); }); + testAMM( + [&](AMM& ammAlice, Env& env) { + // Withdraw entire one side of the pool. + // Pre-amendment: + // Equal withdraw but due to XRP rounding + // this results in full withdraw of XRP pool only, + // while leaving a tiny amount in USD pool. + // Post-amendment: + // Most of the pool is withdrawn with remaining tiny amounts + auto err = env.enabled(fixAMMv1_3) ? ter(tesSUCCESS) + : ter(tecAMM_BALANCE); + ammAlice.withdraw( + alice, + IOUAmount{9'999'999'9999, -4}, + std::nullopt, + std::nullopt, + err); + if (env.enabled(fixAMMv1_3)) + BEAST_EXPECT(ammAlice.expectBalances( + XRPAmount(1), STAmount{USD, 1, -7}, IOUAmount{1, -4})); + }, + std::nullopt, + 0, + std::nullopt, + {all, all - fixAMMv1_3}); + + testAMM( + [&](AMM& ammAlice, Env& env) { + // Similar to above with even smaller remaining amount + // is it ok that the pool is unbalanced? + // Withdraw entire one side of the pool. + // Equal withdraw but due to XRP precision limit, + // this results in full withdraw of XRP pool only, + // while leaving a tiny amount in USD pool. + auto err = env.enabled(fixAMMv1_3) ? ter(tesSUCCESS) + : ter(tecAMM_BALANCE); + ammAlice.withdraw( + alice, + IOUAmount{9'999'999'999999999, -9}, + std::nullopt, + std::nullopt, + err); + if (env.enabled(fixAMMv1_3)) + BEAST_EXPECT(ammAlice.expectBalances( + XRPAmount(1), STAmount{USD, 1, -11}, IOUAmount{1, -8})); + }, + std::nullopt, + 0, + std::nullopt, + {all, all - fixAMMv1_3}); + // Invalid AMM testAMM([&](AMM& ammAlice, Env& env) { ammAlice.withdrawAll(alice); @@ -2022,15 +2127,19 @@ private: // Withdraw with EPrice limit. Fails to withdraw, calculated tokens // to withdraw are 0. - testAMM([&](AMM& ammAlice, Env&) { - ammAlice.deposit(carol, 1'000'000); - ammAlice.withdraw( - carol, - USD(100), - std::nullopt, - IOUAmount{500, 0}, - ter(tecAMM_FAILED)); - }); + testAMM( + [&](AMM& ammAlice, Env& env) { + ammAlice.deposit(carol, 1'000'000); + auto const err = env.enabled(fixAMMv1_3) + ? ter(tecAMM_INVALID_TOKENS) + : ter(tecAMM_FAILED); + ammAlice.withdraw( + carol, USD(100), std::nullopt, IOUAmount{500, 0}, err); + }, + std::nullopt, + 0, + std::nullopt, + {all, all - fixAMMv1_3}); // Withdraw with EPrice limit. Fails to withdraw, calculated tokens // to withdraw are greater than the LP shares. @@ -2095,14 +2204,19 @@ private: // Withdraw close to one side of the pool. Account's LP tokens // are rounded to all LP tokens. 
- testAMM([&](AMM& ammAlice, Env&) { - ammAlice.withdraw( - alice, - STAmount{USD, UINT64_C(9'999'999999999999), -12}, - std::nullopt, - std::nullopt, - ter(tecAMM_BALANCE)); - }); + testAMM( + [&](AMM& ammAlice, Env& env) { + auto const err = env.enabled(fixAMMv1_3) + ? ter(tecINVARIANT_FAILED) + : ter(tecAMM_BALANCE); + ammAlice.withdraw( + alice, + STAmount{USD, UINT64_C(9'999'999999999999), -12}, + std::nullopt, + std::nullopt, + err); + }, + {.features = {all, all - fixAMMv1_3}, .noLog = true}); // Tiny withdraw testAMM([&](AMM& ammAlice, Env&) { @@ -2133,6 +2247,17 @@ private: XRPAmount{1}, std::nullopt, ter(tecAMM_INVALID_TOKENS)); + ammAlice.withdraw(WithdrawArg{ + .tokens = IOUAmount{1, -10}, + .err = ter(tecAMM_INVALID_TOKENS)}); + ammAlice.withdraw(WithdrawArg{ + .asset1Out = STAmount{USD, 1, -15}, + .asset2Out = XRPAmount{1}, + .err = ter(tecAMM_INVALID_TOKENS)}); + ammAlice.withdraw(WithdrawArg{ + .tokens = IOUAmount{1, -10}, + .asset1Out = STAmount{USD, 1, -15}, + .err = ter(tecAMM_INVALID_TOKENS)}); }); } @@ -2142,6 +2267,7 @@ private: testcase("Withdraw"); using namespace jtx; + auto const all = supported_amendments(); // Equal withdrawal by Carol: 1000000 of tokens, 10% of the current // pool @@ -2196,11 +2322,24 @@ private: }); // Single withdrawal by amount XRP1000 - testAMM([&](AMM& ammAlice, Env&) { - ammAlice.withdraw(alice, XRP(1'000)); - BEAST_EXPECT(ammAlice.expectBalances( - XRP(9'000), USD(10'000), IOUAmount{9'486'832'98050514, -8})); - }); + testAMM( + [&](AMM& ammAlice, Env& env) { + ammAlice.withdraw(alice, XRP(1'000)); + if (!env.enabled(fixAMMv1_3)) + BEAST_EXPECT(ammAlice.expectBalances( + XRP(9'000), + USD(10'000), + IOUAmount{9'486'832'98050514, -8})); + else + BEAST_EXPECT(ammAlice.expectBalances( + XRPAmount{9'000'000'001}, + USD(10'000), + IOUAmount{9'486'832'98050514, -8})); + }, + std::nullopt, + 0, + std::nullopt, + {all, all - fixAMMv1_3}); // Single withdrawal by tokens 10000. testAMM([&](AMM& ammAlice, Env&) { @@ -2251,20 +2390,31 @@ private: }); // Single deposit/withdraw by the same account - testAMM([&](AMM& ammAlice, Env&) { - // Since a smaller amount might be deposited due to - // the lp tokens adjustment, withdrawing by tokens - // is generally preferred to withdrawing by amount. - auto lpTokens = ammAlice.deposit(carol, USD(1'000)); - ammAlice.withdraw(carol, lpTokens, USD(0)); - lpTokens = ammAlice.deposit(carol, STAmount(USD, 1, -6)); - ammAlice.withdraw(carol, lpTokens, USD(0)); - lpTokens = ammAlice.deposit(carol, XRPAmount(1)); - ammAlice.withdraw(carol, lpTokens, XRPAmount(0)); - BEAST_EXPECT(ammAlice.expectBalances( - XRP(10'000), USD(10'000), ammAlice.tokens())); - BEAST_EXPECT(ammAlice.expectLPTokens(carol, IOUAmount{0})); - }); + testAMM( + [&](AMM& ammAlice, Env& env) { + // Since a smaller amount might be deposited due to + // the lp tokens adjustment, withdrawing by tokens + // is generally preferred to withdrawing by amount. 
+ auto lpTokens = ammAlice.deposit(carol, USD(1'000)); + ammAlice.withdraw(carol, lpTokens, USD(0)); + lpTokens = ammAlice.deposit(carol, STAmount(USD, 1, -6)); + ammAlice.withdraw(carol, lpTokens, USD(0)); + lpTokens = ammAlice.deposit(carol, XRPAmount(1)); + ammAlice.withdraw(carol, lpTokens, XRPAmount(0)); + if (!env.enabled(fixAMMv1_3)) + BEAST_EXPECT(ammAlice.expectBalances( + XRP(10'000), USD(10'000), ammAlice.tokens())); + else + BEAST_EXPECT(ammAlice.expectBalances( + XRPAmount(10'000'000'001), + USD(10'000), + ammAlice.tokens())); + BEAST_EXPECT(ammAlice.expectLPTokens(carol, IOUAmount{0})); + }, + std::nullopt, + 0, + std::nullopt, + {all, all - fixAMMv1_3}); // Single deposit by different accounts and then withdraw // in reverse. @@ -2307,36 +2457,34 @@ private: IOUAmount{10'000'000, 0})); }); - auto const all = supported_amendments(); // Withdraw with EPrice limit. testAMM( [&](AMM& ammAlice, Env& env) { ammAlice.deposit(carol, 1'000'000); ammAlice.withdraw( carol, USD(100), std::nullopt, IOUAmount{520, 0}); - if (!env.current()->rules().enabled(fixAMMv1_1)) - BEAST_EXPECT( - ammAlice.expectBalances( - XRPAmount(11'000'000'000), - STAmount{USD, UINT64_C(9'372'781065088757), -12}, - IOUAmount{10'153'846'15384616, -8}) && - ammAlice.expectLPTokens( - carol, IOUAmount{153'846'15384616, -8})); - else - BEAST_EXPECT( - ammAlice.expectBalances( - XRPAmount(11'000'000'000), - STAmount{USD, UINT64_C(9'372'781065088769), -12}, - IOUAmount{10'153'846'15384616, -8}) && - ammAlice.expectLPTokens( - carol, IOUAmount{153'846'15384616, -8})); + BEAST_EXPECT(ammAlice.expectLPTokens( + carol, IOUAmount{153'846'15384616, -8})); + if (!env.enabled(fixAMMv1_1) && !env.enabled(fixAMMv1_3)) + BEAST_EXPECT(ammAlice.expectBalances( + XRPAmount(11'000'000'000), + STAmount{USD, UINT64_C(9'372'781065088757), -12}, + IOUAmount{10'153'846'15384616, -8})); + else if (env.enabled(fixAMMv1_1) && !env.enabled(fixAMMv1_3)) + BEAST_EXPECT(ammAlice.expectBalances( + XRPAmount(11'000'000'000), + STAmount{USD, UINT64_C(9'372'781065088769), -12}, + IOUAmount{10'153'846'15384616, -8})); + else if (env.enabled(fixAMMv1_3)) + BEAST_EXPECT(ammAlice.expectBalances( + XRPAmount(11'000'000'000), + STAmount{USD, UINT64_C(9'372'78106508877), -11}, + IOUAmount{10'153'846'15384616, -8})); ammAlice.withdrawAll(carol); BEAST_EXPECT(ammAlice.expectLPTokens(carol, IOUAmount{0})); }, - std::nullopt, - 0, - std::nullopt, - {all, all - fixAMMv1_1}); + {.features = {all, all - fixAMMv1_3, all - fixAMMv1_1 - fixAMMv1_3}, + .noLog = true}); // Withdraw with EPrice limit. AssetOut is 0. 
testAMM( @@ -2344,27 +2492,28 @@ private: ammAlice.deposit(carol, 1'000'000); ammAlice.withdraw( carol, USD(0), std::nullopt, IOUAmount{520, 0}); - if (!env.current()->rules().enabled(fixAMMv1_1)) - BEAST_EXPECT( - ammAlice.expectBalances( - XRPAmount(11'000'000'000), - STAmount{USD, UINT64_C(9'372'781065088757), -12}, - IOUAmount{10'153'846'15384616, -8}) && - ammAlice.expectLPTokens( - carol, IOUAmount{153'846'15384616, -8})); - else - BEAST_EXPECT( - ammAlice.expectBalances( - XRPAmount(11'000'000'000), - STAmount{USD, UINT64_C(9'372'781065088769), -12}, - IOUAmount{10'153'846'15384616, -8}) && - ammAlice.expectLPTokens( - carol, IOUAmount{153'846'15384616, -8})); + BEAST_EXPECT(ammAlice.expectLPTokens( + carol, IOUAmount{153'846'15384616, -8})); + if (!env.enabled(fixAMMv1_1) && !env.enabled(fixAMMv1_3)) + BEAST_EXPECT(ammAlice.expectBalances( + XRP(11'000), + STAmount{USD, UINT64_C(9'372'781065088757), -12}, + IOUAmount{10'153'846'15384616, -8})); + else if (env.enabled(fixAMMv1_1) && !env.enabled(fixAMMv1_3)) + BEAST_EXPECT(ammAlice.expectBalances( + XRP(11'000), + STAmount{USD, UINT64_C(9'372'781065088769), -12}, + IOUAmount{10'153'846'15384616, -8})); + else if (env.enabled(fixAMMv1_3)) + BEAST_EXPECT(ammAlice.expectBalances( + XRP(11'000), + STAmount{USD, UINT64_C(9'372'78106508877), -11}, + IOUAmount{10'153'846'15384616, -8})); }, std::nullopt, 0, std::nullopt, - {all, all - fixAMMv1_1}); + {all, all - fixAMMv1_3, all - fixAMMv1_1 - fixAMMv1_3}); // IOU to IOU + transfer fee { @@ -2403,14 +2552,25 @@ private: STAmount{USD, UINT64_C(9'999'999999), -6}, IOUAmount{9'999'999'999, -3})); }); - testAMM([&](AMM& ammAlice, Env&) { - // Single XRP pool - ammAlice.withdraw(alice, std::nullopt, XRPAmount{1}); - BEAST_EXPECT(ammAlice.expectBalances( - XRPAmount{9'999'999'999}, - USD(10'000), - IOUAmount{9'999'999'9995, -4})); - }); + testAMM( + [&](AMM& ammAlice, Env& env) { + // Single XRP pool + ammAlice.withdraw(alice, std::nullopt, XRPAmount{1}); + if (!env.enabled(fixAMMv1_3)) + BEAST_EXPECT(ammAlice.expectBalances( + XRPAmount{9'999'999'999}, + USD(10'000), + IOUAmount{9'999'999'9995, -4})); + else + BEAST_EXPECT(ammAlice.expectBalances( + XRP(10'000), + USD(10'000), + IOUAmount{9'999'999'9995, -4})); + }, + std::nullopt, + 0, + std::nullopt, + {all, all - fixAMMv1_3}); testAMM([&](AMM& ammAlice, Env&) { // Single USD pool ammAlice.withdraw(alice, std::nullopt, STAmount{USD, 1, -10}); @@ -2528,6 +2688,7 @@ private: { testcase("Fee Vote"); using namespace jtx; + auto const all = supported_amendments(); // One vote sets fee to 1%. testAMM([&](AMM& ammAlice, Env& env) { @@ -2545,6 +2706,12 @@ private: std::uint32_t tokens = 10'000'000, std::vector* accounts = nullptr) { Account a(std::to_string(i)); + // post-amendment the amount to deposit is slightly higher + // in order to ensure AMM invariant sqrt(asset1 * asset2) >= tokens + // fund just one USD higher in this case, which is enough for + // deposit to succeed + if (env.enabled(fixAMMv1_3)) + ++fundUSD; fund(env, gw, {a}, {USD(fundUSD)}, Fund::Acct); ammAlice.deposit(a, tokens); ammAlice.vote(a, 50 * (i + 1)); @@ -2553,11 +2720,16 @@ private: }; // Eight votes fill all voting slots, set fee 0.175%. 
- testAMM([&](AMM& ammAlice, Env& env) { - for (int i = 0; i < 7; ++i) - vote(ammAlice, env, i, 10'000); - BEAST_EXPECT(ammAlice.expectTradingFee(175)); - }); + testAMM( + [&](AMM& ammAlice, Env& env) { + for (int i = 0; i < 7; ++i) + vote(ammAlice, env, i, 10'000); + BEAST_EXPECT(ammAlice.expectTradingFee(175)); + }, + std::nullopt, + 0, + std::nullopt, + {all}); // Eight votes fill all voting slots, set fee 0.175%. // New vote, same account, sets fee 0.225% @@ -2951,8 +3123,14 @@ private: fund(env, gw, {bob}, {USD(10'000)}, Fund::Acct); ammAlice.deposit(bob, 1'000'000); - BEAST_EXPECT(ammAlice.expectBalances( - XRP(12'000), USD(12'000), IOUAmount{12'000'000, 0})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(ammAlice.expectBalances( + XRP(12'000), USD(12'000), IOUAmount{12'000'000, 0})); + else + BEAST_EXPECT(ammAlice.expectBalances( + XRPAmount{12'000'000'001}, + USD(12'000), + IOUAmount{12'000'000, 0})); // Initial state. Pay bidMin. env(ammAlice.bid({.account = carol, .bidMin = 110})).close(); @@ -2984,8 +3162,16 @@ private: BEAST_EXPECT(ammAlice.expectAuctionSlot( 0, std::nullopt, IOUAmount{110})); // ~321.09 tokens burnt on bidding fees. - BEAST_EXPECT(ammAlice.expectBalances( - XRP(12'000), USD(12'000), IOUAmount{11'999'678'91, -2})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(ammAlice.expectBalances( + XRP(12'000), + USD(12'000), + IOUAmount{11'999'678'91, -2})); + else + BEAST_EXPECT(ammAlice.expectBalances( + XRPAmount{12'000'000'001}, + USD(12'000), + IOUAmount{11'999'678'91, -2})); }, std::nullopt, 0, @@ -3014,8 +3200,12 @@ private: auto const slotPrice = IOUAmount{5'200}; ammTokens -= slotPrice; BEAST_EXPECT(ammAlice.expectAuctionSlot(100, 0, slotPrice)); - BEAST_EXPECT(ammAlice.expectBalances( - XRP(13'000), USD(13'000), ammTokens)); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(ammAlice.expectBalances( + XRP(13'000), USD(13'000), ammTokens)); + else + BEAST_EXPECT(ammAlice.expectBalances( + XRPAmount{13'000'000'003}, USD(13'000), ammTokens)); // Discounted trade for (int i = 0; i < 10; ++i) { @@ -3056,10 +3246,16 @@ private: env.balance(ed, USD) == STAmount(USD, UINT64_C(18'999'0057261184), -10)); // USD pool is slightly higher because of the fees. - BEAST_EXPECT(ammAlice.expectBalances( - XRP(13'000), - STAmount(USD, UINT64_C(13'002'98282151422), -11), - ammTokens)); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(ammAlice.expectBalances( + XRP(13'000), + STAmount(USD, UINT64_C(13'002'98282151422), -11), + ammTokens)); + else + BEAST_EXPECT(ammAlice.expectBalances( + XRPAmount{13'000'000'003}, + STAmount(USD, UINT64_C(13'002'98282151422), -11), + ammTokens)); } ammTokens = ammAlice.getLPTokensBalance(); // Trade with the fee @@ -3101,31 +3297,54 @@ private: } else { - BEAST_EXPECT( - env.balance(dan, USD) == - STAmount(USD, UINT64_C(19'490'05672274399), -11)); + if (!features[fixAMMv1_3]) + BEAST_EXPECT( + env.balance(dan, USD) == + STAmount(USD, UINT64_C(19'490'05672274399), -11)); + else + BEAST_EXPECT( + env.balance(dan, USD) == + STAmount(USD, UINT64_C(19'490'05672274398), -11)); // USD pool gains more in dan's fees. 
- BEAST_EXPECT(ammAlice.expectBalances( - XRP(13'000), - STAmount{USD, UINT64_C(13'012'92609877023), -11}, - ammTokens)); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(ammAlice.expectBalances( + XRP(13'000), + STAmount{USD, UINT64_C(13'012'92609877023), -11}, + ammTokens)); + else + BEAST_EXPECT(ammAlice.expectBalances( + XRPAmount{13'000'000'003}, + STAmount{USD, UINT64_C(13'012'92609877024), -11}, + ammTokens)); // Discounted fee payment ammAlice.deposit(carol, USD(100)); ammTokens = ammAlice.getLPTokensBalance(); - BEAST_EXPECT(ammAlice.expectBalances( - XRP(13'000), - STAmount{USD, UINT64_C(13'112'92609877023), -11}, - ammTokens)); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(ammAlice.expectBalances( + XRP(13'000), + STAmount{USD, UINT64_C(13'112'92609877023), -11}, + ammTokens)); + else + BEAST_EXPECT(ammAlice.expectBalances( + XRPAmount{13'000'000'003}, + STAmount{USD, UINT64_C(13'112'92609877024), -11}, + ammTokens)); env(pay(carol, bob, USD(100)), path(~USD), sendmax(XRP(110))); env.close(); // carol pays 100000 drops in fees // 99900668XRP swapped in for 100USD - BEAST_EXPECT(ammAlice.expectBalances( - XRPAmount{13'100'000'668}, - STAmount{USD, UINT64_C(13'012'92609877023), -11}, - ammTokens)); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(ammAlice.expectBalances( + XRPAmount{13'100'000'668}, + STAmount{USD, UINT64_C(13'012'92609877023), -11}, + ammTokens)); + else + BEAST_EXPECT(ammAlice.expectBalances( + XRPAmount{13'100'000'671}, + STAmount{USD, UINT64_C(13'012'92609877024), -11}, + ammTokens)); } // Payment with the trading fee env(pay(alice, carol, XRP(100)), path(~XRP), sendmax(USD(110))); @@ -3133,20 +3352,27 @@ private: // alice pays ~1.011USD in fees, which is ~10 times more // than carol's fee // 100.099431529USD swapped in for 100XRP - if (!features[fixAMMv1_1]) + if (!features[fixAMMv1_1] && !features[fixAMMv1_3]) { BEAST_EXPECT(ammAlice.expectBalances( XRPAmount{13'000'000'668}, STAmount{USD, UINT64_C(13'114'03663047264), -11}, ammTokens)); } - else + else if (features[fixAMMv1_1] && !features[fixAMMv1_3]) { BEAST_EXPECT(ammAlice.expectBalances( XRPAmount{13'000'000'668}, STAmount{USD, UINT64_C(13'114'03663047269), -11}, ammTokens)); } + else + { + BEAST_EXPECT(ammAlice.expectBalances( + XRPAmount{13'000'000'671}, + STAmount{USD, UINT64_C(13'114'03663044937), -11}, + ammTokens)); + } // Auction slot expired, no discounted fee env.close(seconds(TOTAL_TIME_SLOT_SECS + 1)); // clock is parent's based @@ -3155,7 +3381,7 @@ private: BEAST_EXPECT( env.balance(carol, USD) == STAmount(USD, UINT64_C(29'399'00572620545), -11)); - else + else if (!features[fixAMMv1_3]) BEAST_EXPECT( env.balance(carol, USD) == STAmount(USD, UINT64_C(29'399'00572620544), -11)); @@ -3167,7 +3393,7 @@ private: } // carol pays ~9.94USD in fees, which is ~10 times more in // trading fees vs discounted fee. 
- if (!features[fixAMMv1_1]) + if (!features[fixAMMv1_1] && !features[fixAMMv1_3]) { BEAST_EXPECT( env.balance(carol, USD) == @@ -3177,7 +3403,7 @@ private: STAmount{USD, UINT64_C(13'123'98038490681), -11}, ammTokens)); } - else + else if (features[fixAMMv1_1] && !features[fixAMMv1_3]) { BEAST_EXPECT( env.balance(carol, USD) == @@ -3187,25 +3413,42 @@ private: STAmount{USD, UINT64_C(13'123'98038490689), -11}, ammTokens)); } + else + { + BEAST_EXPECT( + env.balance(carol, USD) == + STAmount(USD, UINT64_C(29'389'06197177129), -11)); + BEAST_EXPECT(ammAlice.expectBalances( + XRPAmount{13'000'000'671}, + STAmount{USD, UINT64_C(13'123'98038488352), -11}, + ammTokens)); + } env(pay(carol, bob, USD(100)), path(~USD), sendmax(XRP(110))); env.close(); // carol pays ~1.008XRP in trading fee, which is // ~10 times more than the discounted fee. // 99.815876XRP is swapped in for 100USD - if (!features[fixAMMv1_1]) + if (!features[fixAMMv1_1] && !features[fixAMMv1_3]) { BEAST_EXPECT(ammAlice.expectBalances( XRPAmount(13'100'824'790), STAmount{USD, UINT64_C(13'023'98038490681), -11}, ammTokens)); } - else + else if (features[fixAMMv1_1] && !features[fixAMMv1_3]) { BEAST_EXPECT(ammAlice.expectBalances( XRPAmount(13'100'824'790), STAmount{USD, UINT64_C(13'023'98038490689), -11}, ammTokens)); } + else + { + BEAST_EXPECT(ammAlice.expectBalances( + XRPAmount(13'100'824'793), + STAmount{USD, UINT64_C(13'023'98038488352), -11}, + ammTokens)); + } }, std::nullopt, 1'000, @@ -4506,7 +4749,7 @@ private: testAMM([&](AMM& ammAlice, Env& env) { auto const baseFee = env.current()->fees().base.drops(); auto const token1 = ammAlice.lptIssue(); - auto priceXRP = withdrawByTokens( + auto priceXRP = ammAssetOut( STAmount{XRPAmount{10'000'000'000}}, STAmount{token1, 10'000'000}, STAmount{token1, 5'000'000}, @@ -4533,7 +4776,7 @@ private: BEAST_EXPECT( accountBalance(env, carol) == std::to_string(22500000000 - 4 * baseFee)); - priceXRP = withdrawByTokens( + priceXRP = ammAssetOut( STAmount{XRPAmount{10'000'000'000}}, STAmount{token1, 9'999'900}, STAmount{token1, 4'999'900}, @@ -4890,9 +5133,12 @@ private: carol, USD(100), std::nullopt, IOUAmount{520, 0}); // carol withdraws ~1,443.44USD auto const balanceAfterWithdraw = [&]() { - if (!features[fixAMMv1_1]) + if (!features[fixAMMv1_1] && !features[fixAMMv1_3]) return STAmount(USD, UINT64_C(30'443'43891402715), -11); - return STAmount(USD, UINT64_C(30'443'43891402714), -11); + else if (features[fixAMMv1_1] && !features[fixAMMv1_3]) + return STAmount(USD, UINT64_C(30'443'43891402714), -11); + else + return STAmount(USD, UINT64_C(30'443'43891402713), -11); }(); BEAST_EXPECT(env.balance(carol, USD) == balanceAfterWithdraw); // Set to original pool size @@ -4902,22 +5148,29 @@ private: ammAlice.vote(alice, 0); BEAST_EXPECT(ammAlice.expectTradingFee(0)); auto const tokensNoFee = ammAlice.withdraw(carol, deposit); - if (!features[fixAMMv1_1]) + if (!features[fixAMMv1_1] && !features[fixAMMv1_3]) BEAST_EXPECT( env.balance(carol, USD) == STAmount(USD, UINT64_C(30'443'43891402717), -11)); - else + else if (features[fixAMMv1_1] && !features[fixAMMv1_3]) BEAST_EXPECT( env.balance(carol, USD) == STAmount(USD, UINT64_C(30'443'43891402716), -11)); - // carol pays ~4008 LPTokens in fees or ~0.5% of the no-fee - // LPTokens - if (!features[fixAMMv1_1]) - BEAST_EXPECT( - tokensNoFee == IOUAmount(746'579'80779913, -8)); else + BEAST_EXPECT( + env.balance(carol, USD) == + STAmount(USD, UINT64_C(30'443'43891402713), -11)); + // carol pays ~4008 LPTokens in fees or ~0.5% of the no-fee + // 
LPTokens + if (!features[fixAMMv1_1] && !features[fixAMMv1_3]) + BEAST_EXPECT( + tokensNoFee == IOUAmount(746'579'80779913, -8)); + else if (features[fixAMMv1_1] && !features[fixAMMv1_3]) BEAST_EXPECT( tokensNoFee == IOUAmount(746'579'80779912, -8)); + else + BEAST_EXPECT( + tokensNoFee == IOUAmount(746'579'80779911, -8)); BEAST_EXPECT(tokensFee == IOUAmount(750'588'23529411, -8)); }, std::nullopt, @@ -5214,11 +5467,16 @@ private: // Due to round off some accounts have a tiny gain, while // other have a tiny loss. The last account to withdraw // gets everything in the pool. - if (!features[fixAMMv1_1]) + if (!features[fixAMMv1_1] && !features[fixAMMv1_3]) BEAST_EXPECT(ammAlice.expectBalances( XRP(10'000), STAmount{USD, UINT64_C(10'000'0000000013), -10}, IOUAmount{10'000'000})); + else if (features[fixAMMv1_3]) + BEAST_EXPECT(ammAlice.expectBalances( + XRP(10'000), + STAmount{USD, UINT64_C(10'000'0000000003), -10}, + IOUAmount{10'000'000})); else BEAST_EXPECT(ammAlice.expectBalances( XRP(10'000), USD(10'000), IOUAmount{10'000'000})); @@ -5226,25 +5484,29 @@ private: BEAST_EXPECT(expectLine(env, simon, USD(1'500'000))); BEAST_EXPECT(expectLine(env, chris, USD(1'500'000))); BEAST_EXPECT(expectLine(env, dan, USD(1'500'000))); - if (!features[fixAMMv1_1]) + if (!features[fixAMMv1_1] && !features[fixAMMv1_3]) BEAST_EXPECT(expectLine( env, carol, STAmount{USD, UINT64_C(30'000'00000000001), -11})); + else if (features[fixAMMv1_1] && !features[fixAMMv1_3]) + BEAST_EXPECT(expectLine(env, carol, USD(30'000))); else BEAST_EXPECT(expectLine(env, carol, USD(30'000))); BEAST_EXPECT(expectLine(env, ed, USD(1'500'000))); BEAST_EXPECT(expectLine(env, paul, USD(1'500'000))); - if (!features[fixAMMv1_1]) + if (!features[fixAMMv1_1] && !features[fixAMMv1_3]) BEAST_EXPECT(expectLine( env, nataly, STAmount{USD, UINT64_C(1'500'000'000000002), -9})); - else + else if (features[fixAMMv1_1] && !features[fixAMMv1_3]) BEAST_EXPECT(expectLine( env, nataly, STAmount{USD, UINT64_C(1'500'000'000000005), -9})); + else + BEAST_EXPECT(expectLine(env, nataly, USD(1'500'000))); ammAlice.withdrawAll(alice); BEAST_EXPECT(!ammAlice.ammExists()); if (!features[fixAMMv1_1]) @@ -5252,6 +5514,11 @@ private: env, alice, STAmount{USD, UINT64_C(30'000'0000000013), -10})); + else if (features[fixAMMv1_3]) + BEAST_EXPECT(expectLine( + env, + alice, + STAmount{USD, UINT64_C(30'000'0000000003), -10})); else BEAST_EXPECT(expectLine(env, alice, USD(30'000))); // alice XRP balance is 30,000initial - 50 ammcreate fee - @@ -5267,68 +5534,110 @@ private: {features}); // Same as above but deposit/withdraw in XRP - testAMM([&](AMM& ammAlice, Env& env) { - Account const bob("bob"); - Account const ed("ed"); - Account const paul("paul"); - Account const dan("dan"); - Account const chris("chris"); - Account const simon("simon"); - Account const ben("ben"); - Account const nataly("nataly"); - fund( - env, - gw, - {bob, ed, paul, dan, chris, simon, ben, nataly}, - XRP(2'000'000), - {}, - Fund::Acct); - for (int i = 0; i < 10; ++i) - { - ammAlice.deposit(ben, XRPAmount{1}); - ammAlice.withdrawAll(ben, XRP(0)); - ammAlice.deposit(simon, XRPAmount(1'000)); - ammAlice.withdrawAll(simon, XRP(0)); - ammAlice.deposit(chris, XRP(1)); - ammAlice.withdrawAll(chris, XRP(0)); - ammAlice.deposit(dan, XRP(10)); - ammAlice.withdrawAll(dan, XRP(0)); - ammAlice.deposit(bob, XRP(100)); - ammAlice.withdrawAll(bob, XRP(0)); - ammAlice.deposit(carol, XRP(1'000)); - ammAlice.withdrawAll(carol, XRP(0)); - ammAlice.deposit(ed, XRP(10'000)); - ammAlice.withdrawAll(ed, XRP(0)); 
- ammAlice.deposit(paul, XRP(100'000)); - ammAlice.withdrawAll(paul, XRP(0)); - ammAlice.deposit(nataly, XRP(1'000'000)); - ammAlice.withdrawAll(nataly, XRP(0)); - } - // No round off with XRP in this test - BEAST_EXPECT(ammAlice.expectBalances( - XRP(10'000), USD(10'000), IOUAmount{10'000'000})); - ammAlice.withdrawAll(alice); - BEAST_EXPECT(!ammAlice.ammExists()); - // 20,000 initial - (deposit+withdraw) * 10 - auto const xrpBalance = (XRP(2'000'000) - txfee(env, 20)).getText(); - BEAST_EXPECT(accountBalance(env, ben) == xrpBalance); - BEAST_EXPECT(accountBalance(env, simon) == xrpBalance); - BEAST_EXPECT(accountBalance(env, chris) == xrpBalance); - BEAST_EXPECT(accountBalance(env, dan) == xrpBalance); + testAMM( + [&](AMM& ammAlice, Env& env) { + Account const bob("bob"); + Account const ed("ed"); + Account const paul("paul"); + Account const dan("dan"); + Account const chris("chris"); + Account const simon("simon"); + Account const ben("ben"); + Account const nataly("nataly"); + fund( + env, + gw, + {bob, ed, paul, dan, chris, simon, ben, nataly}, + XRP(2'000'000), + {}, + Fund::Acct); + for (int i = 0; i < 10; ++i) + { + ammAlice.deposit(ben, XRPAmount{1}); + ammAlice.withdrawAll(ben, XRP(0)); + ammAlice.deposit(simon, XRPAmount(1'000)); + ammAlice.withdrawAll(simon, XRP(0)); + ammAlice.deposit(chris, XRP(1)); + ammAlice.withdrawAll(chris, XRP(0)); + ammAlice.deposit(dan, XRP(10)); + ammAlice.withdrawAll(dan, XRP(0)); + ammAlice.deposit(bob, XRP(100)); + ammAlice.withdrawAll(bob, XRP(0)); + ammAlice.deposit(carol, XRP(1'000)); + ammAlice.withdrawAll(carol, XRP(0)); + ammAlice.deposit(ed, XRP(10'000)); + ammAlice.withdrawAll(ed, XRP(0)); + ammAlice.deposit(paul, XRP(100'000)); + ammAlice.withdrawAll(paul, XRP(0)); + ammAlice.deposit(nataly, XRP(1'000'000)); + ammAlice.withdrawAll(nataly, XRP(0)); + } + auto const baseFee = env.current()->fees().base.drops(); + if (!features[fixAMMv1_3]) + { + // No round off with XRP in this test + BEAST_EXPECT(ammAlice.expectBalances( + XRP(10'000), USD(10'000), IOUAmount{10'000'000})); + ammAlice.withdrawAll(alice); + BEAST_EXPECT(!ammAlice.ammExists()); + // 20,000 initial - (deposit+withdraw) * 10 + auto const xrpBalance = + (XRP(2'000'000) - txfee(env, 20)).getText(); + BEAST_EXPECT(accountBalance(env, ben) == xrpBalance); + BEAST_EXPECT(accountBalance(env, simon) == xrpBalance); + BEAST_EXPECT(accountBalance(env, chris) == xrpBalance); + BEAST_EXPECT(accountBalance(env, dan) == xrpBalance); - auto const baseFee = env.current()->fees().base.drops(); - // 30,000 initial - (deposit+withdraw) * 10 - BEAST_EXPECT( - accountBalance(env, carol) == - std::to_string(30000000000 - 20 * baseFee)); - BEAST_EXPECT(accountBalance(env, ed) == xrpBalance); - BEAST_EXPECT(accountBalance(env, paul) == xrpBalance); - BEAST_EXPECT(accountBalance(env, nataly) == xrpBalance); - // 30,000 initial - 50 ammcreate fee - 10drops withdraw fee - BEAST_EXPECT( - accountBalance(env, alice) == - std::to_string(29950000000 - baseFee)); - }); + // 30,000 initial - (deposit+withdraw) * 10 + BEAST_EXPECT( + accountBalance(env, carol) == + std::to_string(30'000'000'000 - 20 * baseFee)); + BEAST_EXPECT(accountBalance(env, ed) == xrpBalance); + BEAST_EXPECT(accountBalance(env, paul) == xrpBalance); + BEAST_EXPECT(accountBalance(env, nataly) == xrpBalance); + // 30,000 initial - 50 ammcreate fee - 10drops withdraw fee + BEAST_EXPECT( + accountBalance(env, alice) == + std::to_string(29'950'000'000 - baseFee)); + } + else + { + // post-amendment the rounding takes place to ensure + // 
AMM invariant + BEAST_EXPECT(ammAlice.expectBalances( + XRPAmount(10'000'000'080), + USD(10'000), + IOUAmount{10'000'000})); + ammAlice.withdrawAll(alice); + BEAST_EXPECT(!ammAlice.ammExists()); + auto const xrpBalance = + XRP(2'000'000) - txfee(env, 20) - drops(10); + auto const xrpBalanceText = xrpBalance.getText(); + BEAST_EXPECT(accountBalance(env, ben) == xrpBalanceText); + BEAST_EXPECT(accountBalance(env, simon) == xrpBalanceText); + BEAST_EXPECT(accountBalance(env, chris) == xrpBalanceText); + BEAST_EXPECT(accountBalance(env, dan) == xrpBalanceText); + BEAST_EXPECT( + accountBalance(env, carol) == + std::to_string(30'000'000'000 - 20 * baseFee - 10)); + BEAST_EXPECT( + accountBalance(env, ed) == + (xrpBalance + drops(2)).getText()); + BEAST_EXPECT( + accountBalance(env, paul) == + (xrpBalance + drops(3)).getText()); + BEAST_EXPECT( + accountBalance(env, nataly) == + (xrpBalance + drops(5)).getText()); + BEAST_EXPECT( + accountBalance(env, alice) == + std::to_string(29'950'000'000 - baseFee + 80)); + } + }, + std::nullopt, + 0, + std::nullopt, + {features}); } void @@ -6370,11 +6679,11 @@ private: } void - testFixOverflowOffer(FeatureBitset features) + testFixOverflowOffer(FeatureBitset featuresInitial) { using namespace jtx; using namespace std::chrono; - FeatureBitset const all{features}; + FeatureBitset const all{featuresInitial}; std::string logs; @@ -6401,6 +6710,7 @@ private: STAmount const goodUsdBIT; STAmount const goodUsdBITr; IOUAmount const lpTokenBalance; + std::optional const lpTokenBalanceAlt = {}; double const offer1BtcGH = 0.1; double const offer2BtcGH = 0.1; double const offer2UsdGH = 1; @@ -6426,6 +6736,7 @@ private: .goodUsdBIT{usdBIT, uint64_t(8'464739069120721), -15}, // .goodUsdBITr{usdBIT, uint64_t(8'464739069098152), -15}, // .lpTokenBalance = {28'61817604250837, -14}, // + .lpTokenBalanceAlt = IOUAmount{28'61817604250836, -14}, // .offer1BtcGH = 0.1, // .offer2BtcGH = 0.1, // .offer2UsdGH = 1, // @@ -6604,7 +6915,7 @@ private: { testcase(input.testCase); for (auto const& features : - {all - fixAMMOverflowOffer, all | fixAMMOverflowOffer}) + {all - fixAMMOverflowOffer - fixAMMv1_1 - fixAMMv1_3, all}) { Env env(*this, features, std::make_unique(&logs)); @@ -6658,15 +6969,19 @@ private: features[fixAMMv1_1] ? input.goodUsdGHr : input.goodUsdGH; auto const goodUsdBIT = features[fixAMMv1_1] ? input.goodUsdBITr : input.goodUsdBIT; + auto const lpTokenBalance = + env.enabled(fixAMMv1_3) && input.lpTokenBalanceAlt + ? 
*input.lpTokenBalanceAlt + : input.lpTokenBalance; if (!features[fixAMMOverflowOffer]) { BEAST_EXPECT(amm.expectBalances( - failUsdGH, failUsdBIT, input.lpTokenBalance)); + failUsdGH, failUsdBIT, lpTokenBalance)); } else { BEAST_EXPECT(amm.expectBalances( - goodUsdGH, goodUsdBIT, input.lpTokenBalance)); + goodUsdGH, goodUsdBIT, lpTokenBalance)); // Invariant: LPToken balance must not change in a // payment or a swap transaction @@ -6862,11 +7177,13 @@ private: void testLPTokenBalance(FeatureBitset features) { + testcase("LPToken Balance"); using namespace jtx; // Last Liquidity Provider is the issuer of one token { - Env env(*this, features); + std::string logs; + Env env(*this, features, std::make_unique(&logs)); fund( env, gw, @@ -6877,7 +7194,9 @@ private: amm.deposit(alice, IOUAmount{1'876123487565916, -15}); amm.deposit(carol, IOUAmount{1'000'000}); amm.withdrawAll(alice); + BEAST_EXPECT(amm.expectLPTokens(alice, IOUAmount{0})); amm.withdrawAll(carol); + BEAST_EXPECT(amm.expectLPTokens(carol, IOUAmount{0})); auto const lpToken = getAccountLines( env, gw, amm.lptIssue())[jss::lines][0u][jss::balance]; auto const lpTokenBalance = @@ -7162,7 +7481,8 @@ private: auto const testCase = [&](std::string suffix, FeatureBitset features) { testcase("Failed pseudo-account allocation " + suffix); - Env env{*this, features}; + std::string logs; + Env env{*this, features, std::make_unique(&logs)}; env.fund(XRP(30'000), gw, alice); env.close(); env(trust(alice, gw["USD"](30'000), 0)); @@ -7199,6 +7519,378 @@ private: supported_amendments() | featureSingleAssetVault); } + void + testDepositAndWithdrawRounding(FeatureBitset features) + { + testcase("Deposit and Withdraw Rounding V2"); + using namespace jtx; + + auto const XPM = gw["XPM"]; + STAmount xrpBalance{XRPAmount(692'614'492'126)}; + STAmount xpmBalance{XPM, UINT64_C(18'610'359'80246901), -8}; + STAmount amount{XPM, UINT64_C(6'566'496939465400), -12}; + std::uint16_t tfee = 941; + + auto test = [&](auto&& cb, std::uint16_t tfee_) { + Env env(*this, features); + env.fund(XRP(1'000'000), gw); + env.fund(XRP(1'000), alice); + env(trust(alice, XPM(7'000))); + env(pay(gw, alice, amount)); + + AMM amm(env, gw, xrpBalance, xpmBalance, CreateArg{.tfee = tfee_}); + // AMM LPToken balance required to replicate single deposit failure + STAmount lptAMMBalance{ + amm.lptIssue(), UINT64_C(3'234'987'266'485968), -6}; + auto const burn = + IOUAmount{amm.getLPTokensBalance() - lptAMMBalance}; + // burn tokens to get to the required AMM state + env(amm.bid(BidArg{.account = gw, .bidMin = burn, .bidMax = burn})); + cb(amm, env); + }; + test( + [&](AMM& amm, Env& env) { + auto const err = env.enabled(fixAMMv1_3) ? ter(tesSUCCESS) + : ter(tecUNFUNDED_AMM); + amm.deposit(DepositArg{ + .account = alice, .asset1In = amount, .err = err}); + }, + tfee); + test( + [&](AMM& amm, Env& env) { + auto const [amount, amount2, lptAMM] = amm.balances(XRP, XPM); + auto const withdraw = STAmount{XPM, 1, -5}; + amm.withdraw(WithdrawArg{.asset1Out = STAmount{XPM, 1, -5}}); + auto const [amount_, amount2_, lptAMM_] = + amm.balances(XRP, XPM); + if (!env.enabled(fixAMMv1_3)) + BEAST_EXPECT((amount2 - amount2_) > withdraw); + else + BEAST_EXPECT((amount2 - amount2_) <= withdraw); + }, + 0); + } + + void + invariant( + jtx::AMM& amm, + jtx::Env& env, + std::string const& msg, + bool shouldFail) + { + auto const [amount, amount2, lptBalance] = amm.balances(GBP, EUR); + + NumberRoundModeGuard g( + env.enabled(fixAMMv1_3) ? 
Number::upward : Number::getround()); + auto const res = root2(amount * amount2); + + if (shouldFail) + BEAST_EXPECT(res < lptBalance); + else + BEAST_EXPECT(res >= lptBalance); + } + + void + testDepositRounding(FeatureBitset all) + { + testcase("Deposit Rounding"); + using namespace jtx; + + // Single asset deposit + for (auto const& deposit : + {STAmount(EUR, 1, 1), + STAmount(EUR, 1, 2), + STAmount(EUR, 1, 5), + STAmount(EUR, 1, -3), // fail + STAmount(EUR, 1, -6), + STAmount(EUR, 1, -9)}) + { + testAMM( + [&](AMM& ammAlice, Env& env) { + fund( + env, + gw, + {bob}, + XRP(10'000'000), + {GBP(100'000), EUR(100'000)}, + Fund::Acct); + env.close(); + + ammAlice.deposit( + DepositArg{.account = bob, .asset1In = deposit}); + invariant( + ammAlice, + env, + "dep1", + deposit == STAmount{EUR, 1, -3} && + !env.enabled(fixAMMv1_3)); + }, + {{GBP(30'000), EUR(30'000)}}, + 0, + std::nullopt, + {all}); + } + + // Two-asset proportional deposit (1:1 pool ratio) + testAMM( + [&](AMM& ammAlice, Env& env) { + fund( + env, + gw, + {bob}, + XRP(10'000'000), + {GBP(100'000), EUR(100'000)}, + Fund::Acct); + env.close(); + + STAmount const depositEuro{ + EUR, UINT64_C(10'1234567890123456), -16}; + STAmount const depositGBP{ + GBP, UINT64_C(10'1234567890123456), -16}; + + ammAlice.deposit(DepositArg{ + .account = bob, + .asset1In = depositEuro, + .asset2In = depositGBP}); + invariant(ammAlice, env, "dep2", false); + }, + {{GBP(30'000), EUR(30'000)}}, + 0, + std::nullopt, + {all}); + + // Two-asset proportional deposit (1:3 pool ratio) + for (auto const& exponent : {1, 2, 3, 4, -3 /*fail*/, -6, -9}) + { + testAMM( + [&](AMM& ammAlice, Env& env) { + fund( + env, + gw, + {bob}, + XRP(10'000'000), + {GBP(100'000), EUR(100'000)}, + Fund::Acct); + env.close(); + + STAmount const depositEuro{EUR, 1, exponent}; + STAmount const depositGBP{GBP, 1, exponent}; + + ammAlice.deposit(DepositArg{ + .account = bob, + .asset1In = depositEuro, + .asset2In = depositGBP}); + invariant( + ammAlice, + env, + "dep3", + exponent != -3 && !env.enabled(fixAMMv1_3)); + }, + {{GBP(10'000), EUR(30'000)}}, + 0, + std::nullopt, + {all}); + } + + // tfLPToken deposit + testAMM( + [&](AMM& ammAlice, Env& env) { + fund( + env, + gw, + {bob}, + XRP(10'000'000), + {GBP(100'000), EUR(100'000)}, + Fund::Acct); + env.close(); + + ammAlice.deposit(DepositArg{ + .account = bob, + .tokens = IOUAmount{10'1234567890123456, -16}}); + invariant(ammAlice, env, "dep4", false); + }, + {{GBP(7'000), EUR(30'000)}}, + 0, + std::nullopt, + {all}); + + // tfOneAssetLPToken deposit + for (auto const& tokens : + {IOUAmount{1, -3}, + IOUAmount{1, -2}, + IOUAmount{1, -1}, + IOUAmount{1}, + IOUAmount{10}, + IOUAmount{100}, + IOUAmount{1'000}, + IOUAmount{10'000}}) + { + testAMM( + [&](AMM& ammAlice, Env& env) { + fund( + env, + gw, + {bob}, + XRP(10'000'000), + {GBP(100'000), EUR(1'000'000)}, + Fund::Acct); + env.close(); + + ammAlice.deposit(DepositArg{ + .account = bob, + .tokens = tokens, + .asset1In = STAmount{EUR, 1, 6}}); + invariant(ammAlice, env, "dep5", false); + }, + {{GBP(7'000), EUR(30'000)}}, + 0, + std::nullopt, + {all}); + } + + // Single deposit with EP not exceeding specified: + // 1'000 GBP with EP not to exceed 5 (GBP/TokensOut) + testAMM( + [&](AMM& ammAlice, Env& env) { + fund( + env, + gw, + {bob}, + XRP(10'000'000), + {GBP(100'000), EUR(100'000)}, + Fund::Acct); + env.close(); + + ammAlice.deposit( + bob, GBP(1'000), std::nullopt, STAmount{GBP, 5}); + invariant(ammAlice, env, "dep6", false); + }, + {{GBP(30'000), EUR(30'000)}}, + 0, + 
std::nullopt, + {all}); + } + + void + testWithdrawRounding(FeatureBitset all) + { + testcase("Withdraw Rounding"); + + using namespace jtx; + + // tfLPToken mode + testAMM( + [&](AMM& ammAlice, Env& env) { + ammAlice.withdraw(alice, 1'000); + invariant(ammAlice, env, "with1", false); + }, + {{GBP(7'000), EUR(30'000)}}, + 0, + std::nullopt, + {all}); + + // tfWithdrawAll mode + testAMM( + [&](AMM& ammAlice, Env& env) { + ammAlice.withdraw( + WithdrawArg{.account = alice, .flags = tfWithdrawAll}); + invariant(ammAlice, env, "with2", false); + }, + {{GBP(7'000), EUR(30'000)}}, + 0, + std::nullopt, + {all}); + + // tfTwoAsset withdraw mode + testAMM( + [&](AMM& ammAlice, Env& env) { + ammAlice.withdraw(WithdrawArg{ + .account = alice, + .asset1Out = STAmount{GBP, 3'500}, + .asset2Out = STAmount{EUR, 15'000}, + .flags = tfTwoAsset}); + invariant(ammAlice, env, "with3", false); + }, + {{GBP(7'000), EUR(30'000)}}, + 0, + std::nullopt, + {all}); + + // tfSingleAsset withdraw mode + // Note: This test fails with 0 trading fees, but doesn't fail if + // trading fees is set to 1'000 -- I suspect the compound operations + // in AMMHelpers.cpp:withdrawByTokens compensate for the rounding + // errors + testAMM( + [&](AMM& ammAlice, Env& env) { + ammAlice.withdraw(WithdrawArg{ + .account = alice, + .asset1Out = STAmount{GBP, 1'234}, + .flags = tfSingleAsset}); + invariant(ammAlice, env, "with4", false); + }, + {{GBP(7'000), EUR(30'000)}}, + 0, + std::nullopt, + {all}); + + // tfOneAssetWithdrawAll mode + testAMM( + [&](AMM& ammAlice, Env& env) { + fund( + env, + gw, + {bob}, + XRP(10'000'000), + {GBP(100'000), EUR(100'000)}, + Fund::Acct); + env.close(); + + ammAlice.deposit(DepositArg{ + .account = bob, .asset1In = STAmount{GBP, 3'456}}); + + ammAlice.withdraw(WithdrawArg{ + .account = bob, + .asset1Out = STAmount{GBP, 1'000}, + .flags = tfOneAssetWithdrawAll}); + invariant(ammAlice, env, "with5", false); + }, + {{GBP(7'000), EUR(30'000)}}, + 0, + std::nullopt, + {all}); + + // tfOneAssetLPToken mode + testAMM( + [&](AMM& ammAlice, Env& env) { + ammAlice.withdraw(WithdrawArg{ + .account = alice, + .tokens = 1'000, + .asset1Out = STAmount{GBP, 100}, + .flags = tfOneAssetLPToken}); + invariant(ammAlice, env, "with6", false); + }, + {{GBP(7'000), EUR(30'000)}}, + 0, + std::nullopt, + {all}); + + // tfLimitLPToken mode + testAMM( + [&](AMM& ammAlice, Env& env) { + ammAlice.withdraw(WithdrawArg{ + .account = alice, + .asset1Out = STAmount{GBP, 100}, + .maxEP = IOUAmount{2}, + .flags = tfLimitLPToken}); + invariant(ammAlice, env, "with7", true); + }, + {{GBP(7'000), EUR(30'000)}}, + 0, + std::nullopt, + {all}); + } + void run() override { @@ -7214,46 +7906,60 @@ private: testFeeVote(); testInvalidBid(); testBid(all); - testBid(all - fixAMMv1_1); + testBid(all - fixAMMv1_3); + testBid(all - fixAMMv1_1 - fixAMMv1_3); testInvalidAMMPayment(); testBasicPaymentEngine(all); - testBasicPaymentEngine(all - fixAMMv1_1); + testBasicPaymentEngine(all - fixAMMv1_1 - fixAMMv1_3); testBasicPaymentEngine(all - fixReducedOffersV2); - testBasicPaymentEngine(all - fixAMMv1_1 - fixReducedOffersV2); + testBasicPaymentEngine( + all - fixAMMv1_1 - fixAMMv1_3 - fixReducedOffersV2); testAMMTokens(); testAmendment(); testFlags(); testRippling(); testAMMAndCLOB(all); - testAMMAndCLOB(all - fixAMMv1_1); + testAMMAndCLOB(all - fixAMMv1_1 - fixAMMv1_3); testTradingFee(all); - testTradingFee(all - fixAMMv1_1); + testTradingFee(all - fixAMMv1_3); + testTradingFee(all - fixAMMv1_1 - fixAMMv1_3); testAdjustedTokens(all); - 
testAdjustedTokens(all - fixAMMv1_1); + testAdjustedTokens(all - fixAMMv1_3); + testAdjustedTokens(all - fixAMMv1_1 - fixAMMv1_3); testAutoDelete(); testClawback(); testAMMID(); testSelection(all); - testSelection(all - fixAMMv1_1); + testSelection(all - fixAMMv1_1 - fixAMMv1_3); testFixDefaultInnerObj(); testMalformed(); testFixOverflowOffer(all); - testFixOverflowOffer(all - fixAMMv1_1); + testFixOverflowOffer(all - fixAMMv1_3); + testFixOverflowOffer(all - fixAMMv1_1 - fixAMMv1_3); testSwapRounding(); testFixChangeSpotPriceQuality(all); - testFixChangeSpotPriceQuality(all - fixAMMv1_1); + testFixChangeSpotPriceQuality(all - fixAMMv1_1 - fixAMMv1_3); testFixAMMOfferBlockedByLOB(all); - testFixAMMOfferBlockedByLOB(all - fixAMMv1_1); + testFixAMMOfferBlockedByLOB(all - fixAMMv1_1 - fixAMMv1_3); testLPTokenBalance(all); - testLPTokenBalance(all - fixAMMv1_1); + testLPTokenBalance(all - fixAMMv1_3); + testLPTokenBalance(all - fixAMMv1_1 - fixAMMv1_3); testAMMClawback(all); testAMMClawback(all - featureAMMClawback); - testAMMClawback(all - fixAMMv1_1 - featureAMMClawback); + testAMMClawback(all - fixAMMv1_1 - fixAMMv1_3 - featureAMMClawback); testAMMDepositWithFrozenAssets(all); testAMMDepositWithFrozenAssets(all - featureAMMClawback); testAMMDepositWithFrozenAssets(all - fixAMMv1_1 - featureAMMClawback); + testAMMDepositWithFrozenAssets( + all - fixAMMv1_1 - fixAMMv1_3 - featureAMMClawback); testFixReserveCheckOnWithdrawal(all); testFixReserveCheckOnWithdrawal(all - fixAMMv1_2); + testDepositAndWithdrawRounding(all); + testDepositAndWithdrawRounding(all - fixAMMv1_3); + testDepositRounding(all); + testDepositRounding(all - fixAMMv1_3); + testWithdrawRounding(all); + testWithdrawRounding(all - fixAMMv1_3); testFailedPseudoAccount(); } }; diff --git a/src/test/jtx/AMM.h b/src/test/jtx/AMM.h index 71e2e5f34c..07e60369fe 100644 --- a/src/test/jtx/AMM.h +++ b/src/test/jtx/AMM.h @@ -127,7 +127,6 @@ class AMM STAmount const asset1_; STAmount const asset2_; uint256 const ammID_; - IOUAmount const initialLPTokens_; bool log_; bool doClose_; // Predict next purchase price @@ -140,6 +139,7 @@ class AMM std::uint32_t const fee_; AccountID const ammAccount_; Issue const lptIssue_; + IOUAmount const initialLPTokens_; public: AMM(Env& env, @@ -196,6 +196,12 @@ public: Issue const& issue2, std::optional const& account = std::nullopt) const; + std::tuple + balances(std::optional const& account = std::nullopt) const + { + return balances(asset1_.get(), asset2_.get(), account); + } + [[nodiscard]] bool expectLPTokens(AccountID const& account, IOUAmount const& tokens) const; @@ -430,6 +436,9 @@ private: [[nodiscard]] bool expectAuctionSlot(auto&& cb) const; + + IOUAmount + initialTokens(); }; namespace amm { diff --git a/src/test/jtx/AMMTest.h b/src/test/jtx/AMMTest.h index 5ff2d21a19..28b9affa8f 100644 --- a/src/test/jtx/AMMTest.h +++ b/src/test/jtx/AMMTest.h @@ -35,6 +35,15 @@ class AMM; enum class Fund { All, Acct, Gw, IOUOnly }; +struct TestAMMArg +{ + std::optional> pool = std::nullopt; + std::uint16_t tfee = 0; + std::optional ter = std::nullopt; + std::vector features = {supported_amendments()}; + bool noLog = false; +}; + void fund( jtx::Env& env, @@ -87,6 +96,11 @@ protected: std::uint16_t tfee = 0, std::optional const& ter = std::nullopt, std::vector const& features = {supported_amendments()}); + + void + testAMM( + std::function&& cb, + TestAMMArg const& arg); }; class AMMTest : public jtx::AMMTestBase diff --git a/src/test/jtx/Env.h b/src/test/jtx/Env.h index 2b5397b903..042bda39a6 100644 --- 
a/src/test/jtx/Env.h +++ b/src/test/jtx/Env.h @@ -622,6 +622,12 @@ public: void disableFeature(uint256 const feature); + bool + enabled(uint256 feature) const + { + return current()->rules().enabled(feature); + } + private: void fund(bool setDefaultRipple, STAmount const& amount, Account const& account); diff --git a/src/test/jtx/impl/AMM.cpp b/src/test/jtx/impl/AMM.cpp index 6345253584..ca96401bc4 100644 --- a/src/test/jtx/impl/AMM.cpp +++ b/src/test/jtx/impl/AMM.cpp @@ -20,6 +20,7 @@ #include #include +#include #include #include @@ -39,12 +40,16 @@ number(STAmount const& a) return a; } -static IOUAmount -initialTokens(STAmount const& asset1, STAmount const& asset2) +IOUAmount +AMM::initialTokens() { - auto const product = number(asset1) * number(asset2); - return (IOUAmount)(product.mantissa() >= 0 ? root2(product) - : root2(-product)); + if (!env_.enabled(fixAMMv1_3)) + { + auto const product = number(asset1_) * number(asset2_); + return (IOUAmount)(product.mantissa() >= 0 ? root2(product) + : root2(-product)); + } + return getLPTokensBalance(); } AMM::AMM( @@ -65,7 +70,6 @@ AMM::AMM( , asset1_(asset1) , asset2_(asset2) , ammID_(keylet::amm(asset1_.issue(), asset2_.issue()).key) - , initialLPTokens_(initialTokens(asset1, asset2)) , log_(log) , doClose_(close) , lastPurchasePrice_(0) @@ -78,6 +82,7 @@ AMM::AMM( asset1_.issue().currency, asset2_.issue().currency, ammAccount_)) + , initialLPTokens_(initialTokens()) { } diff --git a/src/test/jtx/impl/AMMTest.cpp b/src/test/jtx/impl/AMMTest.cpp index 8555be01a9..5bb8f14cbf 100644 --- a/src/test/jtx/impl/AMMTest.cpp +++ b/src/test/jtx/impl/AMMTest.cpp @@ -19,6 +19,7 @@ #include #include +#include #include #include @@ -105,15 +106,31 @@ AMMTestBase::testAMM( std::uint16_t tfee, std::optional const& ter, std::vector const& vfeatures) +{ + testAMM( + std::move(cb), + TestAMMArg{ + .pool = pool, .tfee = tfee, .ter = ter, .features = vfeatures}); +} + +void +AMMTestBase::testAMM( + std::function&& cb, + TestAMMArg const& arg) { using namespace jtx; - for (auto const& features : vfeatures) + std::string logs; + + for (auto const& features : arg.features) { - Env env{*this, features}; + Env env{ + *this, + features, + arg.noLog ? std::make_unique(&logs) : nullptr}; auto const [asset1, asset2] = - pool ? *pool : std::make_pair(XRP(10000), USD(10000)); + arg.pool ? 
*arg.pool : std::make_pair(XRP(10000), USD(10000)); auto tofund = [&](STAmount const& a) -> STAmount { if (a.native()) { @@ -143,7 +160,7 @@ AMMTestBase::testAMM( alice, asset1, asset2, - CreateArg{.log = false, .tfee = tfee, .err = ter}); + CreateArg{.log = false, .tfee = arg.tfee, .err = arg.ter}); if (BEAST_EXPECT( ammAlice.expectBalances(asset1, asset2, ammAlice.tokens()))) cb(ammAlice, env); diff --git a/src/test/rpc/AMMInfo_test.cpp b/src/test/rpc/AMMInfo_test.cpp index a0985ea104..1c54580aa7 100644 --- a/src/test/rpc/AMMInfo_test.cpp +++ b/src/test/rpc/AMMInfo_test.cpp @@ -203,98 +203,119 @@ public: } void - testVoteAndBid() + testVoteAndBid(FeatureBitset features) { testcase("Vote and Bid"); using namespace jtx; - testAMM([&](AMM& ammAlice, Env& env) { - BEAST_EXPECT(ammAlice.expectAmmRpcInfo( - XRP(10000), USD(10000), IOUAmount{10000000, 0})); - std::unordered_map votes; - votes.insert({alice.human(), 0}); - for (int i = 0; i < 7; ++i) - { - Account a(std::to_string(i)); - votes.insert({a.human(), 50 * (i + 1)}); - fund(env, gw, {a}, {USD(10000)}, Fund::Acct); - ammAlice.deposit(a, 10000000); - ammAlice.vote(a, 50 * (i + 1)); - } - BEAST_EXPECT(ammAlice.expectTradingFee(175)); - Account ed("ed"); - Account bill("bill"); - env.fund(XRP(1000), bob, ed, bill); - env(ammAlice.bid( - {.bidMin = 100, .authAccounts = {carol, bob, ed, bill}})); - BEAST_EXPECT(ammAlice.expectAmmRpcInfo( - XRP(80000), - USD(80000), - IOUAmount{79994400}, - std::nullopt, - std::nullopt, - ammAlice.ammAccount())); - for (auto i = 0; i < 2; ++i) - { - std::unordered_set authAccounts = { - carol.human(), bob.human(), ed.human(), bill.human()}; - auto const ammInfo = i ? ammAlice.ammRpcInfo() - : ammAlice.ammRpcInfo( - std::nullopt, - std::nullopt, - std::nullopt, - std::nullopt, - ammAlice.ammAccount()); - auto const& amm = ammInfo[jss::amm]; - try + testAMM( + [&](AMM& ammAlice, Env& env) { + BEAST_EXPECT(ammAlice.expectAmmRpcInfo( + XRP(10000), USD(10000), IOUAmount{10000000, 0})); + std::unordered_map votes; + votes.insert({alice.human(), 0}); + for (int i = 0; i < 7; ++i) { - // votes - auto const voteSlots = amm[jss::vote_slots]; - auto votesCopy = votes; - for (std::uint8_t i = 0; i < 8; ++i) + Account a(std::to_string(i)); + votes.insert({a.human(), 50 * (i + 1)}); + if (!features[fixAMMv1_3]) + fund(env, gw, {a}, {USD(10000)}, Fund::Acct); + else + fund(env, gw, {a}, {USD(10001)}, Fund::Acct); + ammAlice.deposit(a, 10000000); + ammAlice.vote(a, 50 * (i + 1)); + } + BEAST_EXPECT(ammAlice.expectTradingFee(175)); + Account ed("ed"); + Account bill("bill"); + env.fund(XRP(1000), bob, ed, bill); + env(ammAlice.bid( + {.bidMin = 100, .authAccounts = {carol, bob, ed, bill}})); + if (!features[fixAMMv1_3]) + BEAST_EXPECT(ammAlice.expectAmmRpcInfo( + XRP(80000), + USD(80000), + IOUAmount{79994400}, + std::nullopt, + std::nullopt, + ammAlice.ammAccount())); + else + BEAST_EXPECT(ammAlice.expectAmmRpcInfo( + XRPAmount(80000000005), + STAmount{USD, UINT64_C(80'000'00000000005), -11}, + IOUAmount{79994400}, + std::nullopt, + std::nullopt, + ammAlice.ammAccount())); + for (auto i = 0; i < 2; ++i) + { + std::unordered_set authAccounts = { + carol.human(), bob.human(), ed.human(), bill.human()}; + auto const ammInfo = i ? 
ammAlice.ammRpcInfo() + : ammAlice.ammRpcInfo( + std::nullopt, + std::nullopt, + std::nullopt, + std::nullopt, + ammAlice.ammAccount()); + auto const& amm = ammInfo[jss::amm]; + try { - if (!BEAST_EXPECT( - votes[voteSlots[i][jss::account].asString()] == - voteSlots[i][jss::trading_fee].asUInt() && - voteSlots[i][jss::vote_weight].asUInt() == - 12500)) + // votes + auto const voteSlots = amm[jss::vote_slots]; + auto votesCopy = votes; + for (std::uint8_t i = 0; i < 8; ++i) + { + if (!BEAST_EXPECT( + votes[voteSlots[i][jss::account] + .asString()] == + voteSlots[i][jss::trading_fee] + .asUInt() && + voteSlots[i][jss::vote_weight].asUInt() == + 12500)) + return; + votes.erase(voteSlots[i][jss::account].asString()); + } + if (!BEAST_EXPECT(votes.empty())) return; - votes.erase(voteSlots[i][jss::account].asString()); - } - if (!BEAST_EXPECT(votes.empty())) - return; - votes = votesCopy; + votes = votesCopy; - // bid - auto const auctionSlot = amm[jss::auction_slot]; - for (std::uint8_t i = 0; i < 4; ++i) - { - if (!BEAST_EXPECT(authAccounts.contains( + // bid + auto const auctionSlot = amm[jss::auction_slot]; + for (std::uint8_t i = 0; i < 4; ++i) + { + if (!BEAST_EXPECT(authAccounts.contains( + auctionSlot[jss::auth_accounts][i] + [jss::account] + .asString()))) + return; + authAccounts.erase( auctionSlot[jss::auth_accounts][i][jss::account] - .asString()))) + .asString()); + } + if (!BEAST_EXPECT(authAccounts.empty())) return; - authAccounts.erase( - auctionSlot[jss::auth_accounts][i][jss::account] - .asString()); + BEAST_EXPECT( + auctionSlot[jss::account].asString() == + alice.human() && + auctionSlot[jss::discounted_fee].asUInt() == 17 && + auctionSlot[jss::price][jss::value].asString() == + "5600" && + auctionSlot[jss::price][jss::currency].asString() == + to_string(ammAlice.lptIssue().currency) && + auctionSlot[jss::price][jss::issuer].asString() == + to_string(ammAlice.lptIssue().account)); + } + catch (std::exception const& e) + { + fail(e.what(), __FILE__, __LINE__); } - if (!BEAST_EXPECT(authAccounts.empty())) - return; - BEAST_EXPECT( - auctionSlot[jss::account].asString() == alice.human() && - auctionSlot[jss::discounted_fee].asUInt() == 17 && - auctionSlot[jss::price][jss::value].asString() == - "5600" && - auctionSlot[jss::price][jss::currency].asString() == - to_string(ammAlice.lptIssue().currency) && - auctionSlot[jss::price][jss::issuer].asString() == - to_string(ammAlice.lptIssue().account)); } - catch (std::exception const& e) - { - fail(e.what(), __FILE__, __LINE__); - } - } - }); + }, + std::nullopt, + 0, + std::nullopt, + {features}); } void @@ -337,9 +358,12 @@ public: void run() override { + using namespace jtx; + auto const all = supported_amendments(); testErrors(); testSimpleRpc(); - testVoteAndBid(); + testVoteAndBid(all); + testVoteAndBid(all - fixAMMv1_3); testFreeze(); testInvalidAmmField(); } diff --git a/src/xrpld/app/misc/AMMHelpers.h b/src/xrpld/app/misc/AMMHelpers.h index 97554b7e15..8cc39468b1 100644 --- a/src/xrpld/app/misc/AMMHelpers.h +++ b/src/xrpld/app/misc/AMMHelpers.h @@ -48,6 +48,8 @@ reduceOffer(auto const& amount) } // namespace detail +enum class IsDeposit : bool { No = false, Yes = true }; + /** Calculate LP Tokens given AMM pool reserves. 
* @param asset1 AMM one side of the pool reserve * @param asset2 AMM another side of the pool reserve @@ -67,7 +69,7 @@ ammLPTokens( * @return tokens */ STAmount -lpTokensIn( +lpTokensOut( STAmount const& asset1Balance, STAmount const& asset1Deposit, STAmount const& lptAMMBalance, @@ -96,7 +98,7 @@ ammAssetIn( * @return tokens out amount */ STAmount -lpTokensOut( +lpTokensIn( STAmount const& asset1Balance, STAmount const& asset1Withdraw, STAmount const& lptAMMBalance, @@ -110,7 +112,7 @@ lpTokensOut( * @return calculated asset amount */ STAmount -withdrawByTokens( +ammAssetOut( STAmount const& assetBalance, STAmount const& lptAMMBalance, STAmount const& lpTokens, @@ -608,13 +610,13 @@ square(Number const& n); * withdraw to cancel out the precision loss. * @param lptAMMBalance LPT AMM Balance * @param lpTokens LP tokens to deposit or withdraw - * @param isDeposit true if deposit, false if withdraw + * @param isDeposit Yes if deposit, No if withdraw */ STAmount adjustLPTokens( STAmount const& lptAMMBalance, STAmount const& lpTokens, - bool isDeposit); + IsDeposit isDeposit); /** Calls adjustLPTokens() and adjusts deposit or withdraw amounts if * the adjusted LP tokens are less than the provided LP tokens. @@ -624,7 +626,7 @@ adjustLPTokens( * @param lptAMMBalance LPT AMM Balance * @param lpTokens LP tokens to deposit or withdraw * @param tfee trading fee in basis points - * @param isDeposit true if deposit, false if withdraw + * @param isDeposit Yes if deposit, No if withdraw * @return */ std::tuple, STAmount> @@ -635,7 +637,7 @@ adjustAmountsByLPTokens( STAmount const& lptAMMBalance, STAmount const& lpTokens, std::uint16_t tfee, - bool isDeposit); + IsDeposit isDeposit); /** Positive solution for quadratic equation: * x = (-b + sqrt(b**2 + 4*a*c))/(2*a) @@ -643,6 +645,141 @@ adjustAmountsByLPTokens( Number solveQuadraticEq(Number const& a, Number const& b, Number const& c); +STAmount +multiply(STAmount const& amount, Number const& frac, Number::rounding_mode rm); + +namespace detail { + +inline Number::rounding_mode +getLPTokenRounding(IsDeposit isDeposit) +{ + // Minimize on deposit, maximize on withdraw to ensure + // AMM invariant sqrt(poolAsset1 * poolAsset2) >= LPTokensBalance + return isDeposit == IsDeposit::Yes ? Number::downward : Number::upward; +} + +inline Number::rounding_mode +getAssetRounding(IsDeposit isDeposit) +{ + // Maximize on deposit, minimize on withdraw to ensure + // AMM invariant sqrt(poolAsset1 * poolAsset2) >= LPTokensBalance + return isDeposit == IsDeposit::Yes ? Number::upward : Number::downward; +} + +} // namespace detail + +/** Round AMM equal deposit/withdrawal amount. Deposit/withdrawal formulas + * calculate the amount as a fractional value of the pool balance. The rounding + * takes place on the last step of multiplying the balance by the fraction if + * AMMv1_3 is enabled. + */ +template +STAmount +getRoundedAsset( + Rules const& rules, + STAmount const& balance, + A const& frac, + IsDeposit isDeposit) +{ + if (!rules.enabled(fixAMMv1_3)) + { + if constexpr (std::is_same_v) + return multiply(balance, frac, balance.issue()); + else + return toSTAmount(balance.issue(), balance * frac); + } + auto const rm = detail::getAssetRounding(isDeposit); + return multiply(balance, frac, rm); +} + +/** Round AMM single deposit/withdrawal amount. + * The lambda's are used to delay evaluation until the function + * is executed so that the calculation is not done twice. noRoundCb() is + * called if AMMv1_3 is disabled. 
Otherwise, the rounding is set and + * the amount is: + * isDeposit is Yes - the balance multiplied by productCb() + * isDeposit is No - the result of productCb(). The rounding is + * the same for all calculations in productCb() + */ +STAmount +getRoundedAsset( + Rules const& rules, + std::function&& noRoundCb, + STAmount const& balance, + std::function&& productCb, + IsDeposit isDeposit); + +/** Round AMM deposit/withdrawal LPToken amount. Deposit/withdrawal formulas + * calculate the lptokens as a fractional value of the AMM total lptokens. + * The rounding takes place on the last step of multiplying the balance by + * the fraction if AMMv1_3 is enabled. The tokens are then + * adjusted to factor in the loss in precision (we only keep 16 significant + * digits) when adding the lptokens to the balance. + */ +STAmount +getRoundedLPTokens( + Rules const& rules, + STAmount const& balance, + Number const& frac, + IsDeposit isDeposit); + +/** Round AMM single deposit/withdrawal LPToken amount. + * The lambda's are used to delay evaluation until the function is executed + * so that the calculations are not done twice. + * noRoundCb() is called if AMMv1_3 is disabled. Otherwise, the rounding is set + * and the lptokens are: + * if isDeposit is Yes - the result of productCb(). The rounding is + * the same for all calculations in productCb() + * if isDeposit is No - the balance multiplied by productCb() + * The lptokens are then adjusted to factor in the loss in precision + * (we only keep 16 significant digits) when adding the lptokens to the balance. + */ +STAmount +getRoundedLPTokens( + Rules const& rules, + std::function&& noRoundCb, + STAmount const& lptAMMBalance, + std::function&& productCb, + IsDeposit isDeposit); + +/* Next two functions adjust asset in/out amount to factor in the adjusted + * lptokens. The lptokens are calculated from the asset in/out. The lptokens are + * then adjusted to factor in the loss in precision. The adjusted lptokens might + * be less than the initially calculated tokens. Therefore, the asset in/out + * must be adjusted. The rounding might result in the adjusted amount being + * greater than the original asset in/out amount. If this happens, + * then the original amount is reduced by the difference in the adjusted amount + * and the original amount. The actual tokens and the actual adjusted amount + * are then recalculated. The minimum of the original and the actual + * adjusted amount is returned. + */ +std::pair +adjustAssetInByTokens( + Rules const& rules, + STAmount const& balance, + STAmount const& amount, + STAmount const& lptAMMBalance, + STAmount const& tokens, + std::uint16_t tfee); +std::pair +adjustAssetOutByTokens( + Rules const& rules, + STAmount const& balance, + STAmount const& amount, + STAmount const& lptAMMBalance, + STAmount const& tokens, + std::uint16_t tfee); + +/** Find a fraction of tokens after the tokens are adjusted. The fraction + * is used to adjust equal deposit/withdraw amount. 
+ */ +Number +adjustFracByTokens( + Rules const& rules, + STAmount const& lptAMMBalance, + STAmount const& tokens, + Number const& frac); + } // namespace ripple #endif // RIPPLE_APP_MISC_AMMHELPERS_H_INCLUDED diff --git a/src/xrpld/app/misc/detail/AMMHelpers.cpp b/src/xrpld/app/misc/detail/AMMHelpers.cpp index 8724c413a6..49ad01c3ae 100644 --- a/src/xrpld/app/misc/detail/AMMHelpers.cpp +++ b/src/xrpld/app/misc/detail/AMMHelpers.cpp @@ -27,6 +27,10 @@ ammLPTokens( STAmount const& asset2, Issue const& lptIssue) { + // AMM invariant: sqrt(asset1 * asset2) >= LPTokensBalance + auto const rounding = + isFeatureEnabled(fixAMMv1_3) ? Number::downward : Number::getround(); + NumberRoundModeGuard g(rounding); auto const tokens = root2(asset1 * asset2); return toSTAmount(lptIssue, tokens); } @@ -38,7 +42,7 @@ ammLPTokens( * where f1 = 1 - tfee, f2 = (1 - tfee/2)/f1 */ STAmount -lpTokensIn( +lpTokensOut( STAmount const& asset1Balance, STAmount const& asset1Deposit, STAmount const& lptAMMBalance, @@ -48,8 +52,17 @@ lpTokensIn( auto const f2 = feeMultHalf(tfee) / f1; Number const r = asset1Deposit / asset1Balance; auto const c = root2(f2 * f2 + r / f1) - f2; - auto const t = lptAMMBalance * (r - c) / (1 + c); - return toSTAmount(lptAMMBalance.issue(), t); + if (!isFeatureEnabled(fixAMMv1_3)) + { + auto const t = lptAMMBalance * (r - c) / (1 + c); + return toSTAmount(lptAMMBalance.issue(), t); + } + else + { + // minimize tokens out + auto const frac = (r - c) / (1 + c); + return multiply(lptAMMBalance, frac, Number::downward); + } } /* Equation 4 solves equation 3 for b: @@ -78,8 +91,17 @@ ammAssetIn( auto const a = 1 / (t2 * t2); auto const b = 2 * d / t2 - 1 / f1; auto const c = d * d - f2 * f2; - return toSTAmount( - asset1Balance.issue(), asset1Balance * solveQuadraticEq(a, b, c)); + if (!isFeatureEnabled(fixAMMv1_3)) + { + return toSTAmount( + asset1Balance.issue(), asset1Balance * solveQuadraticEq(a, b, c)); + } + else + { + // maximize deposit + auto const frac = solveQuadraticEq(a, b, c); + return multiply(asset1Balance, frac, Number::upward); + } } /* Equation 7: @@ -87,7 +109,7 @@ ammAssetIn( * where R = b/B, c = R*fee + 2 - fee */ STAmount -lpTokensOut( +lpTokensIn( STAmount const& asset1Balance, STAmount const& asset1Withdraw, STAmount const& lptAMMBalance, @@ -96,8 +118,17 @@ lpTokensOut( Number const fr = asset1Withdraw / asset1Balance; auto const f1 = getFee(tfee); auto const c = fr * f1 + 2 - f1; - auto const t = lptAMMBalance * (c - root2(c * c - 4 * fr)) / 2; - return toSTAmount(lptAMMBalance.issue(), t); + if (!isFeatureEnabled(fixAMMv1_3)) + { + auto const t = lptAMMBalance * (c - root2(c * c - 4 * fr)) / 2; + return toSTAmount(lptAMMBalance.issue(), t); + } + else + { + // maximize tokens in + auto const frac = (c - root2(c * c - 4 * fr)) / 2; + return multiply(lptAMMBalance, frac, Number::upward); + } } /* Equation 8 solves equation 7 for b: @@ -111,7 +142,7 @@ lpTokensOut( * R = (t1**2 + t1*(f - 2)) / (t1*f - 1) */ STAmount -withdrawByTokens( +ammAssetOut( STAmount const& assetBalance, STAmount const& lptAMMBalance, STAmount const& lpTokens, @@ -119,8 +150,17 @@ withdrawByTokens( { auto const f = getFee(tfee); Number const t1 = lpTokens / lptAMMBalance; - auto const b = assetBalance * (t1 * t1 - t1 * (2 - f)) / (t1 * f - 1); - return toSTAmount(assetBalance.issue(), b); + if (!isFeatureEnabled(fixAMMv1_3)) + { + auto const b = assetBalance * (t1 * t1 - t1 * (2 - f)) / (t1 * f - 1); + return toSTAmount(assetBalance.issue(), b); + } + else + { + // minimize withdraw + auto const 
frac = (t1 * t1 - t1 * (2 - f)) / (t1 * f - 1); + return multiply(assetBalance, frac, Number::downward); + } } Number @@ -133,12 +173,12 @@ STAmount adjustLPTokens( STAmount const& lptAMMBalance, STAmount const& lpTokens, - bool isDeposit) + IsDeposit isDeposit) { // Force rounding downward to ensure adjusted tokens are less or equal // to requested tokens. saveNumberRoundMode rm(Number::setround(Number::rounding_mode::downward)); - if (isDeposit) + if (isDeposit == IsDeposit::Yes) return (lptAMMBalance + lpTokens) - lptAMMBalance; return (lpTokens - lptAMMBalance) + lptAMMBalance; } @@ -151,8 +191,12 @@ adjustAmountsByLPTokens( STAmount const& lptAMMBalance, STAmount const& lpTokens, std::uint16_t tfee, - bool isDeposit) + IsDeposit isDeposit) { + // AMMv1_3 amendment adjusts tokens and amounts in deposit/withdraw + if (isFeatureEnabled(fixAMMv1_3)) + return std::make_tuple(amount, amount2, lpTokens); + auto const lpTokensActual = adjustLPTokens(lptAMMBalance, lpTokens, isDeposit); @@ -191,14 +235,14 @@ adjustAmountsByLPTokens( // Single trade auto const amountActual = [&]() { - if (isDeposit) + if (isDeposit == IsDeposit::Yes) return ammAssetIn( amountBalance, lptAMMBalance, lpTokensActual, tfee); else if (!ammRoundingEnabled) - return withdrawByTokens( + return ammAssetOut( amountBalance, lptAMMBalance, lpTokens, tfee); else - return withdrawByTokens( + return ammAssetOut( amountBalance, lptAMMBalance, lpTokensActual, tfee); }(); if (!ammRoundingEnabled) @@ -237,4 +281,132 @@ solveQuadraticEqSmallest(Number const& a, Number const& b, Number const& c) return (2 * c) / (-b + root2(d)); } +STAmount +multiply(STAmount const& amount, Number const& frac, Number::rounding_mode rm) +{ + NumberRoundModeGuard g(rm); + auto const t = amount * frac; + return toSTAmount(amount.issue(), t, rm); +} + +STAmount +getRoundedAsset( + Rules const& rules, + std::function&& noRoundCb, + STAmount const& balance, + std::function&& productCb, + IsDeposit isDeposit) +{ + if (!rules.enabled(fixAMMv1_3)) + return toSTAmount(balance.issue(), noRoundCb()); + + auto const rm = detail::getAssetRounding(isDeposit); + if (isDeposit == IsDeposit::Yes) + return multiply(balance, productCb(), rm); + NumberRoundModeGuard g(rm); + return toSTAmount(balance.issue(), productCb(), rm); +} + +STAmount +getRoundedLPTokens( + Rules const& rules, + STAmount const& balance, + Number const& frac, + IsDeposit isDeposit) +{ + if (!rules.enabled(fixAMMv1_3)) + return toSTAmount(balance.issue(), balance * frac); + + auto const rm = detail::getLPTokenRounding(isDeposit); + auto const tokens = multiply(balance, frac, rm); + return adjustLPTokens(balance, tokens, isDeposit); +} + +STAmount +getRoundedLPTokens( + Rules const& rules, + std::function&& noRoundCb, + STAmount const& lptAMMBalance, + std::function&& productCb, + IsDeposit isDeposit) +{ + if (!rules.enabled(fixAMMv1_3)) + return toSTAmount(lptAMMBalance.issue(), noRoundCb()); + + auto const tokens = [&] { + auto const rm = detail::getLPTokenRounding(isDeposit); + if (isDeposit == IsDeposit::Yes) + { + NumberRoundModeGuard g(rm); + return toSTAmount(lptAMMBalance.issue(), productCb(), rm); + } + return multiply(lptAMMBalance, productCb(), rm); + }(); + return adjustLPTokens(lptAMMBalance, tokens, isDeposit); +} + +std::pair +adjustAssetInByTokens( + Rules const& rules, + STAmount const& balance, + STAmount const& amount, + STAmount const& lptAMMBalance, + STAmount const& tokens, + std::uint16_t tfee) +{ + if (!rules.enabled(fixAMMv1_3)) + return {tokens, amount}; + auto assetAdj = 
ammAssetIn(balance, lptAMMBalance, tokens, tfee); + auto tokensAdj = tokens; + // Rounding didn't work the right way. + // Try to adjust the original deposit amount by difference + // in adjust and original amount. Then adjust tokens and deposit amount. + if (assetAdj > amount) + { + auto const adjAmount = amount - (assetAdj - amount); + auto const t = lpTokensOut(balance, adjAmount, lptAMMBalance, tfee); + tokensAdj = adjustLPTokens(lptAMMBalance, t, IsDeposit::Yes); + assetAdj = ammAssetIn(balance, lptAMMBalance, tokensAdj, tfee); + } + return {tokensAdj, std::min(amount, assetAdj)}; +} + +std::pair +adjustAssetOutByTokens( + Rules const& rules, + STAmount const& balance, + STAmount const& amount, + STAmount const& lptAMMBalance, + STAmount const& tokens, + std::uint16_t tfee) +{ + if (!rules.enabled(fixAMMv1_3)) + return {tokens, amount}; + auto assetAdj = ammAssetOut(balance, lptAMMBalance, tokens, tfee); + auto tokensAdj = tokens; + // Rounding didn't work the right way. + // Try to adjust the original deposit amount by difference + // in adjust and original amount. Then adjust tokens and deposit amount. + if (assetAdj > amount) + { + auto const adjAmount = amount - (assetAdj - amount); + auto const t = lpTokensIn(balance, adjAmount, lptAMMBalance, tfee); + tokensAdj = adjustLPTokens(lptAMMBalance, t, IsDeposit::No); + assetAdj = ammAssetOut(balance, lptAMMBalance, tokensAdj, tfee); + } + return {tokensAdj, std::min(amount, assetAdj)}; +} + +Number +adjustFracByTokens( + Rules const& rules, + STAmount const& lptAMMBalance, + STAmount const& tokens, + Number const& frac) +{ + if (!rules.enabled(fixAMMv1_3)) + return frac; + return tokens / lptAMMBalance; +} + } // namespace ripple diff --git a/src/xrpld/app/tx/detail/AMMBid.cpp b/src/xrpld/app/tx/detail/AMMBid.cpp index 6fec46be90..86a80431b4 100644 --- a/src/xrpld/app/tx/detail/AMMBid.cpp +++ b/src/xrpld/app/tx/detail/AMMBid.cpp @@ -78,6 +78,21 @@ AMMBid::preflight(PreflightContext const& ctx) JLOG(ctx.j.debug()) << "AMM Bid: Invalid number of AuthAccounts."; return temMALFORMED; } + else if (ctx.rules.enabled(fixAMMv1_3)) + { + AccountID account = ctx.tx[sfAccount]; + std::set unique; + for (auto const& obj : authAccounts) + { + auto authAccount = obj[sfAccount]; + if (authAccount == account || unique.contains(authAccount)) + { + JLOG(ctx.j.debug()) << "AMM Bid: Invalid auth.account."; + return temMALFORMED; + } + unique.insert(authAccount); + } + } } return preflight2(ctx); @@ -232,7 +247,9 @@ applyBid( auctionSlot.makeFieldAbsent(sfAuthAccounts); // Burn the remaining bid amount auto const saBurn = adjustLPTokens( - lptAMMBalance, toSTAmount(lptAMMBalance.issue(), burn), false); + lptAMMBalance, + toSTAmount(lptAMMBalance.issue(), burn), + IsDeposit::No); if (saBurn >= lptAMMBalance) { // This error case should never occur. 
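The deposit and withdraw helpers above all follow one rounding rule: when value enters the pool the asset amounts are rounded up and the LP tokens rounded down, and when value leaves the pool the directions flip, so the pool product can only gain slack against the LP token supply. The standalone sketch below is not rippled code; it substitutes double and fixed-decimal rounding for Number, STAmount, and NumberRoundModeGuard, so every name and value in it is illustrative only. It shows how the directed rounding keeps the sqrt(asset1 * asset2) >= LPTokenBalance invariant (the one ValidAMM checks) satisfied with deterministic margin after an equal deposit:

    #include <cmath>
    #include <cstdio>

    // Round x * frac to `scale` decimal places, toward +infinity or -infinity
    // (a crude stand-in for Number::upward / Number::downward).
    static double
    mulRounded(double x, double frac, int scale, bool roundUp)
    {
        double const s = std::pow(10.0, scale);
        double const exact = x * frac * s;
        return (roundUp ? std::ceil(exact) : std::floor(exact)) / s;
    }

    int
    main()
    {
        double pool1 = 7000.0;                  // one side of the pool
        double pool2 = 30000.0;                 // other side of the pool
        double lpt = std::sqrt(pool1 * pool2);  // LP token supply at creation

        // Equal deposit of 10% of the pool: round the deposited assets up and
        // the issued LP tokens down, mirroring what getRoundedAsset() and
        // getRoundedLPTokens() do for IsDeposit::Yes.
        double const frac = 0.1;
        pool1 += mulRounded(pool1, frac, 6, /*roundUp=*/true);
        pool2 += mulRounded(pool2, frac, 6, /*roundUp=*/true);
        lpt += mulRounded(lpt, frac, 6, /*roundUp=*/false);

        // Invariant checked after the transaction:
        // sqrt(pool1 * pool2) >= LP token balance.
        std::printf(
            "sqrt(product) = %.9f, LPT = %.9f, invariant holds: %s\n",
            std::sqrt(pool1 * pool2),
            lpt,
            std::sqrt(pool1 * pool2) >= lpt ? "yes" : "no");
        return 0;
    }

Flipping either direction in the sketch (rounding assets down on deposit, or tokens up) lets the LP token supply creep above the pool's geometric mean after repeated operations, which is the kind of drift the rounding comments in AMMHelpers.h say these helpers are meant to prevent.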
diff --git a/src/xrpld/app/tx/detail/AMMDeposit.cpp b/src/xrpld/app/tx/detail/AMMDeposit.cpp index 6a718a3f04..0dafa0da6c 100644 --- a/src/xrpld/app/tx/detail/AMMDeposit.cpp +++ b/src/xrpld/app/tx/detail/AMMDeposit.cpp @@ -542,7 +542,7 @@ AMMDeposit::deposit( lptAMMBalance, lpTokensDeposit, tfee, - true); + IsDeposit::Yes); if (lpTokensDepositActual <= beast::zero) { @@ -625,6 +625,17 @@ AMMDeposit::deposit( return {tesSUCCESS, lptAMMBalance + lpTokensDepositActual}; } +static STAmount +adjustLPTokensOut( + Rules const& rules, + STAmount const& lptAMMBalance, + STAmount const& lpTokensDeposit) +{ + if (!rules.enabled(fixAMMv1_3)) + return lpTokensDeposit; + return adjustLPTokens(lptAMMBalance, lpTokensDeposit, IsDeposit::Yes); +} + /** Proportional deposit of pools assets in exchange for the specified * amount of LPTokens. */ @@ -642,16 +653,25 @@ AMMDeposit::equalDepositTokens( { try { + auto const tokensAdj = + adjustLPTokensOut(view.rules(), lptAMMBalance, lpTokensDeposit); + if (view.rules().enabled(fixAMMv1_3) && tokensAdj == beast::zero) + return {tecAMM_INVALID_TOKENS, STAmount{}}; auto const frac = - divide(lpTokensDeposit, lptAMMBalance, lptAMMBalance.issue()); + divide(tokensAdj, lptAMMBalance, lptAMMBalance.issue()); + // amounts factor in the adjusted tokens + auto const amountDeposit = + getRoundedAsset(view.rules(), amountBalance, frac, IsDeposit::Yes); + auto const amount2Deposit = + getRoundedAsset(view.rules(), amount2Balance, frac, IsDeposit::Yes); return deposit( view, ammAccount, amountBalance, - multiply(amountBalance, frac, amountBalance.issue()), - multiply(amount2Balance, frac, amount2Balance.issue()), + amountDeposit, + amount2Deposit, lptAMMBalance, - lpTokensDeposit, + tokensAdj, depositMin, deposit2Min, std::nullopt, @@ -708,37 +728,55 @@ AMMDeposit::equalDepositLimit( std::uint16_t tfee) { auto frac = Number{amount} / amountBalance; - auto tokens = toSTAmount(lptAMMBalance.issue(), lptAMMBalance * frac); - if (tokens == beast::zero) - return {tecAMM_FAILED, STAmount{}}; - auto const amount2Deposit = amount2Balance * frac; + auto tokensAdj = + getRoundedLPTokens(view.rules(), lptAMMBalance, frac, IsDeposit::Yes); + if (tokensAdj == beast::zero) + { + if (!view.rules().enabled(fixAMMv1_3)) + return {tecAMM_FAILED, STAmount{}}; // LCOV_EXCL_LINE + else + return {tecAMM_INVALID_TOKENS, STAmount{}}; + } + // factor in the adjusted tokens + frac = adjustFracByTokens(view.rules(), lptAMMBalance, tokensAdj, frac); + auto const amount2Deposit = + getRoundedAsset(view.rules(), amount2Balance, frac, IsDeposit::Yes); if (amount2Deposit <= amount2) return deposit( view, ammAccount, amountBalance, amount, - toSTAmount(amount2Balance.issue(), amount2Deposit), + amount2Deposit, lptAMMBalance, - tokens, + tokensAdj, std::nullopt, std::nullopt, lpTokensDepositMin, tfee); frac = Number{amount2} / amount2Balance; - tokens = toSTAmount(lptAMMBalance.issue(), lptAMMBalance * frac); - if (tokens == beast::zero) - return {tecAMM_FAILED, STAmount{}}; - auto const amountDeposit = amountBalance * frac; + tokensAdj = + getRoundedLPTokens(view.rules(), lptAMMBalance, frac, IsDeposit::Yes); + if (tokensAdj == beast::zero) + { + if (!view.rules().enabled(fixAMMv1_3)) + return {tecAMM_FAILED, STAmount{}}; // LCOV_EXCL_LINE + else + return {tecAMM_INVALID_TOKENS, STAmount{}}; // LCOV_EXCL_LINE + } + // factor in the adjusted tokens + frac = adjustFracByTokens(view.rules(), lptAMMBalance, tokensAdj, frac); + auto const amountDeposit = + getRoundedAsset(view.rules(), amountBalance, frac, 
IsDeposit::Yes); if (amountDeposit <= amount) return deposit( view, ammAccount, amountBalance, - toSTAmount(amountBalance.issue(), amountDeposit), + amountDeposit, amount2, lptAMMBalance, - tokens, + tokensAdj, std::nullopt, std::nullopt, lpTokensDepositMin, @@ -764,17 +802,30 @@ AMMDeposit::singleDeposit( std::optional const& lpTokensDepositMin, std::uint16_t tfee) { - auto const tokens = lpTokensIn(amountBalance, amount, lptAMMBalance, tfee); + auto const tokens = adjustLPTokensOut( + view.rules(), + lptAMMBalance, + lpTokensOut(amountBalance, amount, lptAMMBalance, tfee)); if (tokens == beast::zero) - return {tecAMM_FAILED, STAmount{}}; + { + if (!view.rules().enabled(fixAMMv1_3)) + return {tecAMM_FAILED, STAmount{}}; // LCOV_EXCL_LINE + else + return {tecAMM_INVALID_TOKENS, STAmount{}}; + } + // factor in the adjusted tokens + auto const [tokensAdj, amountDepositAdj] = adjustAssetInByTokens( + view.rules(), amountBalance, amount, lptAMMBalance, tokens, tfee); + if (view.rules().enabled(fixAMMv1_3) && tokensAdj == beast::zero) + return {tecAMM_INVALID_TOKENS, STAmount{}}; // LCOV_EXCL_LINE return deposit( view, ammAccount, amountBalance, - amount, + amountDepositAdj, std::nullopt, lptAMMBalance, - tokens, + tokensAdj, std::nullopt, std::nullopt, lpTokensDepositMin, @@ -798,8 +849,13 @@ AMMDeposit::singleDepositTokens( STAmount const& lpTokensDeposit, std::uint16_t tfee) { + auto const tokensAdj = + adjustLPTokensOut(view.rules(), lptAMMBalance, lpTokensDeposit); + if (view.rules().enabled(fixAMMv1_3) && tokensAdj == beast::zero) + return {tecAMM_INVALID_TOKENS, STAmount{}}; + // the adjusted tokens are factored in auto const amountDeposit = - ammAssetIn(amountBalance, lptAMMBalance, lpTokensDeposit, tfee); + ammAssetIn(amountBalance, lptAMMBalance, tokensAdj, tfee); if (amountDeposit > amount) return {tecAMM_FAILED, STAmount{}}; return deposit( @@ -809,7 +865,7 @@ AMMDeposit::singleDepositTokens( amountDeposit, std::nullopt, lptAMMBalance, - lpTokensDeposit, + tokensAdj, std::nullopt, std::nullopt, std::nullopt, @@ -853,20 +909,32 @@ AMMDeposit::singleDepositEPrice( { if (amount != beast::zero) { - auto const tokens = - lpTokensIn(amountBalance, amount, lptAMMBalance, tfee); + auto const tokens = adjustLPTokensOut( + view.rules(), + lptAMMBalance, + lpTokensOut(amountBalance, amount, lptAMMBalance, tfee)); if (tokens <= beast::zero) - return {tecAMM_FAILED, STAmount{}}; - auto const ep = Number{amount} / tokens; + { + if (!view.rules().enabled(fixAMMv1_3)) + return {tecAMM_FAILED, STAmount{}}; // LCOV_EXCL_LINE + else + return {tecAMM_INVALID_TOKENS, STAmount{}}; + } + // factor in the adjusted tokens + auto const [tokensAdj, amountDepositAdj] = adjustAssetInByTokens( + view.rules(), amountBalance, amount, lptAMMBalance, tokens, tfee); + if (view.rules().enabled(fixAMMv1_3) && tokensAdj == beast::zero) + return {tecAMM_INVALID_TOKENS, STAmount{}}; // LCOV_EXCL_LINE + auto const ep = Number{amountDepositAdj} / tokensAdj; if (ep <= ePrice) return deposit( view, ammAccount, amountBalance, - amount, + amountDepositAdj, std::nullopt, lptAMMBalance, - tokens, + tokensAdj, std::nullopt, std::nullopt, std::nullopt, @@ -897,21 +965,37 @@ AMMDeposit::singleDepositEPrice( auto const a1 = c * c; auto const b1 = c * c * f2 * f2 + 2 * c - d * d; auto const c1 = 2 * c * f2 * f2 + 1 - 2 * d * f2; - auto const amountDeposit = toSTAmount( - amountBalance.issue(), - f1 * amountBalance * solveQuadraticEq(a1, b1, c1)); + auto amtNoRoundCb = [&] { + return f1 * amountBalance * solveQuadraticEq(a1, b1, c1); + 
}; + auto amtProdCb = [&] { return f1 * solveQuadraticEq(a1, b1, c1); }; + auto const amountDeposit = getRoundedAsset( + view.rules(), amtNoRoundCb, amountBalance, amtProdCb, IsDeposit::Yes); if (amountDeposit <= beast::zero) return {tecAMM_FAILED, STAmount{}}; - auto const tokens = - toSTAmount(lptAMMBalance.issue(), amountDeposit / ePrice); + auto tokNoRoundCb = [&] { return amountDeposit / ePrice; }; + auto tokProdCb = [&] { return amountDeposit / ePrice; }; + auto const tokens = getRoundedLPTokens( + view.rules(), tokNoRoundCb, lptAMMBalance, tokProdCb, IsDeposit::Yes); + // factor in the adjusted tokens + auto const [tokensAdj, amountDepositAdj] = adjustAssetInByTokens( + view.rules(), + amountBalance, + amountDeposit, + lptAMMBalance, + tokens, + tfee); + if (view.rules().enabled(fixAMMv1_3) && tokensAdj == beast::zero) + return {tecAMM_INVALID_TOKENS, STAmount{}}; // LCOV_EXCL_LINE + return deposit( view, ammAccount, amountBalance, - amountDeposit, + amountDepositAdj, std::nullopt, lptAMMBalance, - tokens, + tokensAdj, std::nullopt, std::nullopt, std::nullopt, diff --git a/src/xrpld/app/tx/detail/AMMWithdraw.cpp b/src/xrpld/app/tx/detail/AMMWithdraw.cpp index 586f453c6f..69243f3f48 100644 --- a/src/xrpld/app/tx/detail/AMMWithdraw.cpp +++ b/src/xrpld/app/tx/detail/AMMWithdraw.cpp @@ -522,7 +522,7 @@ AMMWithdraw::withdraw( lpTokensAMMBalance, lpTokensWithdraw, tfee, - false); + IsDeposit::No); return std::make_tuple( amountWithdraw, amount2Withdraw, lpTokensWithdraw); }(); @@ -683,6 +683,20 @@ AMMWithdraw::withdraw( amount2WithdrawActual); } +static STAmount +adjustLPTokensIn( + Rules const& rules, + STAmount const& lptAMMBalance, + STAmount const& lpTokensWithdraw, + WithdrawAll withdrawAll) +{ + if (!rules.enabled(fixAMMv1_3) || withdrawAll == WithdrawAll::Yes) + return lpTokensWithdraw; + return adjustLPTokens(lptAMMBalance, lpTokensWithdraw, IsDeposit::No); +} + +/** Proportional withdrawal of pool assets for the amount of LPTokens. + */ std::pair AMMWithdraw::equalWithdrawTokens( Sandbox& view, @@ -786,16 +800,22 @@ AMMWithdraw::equalWithdrawTokens( journal); } - auto const frac = divide(lpTokensWithdraw, lptAMMBalance, noIssue()); - auto const withdrawAmount = - multiply(amountBalance, frac, amountBalance.issue()); - auto const withdraw2Amount = - multiply(amount2Balance, frac, amount2Balance.issue()); + auto const tokensAdj = adjustLPTokensIn( + view.rules(), lptAMMBalance, lpTokensWithdraw, withdrawAll); + if (view.rules().enabled(fixAMMv1_3) && tokensAdj == beast::zero) + return { + tecAMM_INVALID_TOKENS, STAmount{}, STAmount{}, std::nullopt}; + // the adjusted tokens are factored in + auto const frac = divide(tokensAdj, lptAMMBalance, noIssue()); + auto const amountWithdraw = + getRoundedAsset(view.rules(), amountBalance, frac, IsDeposit::No); + auto const amount2Withdraw = + getRoundedAsset(view.rules(), amount2Balance, frac, IsDeposit::No); // LP is making equal withdrawal by tokens but the requested amount // of LP tokens is likely too small and results in one-sided pool // withdrawal due to round off. Fail so the user withdraws // more tokens. 
- if (withdrawAmount == beast::zero || withdraw2Amount == beast::zero) + if (amountWithdraw == beast::zero || amount2Withdraw == beast::zero) return {tecAMM_FAILED, STAmount{}, STAmount{}, STAmount{}}; return withdraw( @@ -804,10 +824,10 @@ AMMWithdraw::equalWithdrawTokens( ammAccount, account, amountBalance, - withdrawAmount, - withdraw2Amount, + amountWithdraw, + amount2Withdraw, lptAMMBalance, - lpTokensWithdraw, + tokensAdj, tfee, freezeHanding, withdrawAll, @@ -862,7 +882,16 @@ AMMWithdraw::equalWithdrawLimit( std::uint16_t tfee) { auto frac = Number{amount} / amountBalance; - auto const amount2Withdraw = amount2Balance * frac; + auto amount2Withdraw = + getRoundedAsset(view.rules(), amount2Balance, frac, IsDeposit::No); + auto tokensAdj = + getRoundedLPTokens(view.rules(), lptAMMBalance, frac, IsDeposit::No); + if (view.rules().enabled(fixAMMv1_3) && tokensAdj == beast::zero) + return {tecAMM_INVALID_TOKENS, STAmount{}}; + // factor in the adjusted tokens + frac = adjustFracByTokens(view.rules(), lptAMMBalance, tokensAdj, frac); + amount2Withdraw = + getRoundedAsset(view.rules(), amount2Balance, frac, IsDeposit::No); if (amount2Withdraw <= amount2) { return withdraw( @@ -871,26 +900,42 @@ AMMWithdraw::equalWithdrawLimit( ammAccount, amountBalance, amount, - toSTAmount(amount2.issue(), amount2Withdraw), + amount2Withdraw, lptAMMBalance, - toSTAmount(lptAMMBalance.issue(), lptAMMBalance * frac), + tokensAdj, tfee); } frac = Number{amount2} / amount2Balance; - auto const amountWithdraw = amountBalance * frac; - XRPL_ASSERT( - amountWithdraw <= amount, - "ripple::AMMWithdraw::equalWithdrawLimit : maximum amountWithdraw"); + auto amountWithdraw = + getRoundedAsset(view.rules(), amountBalance, frac, IsDeposit::No); + tokensAdj = + getRoundedLPTokens(view.rules(), lptAMMBalance, frac, IsDeposit::No); + if (view.rules().enabled(fixAMMv1_3) && tokensAdj == beast::zero) + return {tecAMM_INVALID_TOKENS, STAmount{}}; // LCOV_EXCL_LINE + // factor in the adjusted tokens + frac = adjustFracByTokens(view.rules(), lptAMMBalance, tokensAdj, frac); + amountWithdraw = + getRoundedAsset(view.rules(), amountBalance, frac, IsDeposit::No); + if (!view.rules().enabled(fixAMMv1_3)) + { + // LCOV_EXCL_START + XRPL_ASSERT( + amountWithdraw <= amount, + "ripple::AMMWithdraw::equalWithdrawLimit : maximum amountWithdraw"); + // LCOV_EXCL_STOP + } + else if (amountWithdraw > amount) + return {tecAMM_FAILED, STAmount{}}; // LCOV_EXCL_LINE return withdraw( view, ammSle, ammAccount, amountBalance, - toSTAmount(amount.issue(), amountWithdraw), + amountWithdraw, amount2, lptAMMBalance, - toSTAmount(lptAMMBalance.issue(), lptAMMBalance * frac), + tokensAdj, tfee); } @@ -909,19 +954,32 @@ AMMWithdraw::singleWithdraw( STAmount const& amount, std::uint16_t tfee) { - auto const tokens = lpTokensOut(amountBalance, amount, lptAMMBalance, tfee); + auto const tokens = adjustLPTokensIn( + view.rules(), + lptAMMBalance, + lpTokensIn(amountBalance, amount, lptAMMBalance, tfee), + isWithdrawAll(ctx_.tx)); if (tokens == beast::zero) - return {tecAMM_FAILED, STAmount{}}; - + { + if (!view.rules().enabled(fixAMMv1_3)) + return {tecAMM_FAILED, STAmount{}}; // LCOV_EXCL_LINE + else + return {tecAMM_INVALID_TOKENS, STAmount{}}; + } + // factor in the adjusted tokens + auto const [tokensAdj, amountWithdrawAdj] = adjustAssetOutByTokens( + view.rules(), amountBalance, amount, lptAMMBalance, tokens, tfee); + if (view.rules().enabled(fixAMMv1_3) && tokensAdj == beast::zero) + return {tecAMM_INVALID_TOKENS, STAmount{}}; // LCOV_EXCL_LINE 
return withdraw( view, ammSle, ammAccount, amountBalance, - amount, + amountWithdrawAdj, std::nullopt, lptAMMBalance, - tokens, + tokensAdj, tfee); } @@ -946,8 +1004,13 @@ AMMWithdraw::singleWithdrawTokens( STAmount const& lpTokensWithdraw, std::uint16_t tfee) { + auto const tokensAdj = adjustLPTokensIn( + view.rules(), lptAMMBalance, lpTokensWithdraw, isWithdrawAll(ctx_.tx)); + if (view.rules().enabled(fixAMMv1_3) && tokensAdj == beast::zero) + return {tecAMM_INVALID_TOKENS, STAmount{}}; + // the adjusted tokens are factored in auto const amountWithdraw = - withdrawByTokens(amountBalance, lptAMMBalance, lpTokensWithdraw, tfee); + ammAssetOut(amountBalance, lptAMMBalance, tokensAdj, tfee); if (amount == beast::zero || amountWithdraw >= amount) { return withdraw( @@ -958,7 +1021,7 @@ AMMWithdraw::singleWithdrawTokens( amountWithdraw, std::nullopt, lptAMMBalance, - lpTokensWithdraw, + tokensAdj, tfee); } @@ -1007,11 +1070,27 @@ AMMWithdraw::singleWithdrawEPrice( // t = T*(T + A*E*(f - 2))/(T*f - A*E) Number const ae = amountBalance * ePrice; auto const f = getFee(tfee); - auto const tokens = lptAMMBalance * (lptAMMBalance + ae * (f - 2)) / - (lptAMMBalance * f - ae); - if (tokens <= 0) - return {tecAMM_FAILED, STAmount{}}; - auto const amountWithdraw = toSTAmount(amount.issue(), tokens / ePrice); + auto tokNoRoundCb = [&] { + return lptAMMBalance * (lptAMMBalance + ae * (f - 2)) / + (lptAMMBalance * f - ae); + }; + auto tokProdCb = [&] { + return (lptAMMBalance + ae * (f - 2)) / (lptAMMBalance * f - ae); + }; + auto const tokensAdj = getRoundedLPTokens( + view.rules(), tokNoRoundCb, lptAMMBalance, tokProdCb, IsDeposit::No); + if (tokensAdj <= beast::zero) + { + if (!view.rules().enabled(fixAMMv1_3)) + return {tecAMM_FAILED, STAmount{}}; + else + return {tecAMM_INVALID_TOKENS, STAmount{}}; + } + auto amtNoRoundCb = [&] { return tokensAdj / ePrice; }; + auto amtProdCb = [&] { return tokensAdj / ePrice; }; + // the adjusted tokens are factored in + auto const amountWithdraw = getRoundedAsset( + view.rules(), amtNoRoundCb, amount, amtProdCb, IsDeposit::No); if (amount == beast::zero || amountWithdraw >= amount) { return withdraw( @@ -1022,7 +1101,7 @@ AMMWithdraw::singleWithdrawEPrice( amountWithdraw, std::nullopt, lptAMMBalance, - toSTAmount(lptAMMBalance.issue(), tokens), + tokensAdj, tfee); } diff --git a/src/xrpld/app/tx/detail/AMMWithdraw.h b/src/xrpld/app/tx/detail/AMMWithdraw.h index ae9328cb05..1de91fd787 100644 --- a/src/xrpld/app/tx/detail/AMMWithdraw.h +++ b/src/xrpld/app/tx/detail/AMMWithdraw.h @@ -301,7 +301,7 @@ private: std::uint16_t tfee); /** Check from the flags if it's withdraw all */ - WithdrawAll + static WithdrawAll isWithdrawAll(STTx const& tx); }; diff --git a/src/xrpld/app/tx/detail/InvariantCheck.cpp b/src/xrpld/app/tx/detail/InvariantCheck.cpp index 729f69a03b..05c2a5d620 100644 --- a/src/xrpld/app/tx/detail/InvariantCheck.cpp +++ b/src/xrpld/app/tx/detail/InvariantCheck.cpp @@ -17,6 +17,8 @@ */ //============================================================================== +#include +#include #include #include #include @@ -1663,4 +1665,309 @@ ValidPermissionedDEX::finalize( return true; } +void +ValidAMM::visitEntry( + bool isDelete, + std::shared_ptr const& before, + std::shared_ptr const& after) +{ + if (isDelete) + return; + + if (after) + { + auto const type = after->getType(); + // AMM object changed + if (type == ltAMM) + { + ammAccount_ = after->getAccountID(sfAccount); + lptAMMBalanceAfter_ = after->getFieldAmount(sfLPTokenBalance); + } + // AMM pool changed 
+ else if ( + (type == ltRIPPLE_STATE && after->getFlags() & lsfAMMNode) || + (type == ltACCOUNT_ROOT && after->isFieldPresent(sfAMMID))) + { + ammPoolChanged_ = true; + } + } + + if (before) + { + // AMM object changed + if (before->getType() == ltAMM) + { + lptAMMBalanceBefore_ = before->getFieldAmount(sfLPTokenBalance); + } + } +} + +static bool +validBalances( + STAmount const& amount, + STAmount const& amount2, + STAmount const& lptAMMBalance, + ValidAMM::ZeroAllowed zeroAllowed) +{ + bool const positive = amount > beast::zero && amount2 > beast::zero && + lptAMMBalance > beast::zero; + if (zeroAllowed == ValidAMM::ZeroAllowed::Yes) + return positive || + (amount == beast::zero && amount2 == beast::zero && + lptAMMBalance == beast::zero); + return positive; +} + +bool +ValidAMM::finalizeVote(bool enforce, beast::Journal const& j) const +{ + if (lptAMMBalanceAfter_ != lptAMMBalanceBefore_ || ammPoolChanged_) + { + // LPTokens and the pool can not change on vote + // LCOV_EXCL_START + JLOG(j.error()) << "AMMVote invariant failed: " + << lptAMMBalanceBefore_.value_or(STAmount{}) << " " + << lptAMMBalanceAfter_.value_or(STAmount{}) << " " + << ammPoolChanged_; + if (enforce) + return false; + // LCOV_EXCL_STOP + } + + return true; +} + +bool +ValidAMM::finalizeBid(bool enforce, beast::Journal const& j) const +{ + if (ammPoolChanged_) + { + // The pool can not change on bid + // LCOV_EXCL_START + JLOG(j.error()) << "AMMBid invariant failed: pool changed"; + if (enforce) + return false; + // LCOV_EXCL_STOP + } + // LPTokens are burnt, therefore there should be fewer LPTokens + else if ( + lptAMMBalanceBefore_ && lptAMMBalanceAfter_ && + (*lptAMMBalanceAfter_ > *lptAMMBalanceBefore_ || + *lptAMMBalanceAfter_ <= beast::zero)) + { + // LCOV_EXCL_START + JLOG(j.error()) << "AMMBid invariant failed: " << *lptAMMBalanceBefore_ + << " " << *lptAMMBalanceAfter_; + if (enforce) + return false; + // LCOV_EXCL_STOP + } + + return true; +} + +bool +ValidAMM::finalizeCreate( + STTx const& tx, + ReadView const& view, + bool enforce, + beast::Journal const& j) const +{ + if (!ammAccount_) + { + // LCOV_EXCL_START + JLOG(j.error()) + << "AMMCreate invariant failed: AMM object is not created"; + if (enforce) + return false; + // LCOV_EXCL_STOP + } + else + { + auto const [amount, amount2] = ammPoolHolds( + view, + *ammAccount_, + tx[sfAmount].get(), + tx[sfAmount2].get(), + fhIGNORE_FREEZE, + j); + // Create invariant: + // sqrt(amount * amount2) == LPTokens + // all balances are greater than zero + if (!validBalances( + amount, amount2, *lptAMMBalanceAfter_, ZeroAllowed::No) || + ammLPTokens(amount, amount2, lptAMMBalanceAfter_->issue()) != + *lptAMMBalanceAfter_) + { + JLOG(j.error()) << "AMMCreate invariant failed: " << amount << " " + << amount2 << " " << *lptAMMBalanceAfter_; + if (enforce) + return false; + } + } + + return true; +} + +bool +ValidAMM::finalizeDelete(bool enforce, TER res, beast::Journal const& j) const +{ + if (ammAccount_) + { + // LCOV_EXCL_START + std::string const msg = (res == tesSUCCESS) + ? 
"AMM object is not deleted on tesSUCCESS" + : "AMM object is changed on tecINCOMPLETE"; + JLOG(j.error()) << "AMMDelete invariant failed: " << msg; + if (enforce) + return false; + // LCOV_EXCL_STOP + } + + return true; +} + +bool +ValidAMM::finalizeDEX(bool enforce, beast::Journal const& j) const +{ + if (ammAccount_) + { + // LCOV_EXCL_START + JLOG(j.error()) << "AMM swap invariant failed: AMM object changed"; + if (enforce) + return false; + // LCOV_EXCL_STOP + } + + return true; +} + +bool +ValidAMM::generalInvariant( + ripple::STTx const& tx, + ripple::ReadView const& view, + ZeroAllowed zeroAllowed, + beast::Journal const& j) const +{ + auto const [amount, amount2] = ammPoolHolds( + view, + *ammAccount_, + tx[sfAsset].get(), + tx[sfAsset2].get(), + fhIGNORE_FREEZE, + j); + // Deposit and Withdrawal invariant: + // sqrt(amount * amount2) >= LPTokens + // all balances are greater than zero + // unless on last withdrawal + auto const poolProductMean = root2(amount * amount2); + bool const nonNegativeBalances = + validBalances(amount, amount2, *lptAMMBalanceAfter_, zeroAllowed); + bool const strongInvariantCheck = poolProductMean >= *lptAMMBalanceAfter_; + // Allow for a small relative error if strongInvariantCheck fails + auto weakInvariantCheck = [&]() { + return *lptAMMBalanceAfter_ != beast::zero && + withinRelativeDistance( + poolProductMean, Number{*lptAMMBalanceAfter_}, Number{1, -11}); + }; + if (!nonNegativeBalances || + (!strongInvariantCheck && !weakInvariantCheck())) + { + JLOG(j.error()) << "AMM " << tx.getTxnType() << " invariant failed: " + << tx.getHash(HashPrefix::transactionID) << " " + << ammPoolChanged_ << " " << amount << " " << amount2 + << " " << poolProductMean << " " + << lptAMMBalanceAfter_->getText() << " " + << ((*lptAMMBalanceAfter_ == beast::zero) + ? Number{1} + : ((*lptAMMBalanceAfter_ - poolProductMean) / + poolProductMean)); + return false; + } + + return true; +} + +bool +ValidAMM::finalizeDeposit( + ripple::STTx const& tx, + ripple::ReadView const& view, + bool enforce, + beast::Journal const& j) const +{ + if (!ammAccount_) + { + // LCOV_EXCL_START + JLOG(j.error()) << "AMMDeposit invariant failed: AMM object is deleted"; + if (enforce) + return false; + // LCOV_EXCL_STOP + } + else if (!generalInvariant(tx, view, ZeroAllowed::No, j) && enforce) + return false; + + return true; +} + +bool +ValidAMM::finalizeWithdraw( + ripple::STTx const& tx, + ripple::ReadView const& view, + bool enforce, + beast::Journal const& j) const +{ + if (!ammAccount_) + { + // Last Withdraw or Clawback deleted AMM + } + else if (!generalInvariant(tx, view, ZeroAllowed::Yes, j)) + { + if (enforce) + return false; + } + + return true; +} + +bool +ValidAMM::finalize( + STTx const& tx, + TER const result, + XRPAmount const, + ReadView const& view, + beast::Journal const& j) +{ + // Delete may return tecINCOMPLETE if there are too many + // trustlines to delete. 
+ if (result != tesSUCCESS && result != tecINCOMPLETE) + return true; + + bool const enforce = view.rules().enabled(fixAMMv1_3); + + switch (tx.getTxnType()) + { + case ttAMM_CREATE: + return finalizeCreate(tx, view, enforce, j); + case ttAMM_DEPOSIT: + return finalizeDeposit(tx, view, enforce, j); + case ttAMM_CLAWBACK: + case ttAMM_WITHDRAW: + return finalizeWithdraw(tx, view, enforce, j); + case ttAMM_BID: + return finalizeBid(enforce, j); + case ttAMM_VOTE: + return finalizeVote(enforce, j); + case ttAMM_DELETE: + return finalizeDelete(enforce, result, j); + case ttCHECK_CASH: + case ttOFFER_CREATE: + case ttPAYMENT: + return finalizeDEX(enforce, j); + default: + break; + } + + return true; +} + } // namespace ripple diff --git a/src/xrpld/app/tx/detail/InvariantCheck.h b/src/xrpld/app/tx/detail/InvariantCheck.h index fdde8427fb..529c05ce0e 100644 --- a/src/xrpld/app/tx/detail/InvariantCheck.h +++ b/src/xrpld/app/tx/detail/InvariantCheck.h @@ -641,6 +641,69 @@ public: beast::Journal const&); }; +class ValidAMM +{ + std::optional ammAccount_; + std::optional lptAMMBalanceAfter_; + std::optional lptAMMBalanceBefore_; + bool ammPoolChanged_; + +public: + enum class ZeroAllowed : bool { No = false, Yes = true }; + + ValidAMM() : ammPoolChanged_{false} + { + } + void + visitEntry( + bool, + std::shared_ptr const&, + std::shared_ptr const&); + + bool + finalize( + STTx const&, + TER const, + XRPAmount const, + ReadView const&, + beast::Journal const&); + +private: + bool + finalizeBid(bool enforce, beast::Journal const&) const; + bool + finalizeVote(bool enforce, beast::Journal const&) const; + bool + finalizeCreate( + STTx const&, + ReadView const&, + bool enforce, + beast::Journal const&) const; + bool + finalizeDelete(bool enforce, TER res, beast::Journal const&) const; + bool + finalizeDeposit( + STTx const&, + ReadView const&, + bool enforce, + beast::Journal const&) const; + // Includes clawback + bool + finalizeWithdraw( + STTx const&, + ReadView const&, + bool enforce, + beast::Journal const&) const; + bool + finalizeDEX(bool enforce, beast::Journal const&) const; + bool + generalInvariant( + STTx const&, + ReadView const&, + ZeroAllowed zeroAllowed, + beast::Journal const&) const; +}; + // additional invariant checks can be declared above and then added to this // tuple using InvariantChecks = std::tuple< @@ -661,7 +724,8 @@ using InvariantChecks = std::tuple< ValidClawback, ValidMPTIssuance, ValidPermissionedDomain, - ValidPermissionedDEX>; + ValidPermissionedDEX, + ValidAMM>; /** * @brief get a tuple of all invariant checks diff --git a/src/xrpld/app/tx/detail/Offer.h b/src/xrpld/app/tx/detail/Offer.h index abc0212335..d6ff4c7699 100644 --- a/src/xrpld/app/tx/detail/Offer.h +++ b/src/xrpld/app/tx/detail/Offer.h @@ -22,6 +22,7 @@ #include +#include #include #include #include @@ -170,8 +171,24 @@ public: * always returns true. 
*/ bool - checkInvariant(TAmounts const&, beast::Journal j) const + checkInvariant(TAmounts const& consumed, beast::Journal j) const { + if (!isFeatureEnabled(fixAMMv1_3)) + return true; + + if (consumed.in > m_amounts.in || consumed.out > m_amounts.out) + { + // LCOV_EXCL_START + JLOG(j.error()) + << "AMMOffer::checkInvariant failed: consumed " + << to_string(consumed.in) << " " << to_string(consumed.out) + << " amounts " << to_string(m_amounts.in) << " " + << to_string(m_amounts.out); + + return false; + // LCOV_EXCL_STOP + } + return true; } }; From 7e24adbdd0b61fb50967c4c6d4b27cc6d81b33f3 Mon Sep 17 00:00:00 2001 From: Vlad <129996061+vvysokikh1@users.noreply.github.com> Date: Mon, 2 Jun 2025 17:13:20 +0100 Subject: [PATCH 044/244] fix: Address NFT interactions with trustlines (#5297) The changes are focused on fixing NFT transactions bypassing the trustline authorization requirement and potential invariant violation when interacting with deep frozen trustlines. --- include/xrpl/protocol/detail/features.macro | 1 + src/test/app/Freeze_test.cpp | 101 ++- src/test/app/NFTokenAuth_test.cpp | 624 ++++++++++++++++++ .../app/tx/detail/NFTokenAcceptOffer.cpp | 195 +++--- src/xrpld/app/tx/detail/NFTokenAcceptOffer.h | 8 - src/xrpld/app/tx/detail/NFTokenUtils.cpp | 122 ++++ src/xrpld/app/tx/detail/NFTokenUtils.h | 14 + 7 files changed, 969 insertions(+), 96 deletions(-) create mode 100644 src/test/app/NFTokenAuth_test.cpp diff --git a/include/xrpl/protocol/detail/features.macro b/include/xrpl/protocol/detail/features.macro index f458b57219..df4af23e96 100644 --- a/include/xrpl/protocol/detail/features.macro +++ b/include/xrpl/protocol/detail/features.macro @@ -32,6 +32,7 @@ // If you add an amendment here, then do not forget to increment `numFeatures` // in include/xrpl/protocol/Feature.h. 
+XRPL_FIX (EnforceNFTokenTrustlineV2, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (AMMv1_3, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(PermissionedDEX, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(Batch, Supported::yes, VoteBehavior::DefaultNo) diff --git a/src/test/app/Freeze_test.cpp b/src/test/app/Freeze_test.cpp index b28e794688..8c2021d657 100644 --- a/src/test/app/Freeze_test.cpp +++ b/src/test/app/Freeze_test.cpp @@ -1885,6 +1885,31 @@ class Freeze_test : public beast::unit_test::suite env.close(); } + // Testing A1 nft buy offer when A2 deep frozen by issuer + if (features[featureDeepFreeze] && + features[fixEnforceNFTokenTrustlineV2]) + { + env(trust(G1, A2["USD"](0), tfSetFreeze | tfSetDeepFreeze)); + env.close(); + + uint256 const nftID{token::getNextID(env, A2, 0u, tfTransferable)}; + env(token::mint(A2, 0), txflags(tfTransferable)); + env.close(); + + auto const buyIdx = keylet::nftoffer(A1, env.seq(A1)).key; + env(token::createOffer(A1, nftID, USD(10)), token::owner(A2)); + env.close(); + + env(token::acceptBuyOffer(A2, buyIdx), ter(tecFROZEN)); + env.close(); + + env(trust(G1, A2["USD"](0), tfClearFreeze | tfClearDeepFreeze)); + env.close(); + + env(token::acceptBuyOffer(A2, buyIdx)); + env.close(); + } + // Testing A2 nft offer sell when A2 frozen by currency holder { auto const sellOfferIndex = createNFTSellOffer(env, A2, USD(10)); @@ -1944,6 +1969,68 @@ class Freeze_test : public beast::unit_test::suite env(trust(A2, limit, tfClearFreeze | tfClearDeepFreeze)); env.close(); } + + // Testing brokered offer acceptance + if (features[featureDeepFreeze] && + features[fixEnforceNFTokenTrustlineV2]) + { + Account broker{"broker"}; + env.fund(XRP(10000), broker); + env.close(); + env(trust(G1, broker["USD"](1000), tfSetFreeze | tfSetDeepFreeze)); + env.close(); + + uint256 const nftID{token::getNextID(env, A2, 0u, tfTransferable)}; + env(token::mint(A2, 0), txflags(tfTransferable)); + env.close(); + + uint256 const sellIdx = keylet::nftoffer(A2, env.seq(A2)).key; + env(token::createOffer(A2, nftID, USD(10)), txflags(tfSellNFToken)); + env.close(); + auto const buyIdx = keylet::nftoffer(A1, env.seq(A1)).key; + env(token::createOffer(A1, nftID, USD(11)), token::owner(A2)); + env.close(); + + env(token::brokerOffers(broker, buyIdx, sellIdx), + token::brokerFee(USD(1)), + ter(tecFROZEN)); + env.close(); + } + + // Testing transfer fee + if (features[featureDeepFreeze] && + features[fixEnforceNFTokenTrustlineV2]) + { + Account minter{"minter"}; + env.fund(XRP(10000), minter); + env.close(); + env(trust(G1, minter["USD"](1000))); + env.close(); + + uint256 const nftID{ + token::getNextID(env, minter, 0u, tfTransferable, 1u)}; + env(token::mint(minter, 0), + token::xferFee(1u), + txflags(tfTransferable)); + env.close(); + + uint256 const minterSellIdx = + keylet::nftoffer(minter, env.seq(minter)).key; + env(token::createOffer(minter, nftID, drops(1)), + txflags(tfSellNFToken)); + env.close(); + env(token::acceptSellOffer(A2, minterSellIdx)); + env.close(); + + uint256 const sellIdx = keylet::nftoffer(A2, env.seq(A2)).key; + env(token::createOffer(A2, nftID, USD(100)), + txflags(tfSellNFToken)); + env.close(); + env(trust(G1, minter["USD"](1000), tfSetFreeze | tfSetDeepFreeze)); + env.close(); + env(token::acceptSellOffer(A1, sellIdx), ter(tecFROZEN)); + env.close(); + } } // Helper function to extract trustline flags from open ledger @@ -2021,10 +2108,16 @@ public: using namespace test::jtx; auto const sa = supported_amendments(); testAll( - sa - 
featureFlowCross - featureDeepFreeze - featurePermissionedDEX); - testAll(sa - featureFlowCross - featurePermissionedDEX); - testAll(sa - featureDeepFreeze - featurePermissionedDEX); - testAll(sa - featurePermissionedDEX); + sa - featureFlowCross - featureDeepFreeze - featurePermissionedDEX - + fixEnforceNFTokenTrustlineV2); + testAll( + sa - featureFlowCross - featurePermissionedDEX - + fixEnforceNFTokenTrustlineV2); + testAll( + sa - featureDeepFreeze - featurePermissionedDEX - + fixEnforceNFTokenTrustlineV2); + testAll(sa - featurePermissionedDEX - fixEnforceNFTokenTrustlineV2); + testAll(sa - fixEnforceNFTokenTrustlineV2); testAll(sa); } }; diff --git a/src/test/app/NFTokenAuth_test.cpp b/src/test/app/NFTokenAuth_test.cpp new file mode 100644 index 0000000000..9558a03f7a --- /dev/null +++ b/src/test/app/NFTokenAuth_test.cpp @@ -0,0 +1,624 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2025 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include + +#include + +namespace ripple { + +class NFTokenAuth_test : public beast::unit_test::suite +{ + auto + mintAndOfferNFT( + test::jtx::Env& env, + test::jtx::Account const& account, + test::jtx::PrettyAmount const& currency, + uint32_t xfee = 0u) + { + using namespace test::jtx; + auto const nftID{ + token::getNextID(env, account, 0u, tfTransferable, xfee)}; + env(token::mint(account, 0), + token::xferFee(xfee), + txflags(tfTransferable)); + env.close(); + + auto const sellIdx = keylet::nftoffer(account, env.seq(account)).key; + env(token::createOffer(account, nftID, currency), + txflags(tfSellNFToken)); + env.close(); + + return std::make_tuple(nftID, sellIdx); + } + +public: + void + testBuyOffer_UnauthorizedSeller(FeatureBitset features) + { + testcase("Unauthorized seller tries to accept buy offer"); + using namespace test::jtx; + + Env env(*this, features); + Account G1{"G1"}; + Account A1{"A1"}; + Account A2{"A2"}; + auto const USD{G1["USD"]}; + + env.fund(XRP(10000), G1, A1, A2); + env(fset(G1, asfRequireAuth)); + env.close(); + + auto const limit = USD(10000); + + env(trust(A1, limit)); + env(trust(G1, limit, A1, tfSetfAuth)); + env(pay(G1, A1, USD(1000))); + + auto const [nftID, _] = mintAndOfferNFT(env, A2, drops(1)); + auto const buyIdx = keylet::nftoffer(A1, env.seq(A1)).key; + + // It should be possible to create a buy offer even if NFT owner is not + // authorized + env(token::createOffer(A1, nftID, USD(10)), token::owner(A2)); + + if (features[fixEnforceNFTokenTrustlineV2]) + { + // test: G1 requires authorization of A2, no trust line exists + env(token::acceptBuyOffer(A2, buyIdx), ter(tecNO_LINE)); + env.close(); + + // trust line created, but 
not authorized + env(trust(A2, limit)); + + // test: G1 requires authorization of A2 + env(token::acceptBuyOffer(A2, buyIdx), ter(tecNO_AUTH)); + env.close(); + } + else + { + // Old behavior: it is possible to sell tokens and receive IOUs + // without the authorization + env(token::acceptBuyOffer(A2, buyIdx)); + env.close(); + + BEAST_EXPECT(env.balance(A2, USD) == USD(10)); + } + } + + void + testCreateBuyOffer_UnauthorizedBuyer(FeatureBitset features) + { + testcase("Unauthorized buyer tries to create buy offer"); + using namespace test::jtx; + + Env env(*this, features); + Account G1{"G1"}; + Account A1{"A1"}; + Account A2{"A2"}; + auto const USD{G1["USD"]}; + + env.fund(XRP(10000), G1, A1, A2); + env(fset(G1, asfRequireAuth)); + env.close(); + + auto const [nftID, _] = mintAndOfferNFT(env, A2, drops(1)); + + // test: check that buyer can't make an offer if they're not authorized. + env(token::createOffer(A1, nftID, USD(10)), + token::owner(A2), + ter(tecUNFUNDED_OFFER)); + env.close(); + + // Artificially create an unauthorized trustline with balance. Don't + // close ledger before running the actual tests against this trustline. + // After ledger is closed, the trustline will not exist. + auto const unauthTrustline = [&](OpenView& view, + beast::Journal) -> bool { + auto const sleA1 = + std::make_shared(keylet::line(A1, G1, G1["USD"].currency)); + sleA1->setFieldAmount(sfBalance, A1["USD"](-1000)); + view.rawInsert(sleA1); + return true; + }; + env.app().openLedger().modify(unauthTrustline); + + if (features[fixEnforceNFTokenTrustlineV2]) + { + // test: check that buyer can't make an offer even with balance + env(token::createOffer(A1, nftID, USD(10)), + token::owner(A2), + ter(tecNO_AUTH)); + } + else + { + // old behavior: can create an offer if balance allows, regardless + // ot authorization + env(token::createOffer(A1, nftID, USD(10)), token::owner(A2)); + } + } + + void + testAcceptBuyOffer_UnauthorizedBuyer(FeatureBitset features) + { + testcase("Seller tries to accept buy offer from unauth buyer"); + using namespace test::jtx; + + Env env(*this, features); + Account G1{"G1"}; + Account A1{"A1"}; + Account A2{"A2"}; + auto const USD{G1["USD"]}; + + env.fund(XRP(10000), G1, A1, A2); + env(fset(G1, asfRequireAuth)); + env.close(); + + auto const limit = USD(10000); + + auto const [nftID, _] = mintAndOfferNFT(env, A2, drops(1)); + + // First we authorize buyer and seller so that he can create buy offer + env(trust(A1, limit)); + env(trust(G1, limit, A1, tfSetfAuth)); + env(pay(G1, A1, USD(10))); + env(trust(A2, limit)); + env(trust(G1, limit, A2, tfSetfAuth)); + env(pay(G1, A2, USD(10))); + env.close(); + + auto const buyIdx = keylet::nftoffer(A1, env.seq(A1)).key; + env(token::createOffer(A1, nftID, USD(10)), token::owner(A2)); + env.close(); + + env(pay(A1, G1, USD(10))); + env(trust(A1, USD(0))); + env(trust(G1, A1["USD"](0))); + env.close(); + + // Replace an existing authorized trustline with artificial unauthorized + // trustline with balance. Don't close ledger before running the actual + // tests against this trustline. After ledger is closed, the trustline + // will not exist. 
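            // (The lambda below writes the synthetic RippleState entry
            // straight into the open ledger via openLedger().modify(); since
            // no transaction backs it, the entry does not survive the next
            // env.close(), which is why the assertions must run first.)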
+ auto const unauthTrustline = [&](OpenView& view, + beast::Journal) -> bool { + auto const sleA1 = + std::make_shared(keylet::line(A1, G1, G1["USD"].currency)); + sleA1->setFieldAmount(sfBalance, A1["USD"](-1000)); + view.rawInsert(sleA1); + return true; + }; + env.app().openLedger().modify(unauthTrustline); + if (features[fixEnforceNFTokenTrustlineV2]) + { + // test: check that offer can't be accepted even with balance + env(token::acceptBuyOffer(A2, buyIdx), ter(tecNO_AUTH)); + } + } + + void + testSellOffer_UnauthorizedSeller(FeatureBitset features) + { + testcase( + "Authorized buyer tries to accept sell offer from unauthorized " + "seller"); + using namespace test::jtx; + + Env env(*this, features); + Account G1{"G1"}; + Account A1{"A1"}; + Account A2{"A2"}; + auto const USD{G1["USD"]}; + + env.fund(XRP(10000), G1, A1, A2); + env(fset(G1, asfRequireAuth)); + env.close(); + + auto const limit = USD(10000); + + env(trust(A1, limit)); + env(trust(G1, limit, A1, tfSetfAuth)); + env(pay(G1, A1, USD(1000))); + + auto const [nftID, _] = mintAndOfferNFT(env, A2, drops(1)); + if (features[fixEnforceNFTokenTrustlineV2]) + { + // test: can't create sell offer if there is no trustline but auth + // required + env(token::createOffer(A2, nftID, USD(10)), + txflags(tfSellNFToken), + ter(tecNO_LINE)); + + env(trust(A2, limit)); + // test: can't create sell offer if not authorized to hold token + env(token::createOffer(A2, nftID, USD(10)), + txflags(tfSellNFToken), + ter(tecNO_AUTH)); + + // Authorizing trustline to make an offer creation possible + env(trust(G1, USD(0), A2, tfSetfAuth)); + env.close(); + auto const sellIdx = keylet::nftoffer(A2, env.seq(A2)).key; + env(token::createOffer(A2, nftID, USD(10)), txflags(tfSellNFToken)); + env.close(); + // + + // Reseting trustline to delete it. This allows to check if + // already existing offers handled correctly + env(trust(A2, USD(0))); + env.close(); + + // test: G1 requires authorization of A1, no trust line exists + env(token::acceptSellOffer(A1, sellIdx), ter(tecNO_LINE)); + env.close(); + + // trust line created, but not authorized + env(trust(A2, limit)); + env.close(); + + // test: G1 requires authorization of A1 + env(token::acceptSellOffer(A1, sellIdx), ter(tecNO_AUTH)); + env.close(); + } + else + { + auto const sellIdx = keylet::nftoffer(A2, env.seq(A2)).key; + + // Old behavior: sell offer can be created without authorization + env(token::createOffer(A2, nftID, USD(10)), txflags(tfSellNFToken)); + env.close(); + + // Old behavior: it is possible to sell NFT and receive IOUs + // without the authorization + env(token::acceptSellOffer(A1, sellIdx)); + env.close(); + + BEAST_EXPECT(env.balance(A2, USD) == USD(10)); + } + } + + void + testSellOffer_UnauthorizedBuyer(FeatureBitset features) + { + testcase("Unauthorized buyer tries to accept sell offer"); + using namespace test::jtx; + + Env env(*this, features); + Account G1{"G1"}; + Account A1{"A1"}; + Account A2{"A2"}; + auto const USD{G1["USD"]}; + + env.fund(XRP(10000), G1, A1, A2); + env(fset(G1, asfRequireAuth)); + env.close(); + + auto const limit = USD(10000); + + env(trust(A2, limit)); + env(trust(G1, limit, A2, tfSetfAuth)); + + auto const [_, sellIdx] = mintAndOfferNFT(env, A2, USD(10)); + + // test: check that buyer can't accept an offer if they're not + // authorized. 
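            // (With no trust line to G1, the buyer's spendable USD balance is
            // zero, so this fails with tecINSUFFICIENT_FUNDS regardless of the
            // amendment; the tecNO_AUTH case below requires the artificially
            // injected, unauthorized trust line that carries a balance.)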
+ env(token::acceptSellOffer(A1, sellIdx), ter(tecINSUFFICIENT_FUNDS)); + env.close(); + + // Creating an artificial unauth trustline + auto const unauthTrustline = [&](OpenView& view, + beast::Journal) -> bool { + auto const sleA1 = + std::make_shared(keylet::line(A1, G1, G1["USD"].currency)); + sleA1->setFieldAmount(sfBalance, A1["USD"](-1000)); + view.rawInsert(sleA1); + return true; + }; + env.app().openLedger().modify(unauthTrustline); + if (features[fixEnforceNFTokenTrustlineV2]) + { + env(token::acceptSellOffer(A1, sellIdx), ter(tecNO_AUTH)); + } + } + + void + testBrokeredAcceptOffer_UnauthorizedBroker(FeatureBitset features) + { + testcase("Unauthorized broker bridges authorized buyer and seller."); + using namespace test::jtx; + + Env env(*this, features); + Account G1{"G1"}; + Account A1{"A1"}; + Account A2{"A2"}; + Account broker{"broker"}; + auto const USD{G1["USD"]}; + + env.fund(XRP(10000), G1, A1, A2, broker); + env(fset(G1, asfRequireAuth)); + env.close(); + + auto const limit = USD(10000); + + env(trust(A1, limit)); + env(trust(G1, limit, A1, tfSetfAuth)); + env(pay(G1, A1, USD(1000))); + env(trust(A2, limit)); + env(trust(G1, limit, A2, tfSetfAuth)); + env(pay(G1, A2, USD(1000))); + env.close(); + + auto const [nftID, sellIdx] = mintAndOfferNFT(env, A2, USD(10)); + auto const buyIdx = keylet::nftoffer(A1, env.seq(A1)).key; + env(token::createOffer(A1, nftID, USD(11)), token::owner(A2)); + env.close(); + + if (features[fixEnforceNFTokenTrustlineV2]) + { + // test: G1 requires authorization of broker, no trust line exists + env(token::brokerOffers(broker, buyIdx, sellIdx), + token::brokerFee(USD(1)), + ter(tecNO_LINE)); + env.close(); + + // trust line created, but not authorized + env(trust(broker, limit)); + env.close(); + + // test: G1 requires authorization of broker + env(token::brokerOffers(broker, buyIdx, sellIdx), + token::brokerFee(USD(1)), + ter(tecNO_AUTH)); + env.close(); + + // test: can still be brokered without broker fee. 
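            // (Without a broker fee the broker never takes possession of any
            // USD, so no trust line or authorization is needed for the broker:
            // the new preclaim check only runs when a non-XRP broker fee is
            // present.)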
+ env(token::brokerOffers(broker, buyIdx, sellIdx)); + env.close(); + } + else + { + // Old behavior: broker can receive IOUs without the authorization + env(token::brokerOffers(broker, buyIdx, sellIdx), + token::brokerFee(USD(1))); + env.close(); + + BEAST_EXPECT(env.balance(broker, USD) == USD(1)); + } + } + + void + testBrokeredAcceptOffer_UnauthorizedBuyer(FeatureBitset features) + { + testcase( + "Authorized broker tries to bridge offers from unauthorized " + "buyer."); + using namespace test::jtx; + + Env env(*this, features); + Account G1{"G1"}; + Account A1{"A1"}; + Account A2{"A2"}; + Account broker{"broker"}; + auto const USD{G1["USD"]}; + + env.fund(XRP(10000), G1, A1, A2, broker); + env(fset(G1, asfRequireAuth)); + env.close(); + + auto const limit = USD(10000); + + env(trust(A1, limit)); + env(trust(G1, USD(0), A1, tfSetfAuth)); + env(pay(G1, A1, USD(1000))); + env(trust(A2, limit)); + env(trust(G1, USD(0), A2, tfSetfAuth)); + env(pay(G1, A2, USD(1000))); + env(trust(broker, limit)); + env(trust(G1, USD(0), broker, tfSetfAuth)); + env(pay(G1, broker, USD(1000))); + env.close(); + + auto const [nftID, sellIdx] = mintAndOfferNFT(env, A2, USD(10)); + auto const buyIdx = keylet::nftoffer(A1, env.seq(A1)).key; + env(token::createOffer(A1, nftID, USD(11)), token::owner(A2)); + env.close(); + + // Resetting buyer's trust line to delete it + env(pay(A1, G1, USD(1000))); + env(trust(A1, USD(0))); + env.close(); + + auto const unauthTrustline = [&](OpenView& view, + beast::Journal) -> bool { + auto const sleA1 = + std::make_shared(keylet::line(A1, G1, G1["USD"].currency)); + sleA1->setFieldAmount(sfBalance, A1["USD"](-1000)); + view.rawInsert(sleA1); + return true; + }; + env.app().openLedger().modify(unauthTrustline); + + if (features[fixEnforceNFTokenTrustlineV2]) + { + // test: G1 requires authorization of A2 + env(token::brokerOffers(broker, buyIdx, sellIdx), + token::brokerFee(USD(1)), + ter(tecNO_AUTH)); + env.close(); + } + } + + void + testBrokeredAcceptOffer_UnauthorizedSeller(FeatureBitset features) + { + testcase( + "Authorized broker tries to bridge offers from unauthorized " + "seller."); + using namespace test::jtx; + + Env env(*this, features); + Account G1{"G1"}; + Account A1{"A1"}; + Account A2{"A2"}; + Account broker{"broker"}; + auto const USD{G1["USD"]}; + + env.fund(XRP(10000), G1, A1, A2, broker); + env(fset(G1, asfRequireAuth)); + env.close(); + + auto const limit = USD(10000); + + env(trust(A1, limit)); + env(trust(G1, limit, A1, tfSetfAuth)); + env(pay(G1, A1, USD(1000))); + env(trust(broker, limit)); + env(trust(G1, limit, broker, tfSetfAuth)); + env(pay(G1, broker, USD(1000))); + env.close(); + + // Authorizing trustline to make an offer creation possible + env(trust(G1, USD(0), A2, tfSetfAuth)); + env.close(); + + auto const [nftID, sellIdx] = mintAndOfferNFT(env, A2, USD(10)); + auto const buyIdx = keylet::nftoffer(A1, env.seq(A1)).key; + env(token::createOffer(A1, nftID, USD(11)), token::owner(A2)); + env.close(); + + // Reseting trustline to delete it. 
This allows to check if + // already existing offers handled correctly + env(trust(A2, USD(0))); + env.close(); + + if (features[fixEnforceNFTokenTrustlineV2]) + { + // test: G1 requires authorization of broker, no trust line exists + env(token::brokerOffers(broker, buyIdx, sellIdx), + token::brokerFee(USD(1)), + ter(tecNO_LINE)); + env.close(); + + // trust line created, but not authorized + env(trust(A2, limit)); + env.close(); + + // test: G1 requires authorization of A2 + env(token::brokerOffers(broker, buyIdx, sellIdx), + token::brokerFee(USD(1)), + ter(tecNO_AUTH)); + env.close(); + + // test: cannot be brokered even without broker fee. + env(token::brokerOffers(broker, buyIdx, sellIdx), ter(tecNO_AUTH)); + env.close(); + } + else + { + // Old behavior: broker can receive IOUs without the authorization + env(token::brokerOffers(broker, buyIdx, sellIdx), + token::brokerFee(USD(1))); + env.close(); + + BEAST_EXPECT(env.balance(A2, USD) == USD(10)); + return; + } + } + + void + testTransferFee_UnauthorizedMinter(FeatureBitset features) + { + testcase("Unauthorized minter receives transfer fee."); + using namespace test::jtx; + + Env env(*this, features); + Account G1{"G1"}; + Account minter{"minter"}; + Account A1{"A1"}; + Account A2{"A2"}; + auto const USD{G1["USD"]}; + + env.fund(XRP(10000), G1, minter, A1, A2); + env(fset(G1, asfRequireAuth)); + env.close(); + + auto const limit = USD(10000); + + env(trust(A1, limit)); + env(trust(G1, limit, A1, tfSetfAuth)); + env(pay(G1, A1, USD(1000))); + env(trust(A2, limit)); + env(trust(G1, limit, A2, tfSetfAuth)); + env(pay(G1, A2, USD(1000))); + + env(trust(minter, limit)); + env.close(); + + // We authorized A1 and A2, but not the minter. + // Now mint NFT + auto const [nftID, minterSellIdx] = + mintAndOfferNFT(env, minter, drops(1), 1); + env(token::acceptSellOffer(A1, minterSellIdx)); + + uint256 const sellIdx = keylet::nftoffer(A1, env.seq(A1)).key; + env(token::createOffer(A1, nftID, USD(100)), txflags(tfSellNFToken)); + + if (features[fixEnforceNFTokenTrustlineV2]) + { + // test: G1 requires authorization + env(token::acceptSellOffer(A2, sellIdx), ter(tecNO_AUTH)); + env.close(); + } + else + { + // Old behavior: can sell for USD. 
Minter can receive tokens + env(token::acceptSellOffer(A2, sellIdx)); + env.close(); + + BEAST_EXPECT(env.balance(minter, USD) == USD(0.001)); + } + } + + void + run() override + { + using namespace test::jtx; + static FeatureBitset const all{supported_amendments()}; + + static std::array const features = { + all - fixEnforceNFTokenTrustlineV2, all}; + + for (auto const feature : features) + { + testBuyOffer_UnauthorizedSeller(feature); + testCreateBuyOffer_UnauthorizedBuyer(feature); + testAcceptBuyOffer_UnauthorizedBuyer(feature); + testSellOffer_UnauthorizedSeller(feature); + testSellOffer_UnauthorizedBuyer(feature); + testBrokeredAcceptOffer_UnauthorizedBroker(feature); + testBrokeredAcceptOffer_UnauthorizedBuyer(feature); + testBrokeredAcceptOffer_UnauthorizedSeller(feature); + testTransferFee_UnauthorizedMinter(feature); + } + } +}; + +BEAST_DEFINE_TESTSUITE_PRIO(NFTokenAuth, tx, ripple, 2); + +} // namespace ripple \ No newline at end of file diff --git a/src/xrpld/app/tx/detail/NFTokenAcceptOffer.cpp b/src/xrpld/app/tx/detail/NFTokenAcceptOffer.cpp index 4c5fdb7683..ab74e5ac39 100644 --- a/src/xrpld/app/tx/detail/NFTokenAcceptOffer.cpp +++ b/src/xrpld/app/tx/detail/NFTokenAcceptOffer.cpp @@ -160,6 +160,27 @@ NFTokenAcceptOffer::preclaim(PreclaimContext const& ctx) if ((*so)[sfAmount] > (*bo)[sfAmount] - *brokerFee) return tecINSUFFICIENT_PAYMENT; + + // Check if broker is allowed to receive the fee with these IOUs. + if (!brokerFee->native() && + ctx.view.rules().enabled(fixEnforceNFTokenTrustlineV2)) + { + auto res = nft::checkTrustlineAuthorized( + ctx.view, + ctx.tx[sfAccount], + ctx.j, + brokerFee->asset().get()); + if (res != tesSUCCESS) + return res; + + res = nft::checkTrustlineDeepFrozen( + ctx.view, + ctx.tx[sfAccount], + ctx.j, + brokerFee->asset().get()); + if (res != tesSUCCESS) + return res; + } } } @@ -208,6 +229,38 @@ NFTokenAcceptOffer::preclaim(PreclaimContext const& ctx) fhZERO_IF_FROZEN, ctx.j) < needed) return tecINSUFFICIENT_FUNDS; + + // Check that the account accepting the buy offer (he's selling the NFT) + // is allowed to receive IOUs. Also check that this offer's creator is + // authorized. But we need to exclude the case when the transaction is + // created by the broker. + if (ctx.view.rules().enabled(fixEnforceNFTokenTrustlineV2) && + !needed.native()) + { + auto res = nft::checkTrustlineAuthorized( + ctx.view, bo->at(sfOwner), ctx.j, needed.asset().get()); + if (res != tesSUCCESS) + return res; + + if (!so) + { + res = nft::checkTrustlineAuthorized( + ctx.view, + ctx.tx[sfAccount], + ctx.j, + needed.asset().get()); + if (res != tesSUCCESS) + return res; + + res = nft::checkTrustlineDeepFrozen( + ctx.view, + ctx.tx[sfAccount], + ctx.j, + needed.asset().get()); + if (res != tesSUCCESS) + return res; + } + } } if (so) @@ -270,42 +323,74 @@ NFTokenAcceptOffer::preclaim(PreclaimContext const& ctx) } // Make sure that we are allowed to hold what the taker will pay us. - // This is a similar approach taken by usual offers. 
if (!needed.native()) { - auto const result = checkAcceptAsset( - ctx.view, - ctx.flags, - (*so)[sfOwner], - ctx.j, - needed.asset().get()); - if (result != tesSUCCESS) - return result; + if (ctx.view.rules().enabled(fixEnforceNFTokenTrustlineV2)) + { + auto res = nft::checkTrustlineAuthorized( + ctx.view, + (*so)[sfOwner], + ctx.j, + needed.asset().get()); + if (res != tesSUCCESS) + return res; + + if (!bo) + { + res = nft::checkTrustlineAuthorized( + ctx.view, + ctx.tx[sfAccount], + ctx.j, + needed.asset().get()); + if (res != tesSUCCESS) + return res; + } + } + + auto const res = nft::checkTrustlineDeepFrozen( + ctx.view, (*so)[sfOwner], ctx.j, needed.asset().get()); + if (res != tesSUCCESS) + return res; } } - // Fix a bug where the transfer of an NFToken with a transfer fee could - // give the NFToken issuer an undesired trust line. - if (ctx.view.rules().enabled(fixEnforceNFTokenTrustline)) + // Additional checks are required in case a minter set a transfer fee for + // this nftoken + auto const& offer = bo ? bo : so; + if (!offer) + // Purely defensive, should be caught in preflight. + return tecINTERNAL; + + auto const& tokenID = offer->at(sfNFTokenID); + auto const& amount = offer->at(sfAmount); + auto const nftMinter = nft::getIssuer(tokenID); + + if (nft::getTransferFee(tokenID) != 0 && !amount.native()) { - std::shared_ptr const& offer = bo ? bo : so; - if (!offer) - // Should be caught in preflight. - return tecINTERNAL; - - uint256 const& tokenID = offer->at(sfNFTokenID); - STAmount const& amount = offer->at(sfAmount); - if (nft::getTransferFee(tokenID) != 0 && + // Fix a bug where the transfer of an NFToken with a transfer fee could + // give the NFToken issuer an undesired trust line. + // Issuer doesn't need a trust line to accept their own currency. + if (ctx.view.rules().enabled(fixEnforceNFTokenTrustline) && (nft::getFlags(tokenID) & nft::flagCreateTrustLines) == 0 && - !amount.native()) + nftMinter != amount.getIssuer() && + !ctx.view.read(keylet::line(nftMinter, amount.issue()))) + return tecNO_LINE; + + // Check that the issuer is allowed to receive IOUs. + if (ctx.view.rules().enabled(fixEnforceNFTokenTrustlineV2)) { - auto const issuer = nft::getIssuer(tokenID); - // Issuer doesn't need a trust line to accept their own currency. - if (issuer != amount.getIssuer() && - !ctx.view.read(keylet::line(issuer, amount.issue()))) - return tecNO_LINE; + auto res = nft::checkTrustlineAuthorized( + ctx.view, nftMinter, ctx.j, amount.asset().get()); + if (res != tesSUCCESS) + return res; + + res = nft::checkTrustlineDeepFrozen( + ctx.view, nftMinter, ctx.j, amount.asset().get()); + if (res != tesSUCCESS) + return res; } } + return tesSUCCESS; } @@ -524,62 +609,4 @@ NFTokenAcceptOffer::doApply() return tecINTERNAL; } -TER -NFTokenAcceptOffer::checkAcceptAsset( - ReadView const& view, - ApplyFlags const flags, - AccountID const id, - beast::Journal const j, - Issue const& issue) -{ - // Only valid for custom currencies - - if (!view.rules().enabled(featureDeepFreeze)) - { - return tesSUCCESS; - } - - XRPL_ASSERT( - !isXRP(issue.currency), - "NFTokenAcceptOffer::checkAcceptAsset : valid to check."); - auto const issuerAccount = view.read(keylet::account(issue.account)); - - if (!issuerAccount) - { - JLOG(j.debug()) - << "delay: can't receive IOUs from non-existent issuer: " - << to_string(issue.account); - - return tecNO_ISSUER; - } - - // An account can not create a trustline to itself, so no line can exist - // to be frozen. 
Additionally, an issuer can always accept its own - // issuance. - if (issue.account == id) - { - return tesSUCCESS; - } - - auto const trustLine = - view.read(keylet::line(id, issue.account, issue.currency)); - - if (!trustLine) - { - return tesSUCCESS; - } - - // There's no difference which side enacted deep freeze, accepting - // tokens shouldn't be possible. - bool const deepFrozen = - (*trustLine)[sfFlags] & (lsfLowDeepFreeze | lsfHighDeepFreeze); - - if (deepFrozen) - { - return tecFROZEN; - } - - return tesSUCCESS; -} - } // namespace ripple diff --git a/src/xrpld/app/tx/detail/NFTokenAcceptOffer.h b/src/xrpld/app/tx/detail/NFTokenAcceptOffer.h index 6a594e2b2c..dff3febbb2 100644 --- a/src/xrpld/app/tx/detail/NFTokenAcceptOffer.h +++ b/src/xrpld/app/tx/detail/NFTokenAcceptOffer.h @@ -44,14 +44,6 @@ private: AccountID const& seller, uint256 const& nfTokenID); - static TER - checkAcceptAsset( - ReadView const& view, - ApplyFlags const flags, - AccountID const id, - beast::Journal const j, - Issue const& issue); - public: static constexpr ConsequencesFactoryType ConsequencesFactory{Normal}; diff --git a/src/xrpld/app/tx/detail/NFTokenUtils.cpp b/src/xrpld/app/tx/detail/NFTokenUtils.cpp index 9c9754aa95..4866a3b385 100644 --- a/src/xrpld/app/tx/detail/NFTokenUtils.cpp +++ b/src/xrpld/app/tx/detail/NFTokenUtils.cpp @@ -1004,6 +1004,18 @@ tokenOfferCreatePreclaim( } } + if (view.rules().enabled(fixEnforceNFTokenTrustlineV2) && !amount.native()) + { + // If this is a sell offer, check that the account is allowed to + // receive IOUs. If this is a buy offer, we have to check that trustline + // is authorized, even though we previosly checked it's balance via + // accountHolds. This is due to a possibility of existence of + // unauthorized trustlines with balance + auto const res = nft::checkTrustlineAuthorized( + view, acctID, j, amount.asset().get()); + if (res != tesSUCCESS) + return res; + } return tesSUCCESS; } @@ -1081,5 +1093,115 @@ tokenOfferCreateApply( return tesSUCCESS; } +TER +checkTrustlineAuthorized( + ReadView const& view, + AccountID const id, + beast::Journal const j, + Issue const& issue) +{ + // Only valid for custom currencies + XRPL_ASSERT( + !isXRP(issue.currency), + "ripple::nft::checkTrustlineAuthorized : valid to check."); + + if (view.rules().enabled(fixEnforceNFTokenTrustlineV2)) + { + auto const issuerAccount = view.read(keylet::account(issue.account)); + if (!issuerAccount) + { + JLOG(j.debug()) << "ripple::nft::checkTrustlineAuthorized: can't " + "receive IOUs from non-existent issuer: " + << to_string(issue.account); + + return tecNO_ISSUER; + } + + // An account can not create a trustline to itself, so no line can + // exist to be authorized. Additionally, an issuer can always accept + // its own issuance. + if (issue.account == id) + { + return tesSUCCESS; + } + + if (issuerAccount->isFlag(lsfRequireAuth)) + { + auto const trustLine = + view.read(keylet::line(id, issue.account, issue.currency)); + + if (!trustLine) + { + return tecNO_LINE; + } + + // Entries have a canonical representation, determined by a + // lexicographical "greater than" comparison employing strict + // weak ordering. Determine which entry we need to access. + if (!trustLine->isFlag( + id > issue.account ? 
lsfLowAuth : lsfHighAuth)) + { + return tecNO_AUTH; + } + } + } + + return tesSUCCESS; +} + +TER +checkTrustlineDeepFrozen( + ReadView const& view, + AccountID const id, + beast::Journal const j, + Issue const& issue) +{ + // Only valid for custom currencies + XRPL_ASSERT( + !isXRP(issue.currency), + "ripple::nft::checkTrustlineDeepFrozen : valid to check."); + + if (view.rules().enabled(featureDeepFreeze)) + { + auto const issuerAccount = view.read(keylet::account(issue.account)); + if (!issuerAccount) + { + JLOG(j.debug()) << "ripple::nft::checkTrustlineDeepFrozen: can't " + "receive IOUs from non-existent issuer: " + << to_string(issue.account); + + return tecNO_ISSUER; + } + + // An account can not create a trustline to itself, so no line can + // exist to be frozen. Additionally, an issuer can always accept its + // own issuance. + if (issue.account == id) + { + return tesSUCCESS; + } + + auto const trustLine = + view.read(keylet::line(id, issue.account, issue.currency)); + + if (!trustLine) + { + return tesSUCCESS; + } + + // There's no difference which side enacted deep freeze, accepting + // tokens shouldn't be possible. + bool const deepFrozen = + (*trustLine)[sfFlags] & (lsfLowDeepFreeze | lsfHighDeepFreeze); + + if (deepFrozen) + { + return tecFROZEN; + } + } + + return tesSUCCESS; +} + } // namespace nft } // namespace ripple diff --git a/src/xrpld/app/tx/detail/NFTokenUtils.h b/src/xrpld/app/tx/detail/NFTokenUtils.h index 38ced59e9c..7ee0541984 100644 --- a/src/xrpld/app/tx/detail/NFTokenUtils.h +++ b/src/xrpld/app/tx/detail/NFTokenUtils.h @@ -152,6 +152,20 @@ tokenOfferCreateApply( beast::Journal j, std::uint32_t txFlags = lsfSellNFToken); +TER +checkTrustlineAuthorized( + ReadView const& view, + AccountID const id, + beast::Journal const j, + Issue const& issue); + +TER +checkTrustlineDeepFrozen( + ReadView const& view, + AccountID const id, + beast::Journal const j, + Issue const& issue); + } // namespace nft } // namespace ripple From 053e1af7ff4886d84a278317cbdfcdab1ecd557f Mon Sep 17 00:00:00 2001 From: Denis Angell Date: Tue, 3 Jun 2025 18:51:55 +0200 Subject: [PATCH 045/244] Add support for XLS-85 Token Escrow (#5185) - Specification: https://github.com/XRPLF/XRPL-Standards/pull/272 - Amendment: `TokenEscrow` - Enables escrowing of IOU and MPT tokens in addition to native XRP. - Allows accounts to lock issued tokens (IOU/MPT) in escrow objects, with support for freeze, authorization, and transfer rates. - Adds new ledger fields (`sfLockedAmount`, `sfIssuerNode`, etc.) to track locked balances for IOU and MPT escrows. - Updates EscrowCreate, EscrowFinish, and EscrowCancel transaction logic to support IOU and MPT assets, including proper handling of trustlines and MPT authorization, transfer rates, and locked balances. - Enforces invariant checks for escrowed IOU/MPT amounts. - Extends GatewayBalances RPC to report locked (escrowed) balances. 
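
For orientation, the sketch below condenses the basic IOU flow exercised by the new `EscrowToken_test.cpp` suite: the issuer opts in with `asfAllowTrustLineLocking`, a holder locks issued tokens in an escrow, and the destination finishes it to receive the locked balance. Accounts, amounts, and the surrounding unit-test fixture (`features`, jtx helpers) are illustrative only, not part of the change itself.

```cpp
using namespace jtx;
using namespace std::chrono;

Env env{*this, supported_amendments()};   // assumes TokenEscrow is supported
auto const alice = Account("alice");
auto const bob = Account("bob");
auto const gw = Account("gateway");
auto const USD = gw["USD"];
auto const baseFee = env.current()->fees().base;

env.fund(XRP(5000), alice, bob, gw);
env(fset(gw, asfAllowTrustLineLocking));  // issuer permits trustline locking
env.close();
env.trust(USD(10'000), alice, bob);
env(pay(gw, alice, USD(5'000)));
env.close();

// Lock 1,000 USD in a conditional escrow; the IOUs are tracked as a locked
// balance rather than leaving alice's trust line outright.
auto const seq = env.seq(alice);
env(escrow::create(alice, bob, USD(1'000)),
    escrow::condition(escrow::cb1),
    escrow::finish_time(env.now() + 1s),
    fee(baseFee * 150));
env.close();

// Finishing with the matching fulfillment releases the locked IOUs to bob.
env(escrow::finish(bob, alice, seq),
    escrow::condition(escrow::cb1),
    escrow::fulfillment(escrow::fb1),
    fee(baseFee * 150));
env.close();
```

The MPT path follows the same shape, with locked amounts mirrored in `sfLockedAmount` on the MPToken and MPTokenIssuance entries.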
--- include/xrpl/protocol/LedgerFormats.h | 3 +- include/xrpl/protocol/STAmount.h | 6 + include/xrpl/protocol/TER.h | 1 + include/xrpl/protocol/TxFlags.h | 1 + include/xrpl/protocol/detail/features.macro | 1 + .../xrpl/protocol/detail/ledger_entries.macro | 4 + include/xrpl/protocol/detail/sfields.macro | 1 + .../xrpl/protocol/detail/transactions.macro | 2 +- include/xrpl/protocol/jss.h | 1 + src/libxrpl/protocol/STAmount.cpp | 151 + src/libxrpl/protocol/TER.cpp | 1 + src/test/app/AMM_test.cpp | 8 +- src/test/app/AccountDelete_test.cpp | 65 +- src/test/app/DepositAuth_test.cpp | 15 +- src/test/app/EscrowToken_test.cpp | 3736 +++++++++++++++++ src/test/app/Escrow_test.cpp | 972 ++--- src/test/app/MPToken_test.cpp | 9 - src/test/jtx.h | 1 + src/test/jtx/Env.h | 9 + src/test/jtx/TestHelpers.h | 121 - src/test/jtx/escrow.h | 185 + src/test/jtx/flags.h | 3 + src/test/jtx/impl/Env.cpp | 42 + src/test/jtx/impl/TestHelpers.cpp | 36 - src/test/jtx/impl/escrow.cpp | 82 + src/test/ledger/Invariants_test.cpp | 15 - src/test/protocol/STAmount_test.cpp | 368 ++ src/test/rpc/AccountLines_test.cpp | 21 +- src/test/rpc/AccountSet_test.cpp | 6 + src/xrpld/app/tx/detail/Escrow.cpp | 873 +++- src/xrpld/app/tx/detail/Escrow.h | 3 + src/xrpld/app/tx/detail/InvariantCheck.cpp | 71 +- src/xrpld/app/tx/detail/MPTokenAuthorize.cpp | 35 + src/xrpld/app/tx/detail/MPTokenAuthorize.h | 7 + .../app/tx/detail/MPTokenIssuanceDestroy.cpp | 3 + src/xrpld/app/tx/detail/SetAccount.cpp | 9 + src/xrpld/ledger/View.h | 22 + src/xrpld/ledger/detail/View.cpp | 256 ++ src/xrpld/rpc/handlers/GatewayBalances.cpp | 41 + 39 files changed, 6420 insertions(+), 766 deletions(-) create mode 100644 src/test/app/EscrowToken_test.cpp create mode 100644 src/test/jtx/escrow.h create mode 100644 src/test/jtx/impl/escrow.cpp diff --git a/include/xrpl/protocol/LedgerFormats.h b/include/xrpl/protocol/LedgerFormats.h index 58ebbe69cc..e3efe8fec2 100644 --- a/include/xrpl/protocol/LedgerFormats.h +++ b/include/xrpl/protocol/LedgerFormats.h @@ -145,7 +145,8 @@ enum LedgerSpecificFlags { 0x10000000, // True, reject new paychans lsfDisallowIncomingTrustline = 0x20000000, // True, reject new trustlines (only if no issued assets) - // 0x40000000 is available + lsfAllowTrustLineLocking = + 0x40000000, // True, enable trustline locking lsfAllowTrustLineClawback = 0x80000000, // True, enable clawback diff --git a/include/xrpl/protocol/STAmount.h b/include/xrpl/protocol/STAmount.h index c66d273254..f1e34463b6 100644 --- a/include/xrpl/protocol/STAmount.h +++ b/include/xrpl/protocol/STAmount.h @@ -703,6 +703,12 @@ isXRP(STAmount const& amount) return amount.native(); } +bool +canAdd(STAmount const& amt1, STAmount const& amt2); + +bool +canSubtract(STAmount const& amt1, STAmount const& amt2); + // Since `canonicalize` does not have access to a ledger, this is needed to put // the low-level routine stAmountCanonicalize on an amendment switch. Only // transactions need to use this switchover. 
Outside of a transaction it's safe diff --git a/include/xrpl/protocol/TER.h b/include/xrpl/protocol/TER.h index 4483d6251a..f71153cddb 100644 --- a/include/xrpl/protocol/TER.h +++ b/include/xrpl/protocol/TER.h @@ -360,6 +360,7 @@ enum TECcodes : TERUnderlyingType { tecWRONG_ASSET = 194, tecLIMIT_EXCEEDED = 195, tecPSEUDO_ACCOUNT = 196, + tecPRECISION_LOSS = 197, }; //------------------------------------------------------------------------------ diff --git a/include/xrpl/protocol/TxFlags.h b/include/xrpl/protocol/TxFlags.h index 80f6a78727..2ce7a6b6a8 100644 --- a/include/xrpl/protocol/TxFlags.h +++ b/include/xrpl/protocol/TxFlags.h @@ -92,6 +92,7 @@ constexpr std::uint32_t asfDisallowIncomingCheck = 13; constexpr std::uint32_t asfDisallowIncomingPayChan = 14; constexpr std::uint32_t asfDisallowIncomingTrustline = 15; constexpr std::uint32_t asfAllowTrustLineClawback = 16; +constexpr std::uint32_t asfAllowTrustLineLocking = 17; // OfferCreate flags: constexpr std::uint32_t tfPassive = 0x00010000; diff --git a/include/xrpl/protocol/detail/features.macro b/include/xrpl/protocol/detail/features.macro index df4af23e96..1be0af5d01 100644 --- a/include/xrpl/protocol/detail/features.macro +++ b/include/xrpl/protocol/detail/features.macro @@ -32,6 +32,7 @@ // If you add an amendment here, then do not forget to increment `numFeatures` // in include/xrpl/protocol/Feature.h. +XRPL_FEATURE(TokenEscrow, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (EnforceNFTokenTrustlineV2, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (AMMv1_3, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(PermissionedDEX, Supported::yes, VoteBehavior::DefaultNo) diff --git a/include/xrpl/protocol/detail/ledger_entries.macro b/include/xrpl/protocol/detail/ledger_entries.macro index 06fe9d45bd..46c6e60db3 100644 --- a/include/xrpl/protocol/detail/ledger_entries.macro +++ b/include/xrpl/protocol/detail/ledger_entries.macro @@ -354,6 +354,8 @@ LEDGER_ENTRY(ltESCROW, 0x0075, Escrow, escrow, ({ {sfPreviousTxnID, soeREQUIRED}, {sfPreviousTxnLgrSeq, soeREQUIRED}, {sfDestinationNode, soeOPTIONAL}, + {sfTransferRate, soeOPTIONAL}, + {sfIssuerNode, soeOPTIONAL}, })) /** A ledger object describing a single unidirectional XRP payment channel. 
@@ -405,6 +407,7 @@ LEDGER_ENTRY(ltMPTOKEN_ISSUANCE, 0x007e, MPTokenIssuance, mpt_issuance, ({ {sfAssetScale, soeDEFAULT}, {sfMaximumAmount, soeOPTIONAL}, {sfOutstandingAmount, soeREQUIRED}, + {sfLockedAmount, soeOPTIONAL}, {sfMPTokenMetadata, soeOPTIONAL}, {sfPreviousTxnID, soeREQUIRED}, {sfPreviousTxnLgrSeq, soeREQUIRED}, @@ -418,6 +421,7 @@ LEDGER_ENTRY(ltMPTOKEN, 0x007f, MPToken, mptoken, ({ {sfAccount, soeREQUIRED}, {sfMPTokenIssuanceID, soeREQUIRED}, {sfMPTAmount, soeDEFAULT}, + {sfLockedAmount, soeOPTIONAL}, {sfOwnerNode, soeREQUIRED}, {sfPreviousTxnID, soeREQUIRED}, {sfPreviousTxnLgrSeq, soeREQUIRED}, diff --git a/include/xrpl/protocol/detail/sfields.macro b/include/xrpl/protocol/detail/sfields.macro index 2053ac94bb..537fcae479 100644 --- a/include/xrpl/protocol/detail/sfields.macro +++ b/include/xrpl/protocol/detail/sfields.macro @@ -144,6 +144,7 @@ TYPED_SFIELD(sfOutstandingAmount, UINT64, 25, SField::sMD_BaseTen|SFie TYPED_SFIELD(sfMPTAmount, UINT64, 26, SField::sMD_BaseTen|SField::sMD_Default) TYPED_SFIELD(sfIssuerNode, UINT64, 27) TYPED_SFIELD(sfSubjectNode, UINT64, 28) +TYPED_SFIELD(sfLockedAmount, UINT64, 29, SField::sMD_BaseTen|SField::sMD_Default) // 128-bit TYPED_SFIELD(sfEmailHash, UINT128, 1) diff --git a/include/xrpl/protocol/detail/transactions.macro b/include/xrpl/protocol/detail/transactions.macro index 6992410e4c..1d59e71850 100644 --- a/include/xrpl/protocol/detail/transactions.macro +++ b/include/xrpl/protocol/detail/transactions.macro @@ -44,7 +44,7 @@ TRANSACTION(ttPAYMENT, 0, Payment, Delegation::delegatable, ({ /** This transaction type creates an escrow object. */ TRANSACTION(ttESCROW_CREATE, 1, EscrowCreate, Delegation::delegatable, ({ {sfDestination, soeREQUIRED}, - {sfAmount, soeREQUIRED}, + {sfAmount, soeREQUIRED, soeMPTSupported}, {sfCondition, soeOPTIONAL}, {sfCancelAfter, soeOPTIONAL}, {sfFinishAfter, soeOPTIONAL}, diff --git a/include/xrpl/protocol/jss.h b/include/xrpl/protocol/jss.h index 9dff4cc4f3..67a045fa58 100644 --- a/include/xrpl/protocol/jss.h +++ b/include/xrpl/protocol/jss.h @@ -391,6 +391,7 @@ JSS(load_fee); // out: LoadFeeTrackImp, NetworkOPs JSS(local); // out: resource/Logic.h JSS(local_txs); // out: GetCounts JSS(local_static_keys); // out: ValidatorList +JSS(locked); // out: GatewayBalances JSS(low); // out: BookChanges JSS(lowest_sequence); // out: AccountInfo JSS(lowest_ticket); // out: AccountInfo diff --git a/src/libxrpl/protocol/STAmount.cpp b/src/libxrpl/protocol/STAmount.cpp index 02de5d4c58..845ad6481a 100644 --- a/src/libxrpl/protocol/STAmount.cpp +++ b/src/libxrpl/protocol/STAmount.cpp @@ -506,6 +506,157 @@ getRate(STAmount const& offerOut, STAmount const& offerIn) return 0; } +/** + * @brief Safely checks if two STAmount values can be added without overflow, + * underflow, or precision loss. + * + * This function determines whether the addition of two STAmount objects is + * safe, depending on their type: + * - For XRP amounts, it checks for integer overflow and underflow. + * - For IOU amounts, it checks for acceptable precision loss. + * - For MPT amounts, it checks for overflow and underflow within 63-bit signed + * integer limits. + * - If either amount is zero, addition is always considered safe. + * - If the amounts are of different currencies or types, addition is not + * allowed. + * + * @param a The first STAmount to add. + * @param b The second STAmount to add. + * @return true if the addition is safe; false otherwise. 
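 *
 * Illustrative sketch (editorial addition, not part of the original patch):
 * for IOU values the guard rejects additions where the smaller operand would
 * be lost once the two exponents are aligned, e.g.
 *
 *     STAmount const big{IOUAmount{1, 16}, noIssue()};     // 1e16
 *     STAmount const one{IOUAmount{1, 0}, noIssue()};      // 1
 *     canAdd(big, one);                                     // false: "one" is lost entirely
 *     canAdd(big, STAmount{IOUAmount{1, 13}, noIssue()});   // true: no loss at this scale
 *
 * This is the situation the token-escrow code reports as tecPRECISION_LOSS
 * (cf. the precision-loss case in EscrowToken_test.cpp).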
+ */ +bool +canAdd(STAmount const& a, STAmount const& b) +{ + // cannot add different currencies + if (!areComparable(a, b)) + return false; + + // special case: adding anything to zero is always fine + if (a == beast::zero || b == beast::zero) + return true; + + // XRP case (overflow & underflow check) + if (isXRP(a) && isXRP(b)) + { + XRPAmount A = a.xrp(); + XRPAmount B = b.xrp(); + + if ((B > XRPAmount{0} && + A > XRPAmount{std::numeric_limits::max()} - + B) || + (B < XRPAmount{0} && + A < XRPAmount{std::numeric_limits::min()} - + B)) + { + return false; + } + return true; + } + + // IOU case (precision check) + if (a.holds() && b.holds()) + { + static STAmount const one{IOUAmount{1, 0}, noIssue()}; + static STAmount const maxLoss{IOUAmount{1, -4}, noIssue()}; + STAmount lhs = divide((a - b) + b, a, noIssue()) - one; + STAmount rhs = divide((b - a) + a, b, noIssue()) - one; + return ((rhs.negative() ? -rhs : rhs) + + (lhs.negative() ? -lhs : lhs)) <= maxLoss; + } + + // MPT (overflow & underflow check) + if (a.holds() && b.holds()) + { + MPTAmount A = a.mpt(); + MPTAmount B = b.mpt(); + if ((B > MPTAmount{0} && + A > MPTAmount{std::numeric_limits::max()} - + B) || + (B < MPTAmount{0} && + A < MPTAmount{std::numeric_limits::min()} - + B)) + { + return false; + } + + return true; + } + return false; +} + +/** + * @brief Determines if it is safe to subtract one STAmount from another. + * + * This function checks whether subtracting amount `b` from amount `a` is valid, + * considering currency compatibility and underflow conditions for specific + * types. + * + * - Subtracting zero is always allowed. + * - Subtraction is only allowed between comparable currencies. + * - For XRP amounts, ensures no underflow or overflow occurs. + * - For IOU amounts, subtraction is always allowed (no underflow). + * - For MPT amounts, ensures no underflow or overflow occurs. + * + * @param a The minuend (amount to subtract from). + * @param b The subtrahend (amount to subtract). + * @return true if subtraction is allowed, false otherwise. 
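 *
 * Illustrative sketch (editorial addition, not part of the original patch):
 * for XRP and MPT this is a plain signed-range check, e.g. subtracting a
 * 10-drop amount from a 5-drop amount is rejected (B > 0 and A < B), while
 * IOU subtraction is always allowed here because the signed IOU
 * representation cannot underflow.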
+ */ +bool +canSubtract(STAmount const& a, STAmount const& b) +{ + // Cannot subtract different currencies + if (!areComparable(a, b)) + return false; + + // Special case: subtracting zero is always fine + if (b == beast::zero) + return true; + + // XRP case (underflow & overflow check) + if (isXRP(a) && isXRP(b)) + { + XRPAmount A = a.xrp(); + XRPAmount B = b.xrp(); + // Check for underflow + if (B > XRPAmount{0} && A < B) + return false; + + // Check for overflow + if (B < XRPAmount{0} && + A > XRPAmount{std::numeric_limits::max()} + + B) + return false; + + return true; + } + + // IOU case (no underflow) + if (a.holds() && b.holds()) + { + return true; + } + + // MPT case (underflow & overflow check) + if (a.holds() && b.holds()) + { + MPTAmount A = a.mpt(); + MPTAmount B = b.mpt(); + + // Underflow check + if (B > MPTAmount{0} && A < B) + return false; + + // Overflow check + if (B < MPTAmount{0} && + A > MPTAmount{std::numeric_limits::max()} + + B) + return false; + return true; + } + + return false; +} + void STAmount::setJson(Json::Value& elem) const { diff --git a/src/libxrpl/protocol/TER.cpp b/src/libxrpl/protocol/TER.cpp index 68125fab83..18bf0e2936 100644 --- a/src/libxrpl/protocol/TER.cpp +++ b/src/libxrpl/protocol/TER.cpp @@ -126,6 +126,7 @@ transResults() MAKE_ERROR(tecWRONG_ASSET, "Wrong asset given."), MAKE_ERROR(tecLIMIT_EXCEEDED, "Limit exceeded."), MAKE_ERROR(tecPSEUDO_ACCOUNT, "This operation is not allowed against a pseudo-account."), + MAKE_ERROR(tecPRECISION_LOSS, "The amounts used by the transaction cannot interact."), MAKE_ERROR(tefALREADY, "The exact transaction was already in this ledger."), MAKE_ERROR(tefBAD_ADD_AUTH, "Not authorized to add account."), diff --git a/src/test/app/AMM_test.cpp b/src/test/app/AMM_test.cpp index 7211d37730..2ee9e5f1f3 100644 --- a/src/test/app/AMM_test.cpp +++ b/src/test/app/AMM_test.cpp @@ -3651,10 +3651,10 @@ private: // Can't pay into AMM with escrow. testAMM([&](AMM& ammAlice, Env& env) { auto const baseFee = env.current()->fees().base; - env(escrow(carol, ammAlice.ammAccount(), XRP(1)), - condition(cb1), - finish_time(env.now() + 1s), - cancel_time(env.now() + 2s), + env(escrow::create(carol, ammAlice.ammAccount(), XRP(1)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + escrow::cancel_time(env.now() + 2s), fee(baseFee * 150), ter(tecNO_PERMISSION)); }); diff --git a/src/test/app/AccountDelete_test.cpp b/src/test/app/AccountDelete_test.cpp index 03283e4611..1ac0256dcb 100644 --- a/src/test/app/AccountDelete_test.cpp +++ b/src/test/app/AccountDelete_test.cpp @@ -335,26 +335,11 @@ public: env(check::cancel(becky, checkId)); env.close(); - // Lambda to create an escrow. 
- auto escrowCreate = [](jtx::Account const& account, - jtx::Account const& to, - STAmount const& amount, - NetClock::time_point const& cancelAfter) { - Json::Value jv; - jv[jss::TransactionType] = jss::EscrowCreate; - jv[jss::Account] = account.human(); - jv[jss::Destination] = to.human(); - jv[jss::Amount] = amount.getJson(JsonOptions::none); - jv[sfFinishAfter.jsonName] = - cancelAfter.time_since_epoch().count() + 1; - jv[sfCancelAfter.jsonName] = - cancelAfter.time_since_epoch().count() + 2; - return jv; - }; - using namespace std::chrono_literals; std::uint32_t const escrowSeq{env.seq(alice)}; - env(escrowCreate(alice, becky, XRP(333), env.now() + 2s)); + env(escrow::create(alice, becky, XRP(333)), + escrow::finish_time(env.now() + 3s), + escrow::cancel_time(env.now() + 4s)); env.close(); // alice and becky should be unable to delete their accounts because @@ -366,17 +351,39 @@ public: // Now cancel the escrow, but create a payment channel between // alice and becky. - // Lambda to cancel an escrow. - auto escrowCancel = - [](Account const& account, Account const& from, std::uint32_t seq) { - Json::Value jv; - jv[jss::TransactionType] = jss::EscrowCancel; - jv[jss::Account] = account.human(); - jv[sfOwner.jsonName] = from.human(); - jv[sfOfferSequence.jsonName] = seq; - return jv; - }; - env(escrowCancel(becky, alice, escrowSeq)); + bool const withTokenEscrow = + env.current()->rules().enabled(featureTokenEscrow); + if (withTokenEscrow) + { + Account const gw1("gw1"); + Account const carol("carol"); + auto const USD = gw1["USD"]; + env.fund(XRP(100000), carol, gw1); + env(fset(gw1, asfAllowTrustLineLocking)); + env.close(); + env.trust(USD(10000), carol); + env.close(); + env(pay(gw1, carol, USD(100))); + env.close(); + + std::uint32_t const escrowSeq{env.seq(carol)}; + env(escrow::create(carol, becky, USD(1)), + escrow::finish_time(env.now() + 3s), + escrow::cancel_time(env.now() + 4s)); + env.close(); + + incLgrSeqForAccDel(env, gw1); + + env(acctdelete(gw1, becky), + fee(acctDelFee), + ter(tecHAS_OBLIGATIONS)); + env.close(); + + env(escrow::cancel(becky, carol, escrowSeq)); + env.close(); + } + + env(escrow::cancel(becky, alice, escrowSeq)); env.close(); Keylet const alicePayChanKey{ diff --git a/src/test/app/DepositAuth_test.cpp b/src/test/app/DepositAuth_test.cpp index c8dc3c00eb..6f314e3a79 100644 --- a/src/test/app/DepositAuth_test.cpp +++ b/src/test/app/DepositAuth_test.cpp @@ -714,12 +714,12 @@ struct DepositPreauth_test : public beast::unit_test::suite if (!supportsPreauth) { auto const seq1 = env.seq(alice); - env(escrow(alice, becky, XRP(100)), - finish_time(env.now() + 1s)); + env(escrow::create(alice, becky, XRP(100)), + escrow::finish_time(env.now() + 1s)); env.close(); // Failed as rule is disabled - env(finish(gw, alice, seq1), + env(escrow::finish(gw, alice, seq1), fee(1500), ter(tecNO_PERMISSION)); env.close(); @@ -1387,12 +1387,13 @@ struct DepositPreauth_test : public beast::unit_test::suite env.close(); auto const seq = env.seq(alice); - env(escrow(alice, bob, XRP(1000)), finish_time(env.now() + 1s)); + env(escrow::create(alice, bob, XRP(1000)), + escrow::finish_time(env.now() + 1s)); env.close(); // zelda can't finish escrow with invalid credentials { - env(finish(zelda, alice, seq), + env(escrow::finish(zelda, alice, seq), credentials::ids({}), ter(temMALFORMED)); env.close(); @@ -1404,14 +1405,14 @@ struct DepositPreauth_test : public beast::unit_test::suite "0E0B04ED60588A758B67E21FBBE95AC5A63598BA951761DC0EC9C08D7E" "01E034"; - env(finish(zelda, alice, 
seq), + env(escrow::finish(zelda, alice, seq), credentials::ids({invalidIdx}), ter(tecBAD_CREDENTIALS)); env.close(); } { // Ledger closed, time increased, zelda can't finish escrow - env(finish(zelda, alice, seq), + env(escrow::finish(zelda, alice, seq), credentials::ids({credIdx}), fee(1500), ter(tecEXPIRED)); diff --git a/src/test/app/EscrowToken_test.cpp b/src/test/app/EscrowToken_test.cpp new file mode 100644 index 0000000000..da9610f0c3 --- /dev/null +++ b/src/test/app/EscrowToken_test.cpp @@ -0,0 +1,3736 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2024 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include + +#include +#include + +#include +#include +#include +#include + +#include +#include + +namespace ripple { +namespace test { + +struct EscrowToken_test : public beast::unit_test::suite +{ + static uint64_t + mptEscrowed( + jtx::Env const& env, + jtx::Account const& account, + jtx::MPT const& mpt) + { + auto const sle = env.le(keylet::mptoken(mpt.mpt(), account)); + if (sle && sle->isFieldPresent(sfLockedAmount)) + return (*sle)[sfLockedAmount]; + return 0; + } + + static uint64_t + issuerMPTEscrowed(jtx::Env const& env, jtx::MPT const& mpt) + { + auto const sle = env.le(keylet::mptIssuance(mpt.mpt())); + if (sle && sle->isFieldPresent(sfLockedAmount)) + return (*sle)[sfLockedAmount]; + return 0; + } + + void + issuerIOUEscrowed( + jtx::Env& env, + jtx::Account const& account, + Currency const& currency, + int const& outstanding, + int const& locked) + { + Json::Value params; + params[jss::account] = account.human(); + auto jrr = env.rpc("json", "gateway_balances", to_string(params)); + auto const result = jrr[jss::result]; + auto const actualOutstanding = + result[jss::obligations][to_string(currency)]; + BEAST_EXPECT(actualOutstanding == to_string(outstanding)); + if (locked != 0) + { + auto const actualEscrowed = + result[jss::locked][to_string(currency)]; + BEAST_EXPECT(actualEscrowed == to_string(locked)); + } + } + + void + testIOUEnablement(FeatureBitset features) + { + testcase("IOU Enablement"); + + using namespace jtx; + using namespace std::chrono; + + for (bool const withTokenEscrow : {false, true}) + { + auto const amend = + withTokenEscrow ? 
features : features - featureTokenEscrow; + Env env{*this, amend}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + env.fund(XRP(5000), alice, bob, gw); + env(fset(gw, asfAllowTrustLineLocking)); + env.close(); + env.trust(USD(10'000), alice, bob); + env.close(); + env(pay(gw, alice, USD(5000))); + env(pay(gw, bob, USD(5000))); + env.close(); + + auto const createResult = + withTokenEscrow ? ter(tesSUCCESS) : ter(temBAD_AMOUNT); + auto const finishResult = + withTokenEscrow ? ter(tesSUCCESS) : ter(tecNO_TARGET); + + auto const seq1 = env.seq(alice); + env(escrow::create(alice, bob, USD(1'000)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + createResult); + env.close(); + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150), + finishResult); + env.close(); + + auto const seq2 = env.seq(alice); + env(escrow::create(alice, bob, USD(1'000)), + escrow::condition(escrow::cb2), + escrow::finish_time(env.now() + 1s), + escrow::cancel_time(env.now() + 2s), + fee(baseFee * 150), + createResult); + env.close(); + env(escrow::cancel(bob, alice, seq2), finishResult); + env.close(); + } + } + + void + testIOUAllowLockingFlag(FeatureBitset features) + { + testcase("IOU Allow Locking Flag"); + + using namespace jtx; + using namespace std::chrono; + + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + env.fund(XRP(5000), alice, bob, gw); + env(fset(gw, asfAllowTrustLineLocking)); + env.close(); + env.trust(USD(10'000), alice, bob); + env.close(); + env(pay(gw, alice, USD(5000))); + env(pay(gw, bob, USD(5000))); + env.close(); + + // Create Escrow #1 & #2 + auto const seq1 = env.seq(alice); + env(escrow::create(alice, bob, USD(1'000)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + auto const seq2 = env.seq(alice); + env(escrow::create(alice, bob, USD(1'000)), + escrow::finish_time(env.now() + 1s), + escrow::cancel_time(env.now() + 3s), + fee(baseFee), + ter(tesSUCCESS)); + env.close(); + + // Clear the asfAllowTrustLineLocking flag + env(fclear(gw, asfAllowTrustLineLocking)); + env.close(); + env.require(nflags(gw, asfAllowTrustLineLocking)); + + // Cannot Create Escrow without asfAllowTrustLineLocking + env(escrow::create(alice, bob, USD(1'000)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecNO_PERMISSION)); + env.close(); + + // Can finish the escrow created before the flag was cleared + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + // Can cancel the escrow created before the flag was cleared + env(escrow::cancel(bob, alice, seq2), ter(tesSUCCESS)); + env.close(); + } + + void + testIOUCreatePreflight(FeatureBitset features) + { + testcase("IOU Create Preflight"); + using namespace test::jtx; + using namespace std::literals; + + // temBAD_FEE: Exercises invalid preflight1. 
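        // (A negative fee is rejected by the generic preflight1 checks before
        // any escrow-specific validation runs, hence temBAD_FEE rather than a
        // token-escrow specific code.)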
+ { + Env env{*this, features}; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + env.fund(XRP(5000), alice, bob, gw); + + env(escrow::create(alice, bob, USD(1)), + escrow::finish_time(env.now() + 1s), + fee(XRP(-1)), + ter(temBAD_FEE)); + env.close(); + } + + // temBAD_AMOUNT: amount <= 0 + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + env.fund(XRP(5000), alice, bob, gw); + + env(escrow::create(alice, bob, USD(-1)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(temBAD_AMOUNT)); + env.close(); + } + + // temBAD_CURRENCY: badCurrency() == amount.getCurrency() + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account{"gateway"}; + auto const BAD = IOU(gw, badCurrency()); + env.fund(XRP(5000), alice, bob, gw); + + env(escrow::create(alice, bob, BAD(1)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(temBAD_CURRENCY)); + env.close(); + } + } + + void + testIOUCreatePreclaim(FeatureBitset features) + { + testcase("IOU Create Preclaim"); + using namespace test::jtx; + using namespace std::literals; + + // tecNO_PERMISSION: issuer is the same as the account + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + env.fund(XRP(5000), alice, bob, gw); + + env(escrow::create(gw, alice, USD(1)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecNO_PERMISSION)); + env.close(); + } + + // tecNO_ISSUER: Issuer does not exist + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + env.fund(XRP(5000), alice, bob); + env.close(); + env.memoize(gw); + + env(escrow::create(alice, bob, USD(1)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecNO_ISSUER)); + env.close(); + } + + // tecNO_PERMISSION: asfAllowTrustLineLocking is not set + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + env.fund(XRP(5000), alice, bob, gw); + env.close(); + env.trust(USD(10'000), alice, bob); + env.close(); + env(pay(gw, alice, USD(5000))); + env(pay(gw, bob, USD(5000))); + env.close(); + + env(escrow::create(gw, alice, USD(1)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecNO_PERMISSION)); + env.close(); + } + + // tecNO_LINE: account does not have a trustline to the issuer + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + env.fund(XRP(5000), alice, bob, gw); + env(fset(gw, 
asfAllowTrustLineLocking)); + env.close(); + env(escrow::create(alice, bob, USD(1)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecNO_LINE)); + env.close(); + } + + // tecNO_PERMISSION: Not testable + // tecNO_PERMISSION: Not testable + // tecNO_AUTH: requireAuth + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + env.fund(XRP(5000), alice, bob, gw); + env(fset(gw, asfAllowTrustLineLocking)); + env(fset(gw, asfRequireAuth)); + env.close(); + env.trust(USD(10'000), alice, bob); + env.close(); + + env(escrow::create(alice, bob, USD(1)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecNO_AUTH)); + env.close(); + } + + // tecNO_AUTH: requireAuth + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + auto const aliceUSD = alice["USD"]; + env.fund(XRP(5000), alice, bob, gw); + env(fset(gw, asfAllowTrustLineLocking)); + env(fset(gw, asfRequireAuth)); + env.close(); + env(trust(gw, aliceUSD(10'000)), txflags(tfSetfAuth)); + env.trust(USD(10'000), alice, bob); + env.close(); + + env(escrow::create(alice, bob, USD(1)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecNO_AUTH)); + env.close(); + } + + // tecFROZEN: account is frozen + { + // Env Setup + Env env{*this, features}; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + auto const baseFee = env.current()->fees().base; + env.fund(XRP(10'000), alice, bob, gw); + env(fset(gw, asfAllowTrustLineLocking)); + env.close(); + env(trust(alice, USD(100'000))); + env(trust(bob, USD(100'000))); + env.close(); + env(pay(gw, alice, USD(10'000))); + env(pay(gw, bob, USD(10'000))); + env.close(); + + // set freeze on alice trustline + env(trust(gw, USD(10'000), alice, tfSetFreeze)); + env.close(); + + env(escrow::create(alice, bob, USD(1)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecFROZEN)); + env.close(); + } + + // tecFROZEN: dest is frozen + { + // Env Setup + Env env{*this, features}; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + auto const baseFee = env.current()->fees().base; + env.fund(XRP(10'000), alice, bob, gw); + env(fset(gw, asfAllowTrustLineLocking)); + env.close(); + env(trust(alice, USD(100'000))); + env(trust(bob, USD(100'000))); + env.close(); + env(pay(gw, alice, USD(10'000))); + env(pay(gw, bob, USD(10'000))); + env.close(); + + // set freeze on bob trustline + env(trust(gw, USD(10'000), bob, tfSetFreeze)); + env.close(); + + env(escrow::create(alice, bob, USD(1)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecFROZEN)); + env.close(); + } + + // tecINSUFFICIENT_FUNDS + { + // Env Setup + Env env{*this, features}; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + auto const baseFee = env.current()->fees().base; + env.fund(XRP(10'000), 
alice, bob, gw); + env(fset(gw, asfAllowTrustLineLocking)); + env.close(); + env(trust(alice, USD(100'000))); + env(trust(bob, USD(100'000))); + env.close(); + + env(escrow::create(alice, bob, USD(1)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecINSUFFICIENT_FUNDS)); + env.close(); + } + + // tecINSUFFICIENT_FUNDS + { + // Env Setup + Env env{*this, features}; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + auto const baseFee = env.current()->fees().base; + env.fund(XRP(10'000), alice, bob, gw); + env(fset(gw, asfAllowTrustLineLocking)); + env.close(); + env(trust(alice, USD(100'000))); + env(trust(bob, USD(100'000))); + env.close(); + env(pay(gw, alice, USD(10'000))); + env(pay(gw, bob, USD(10'000))); + env.close(); + + env(escrow::create(alice, bob, USD(10'001)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecINSUFFICIENT_FUNDS)); + env.close(); + } + + // tecPRECISION_LOSS + { + Env env{*this, features}; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + auto const baseFee = env.current()->fees().base; + env.fund(XRP(10'000), alice, bob, gw); + env(fset(gw, asfAllowTrustLineLocking)); + env.close(); + env.trust(USD(100000000000000000), alice); + env.trust(USD(100000000000000000), bob); + env.close(); + env(pay(gw, alice, USD(10000000000000000))); + env(pay(gw, bob, USD(1))); + env.close(); + + // alice cannot create escrow for 1/10 iou - precision loss + env(escrow::create(alice, bob, USD(1)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecPRECISION_LOSS)); + env.close(); + } + } + + void + testIOUFinishPreclaim(FeatureBitset features) + { + testcase("IOU Finish Preclaim"); + using namespace test::jtx; + using namespace std::literals; + + // tecNO_AUTH: requireAuth set: dest not authorized + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + auto const aliceUSD = alice["USD"]; + auto const bobUSD = bob["USD"]; + env.fund(XRP(5000), alice, bob, gw); + env(fset(gw, asfAllowTrustLineLocking)); + env(fset(gw, asfRequireAuth)); + env.close(); + env(trust(gw, aliceUSD(10'000)), txflags(tfSetfAuth)); + env(trust(gw, bobUSD(10'000)), txflags(tfSetfAuth)); + env.trust(USD(10'000), alice, bob); + env.close(); + env(pay(gw, alice, USD(10'000))); + env(pay(gw, bob, USD(10'000))); + env.close(); + + auto const seq1 = env.seq(alice); + env(escrow::create(alice, bob, USD(1)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + env(pay(bob, gw, USD(10'000))); + env(trust(gw, bobUSD(0)), txflags(tfSetfAuth)); + env(trust(bob, USD(0))); + env.close(); + + env.trust(USD(10'000), bob); + env.close(); + + // bob cannot finish because he is not authorized + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150), + ter(tecNO_AUTH)); + env.close(); + } + + // tecFROZEN: issuer has deep frozen the dest + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto 
const bob = Account("bob"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + env.fund(XRP(5000), alice, bob, gw); + env(fset(gw, asfAllowTrustLineLocking)); + env.close(); + env.trust(USD(10'000), alice, bob); + env.close(); + env(pay(gw, alice, USD(10'000))); + env(pay(gw, bob, USD(10'000))); + env.close(); + + auto const seq1 = env.seq(alice); + env(escrow::create(alice, bob, USD(1)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + // set freeze on bob trustline + env(trust(gw, USD(10'000), bob, tfSetFreeze | tfSetDeepFreeze)); + + // bob cannot finish because of deep freeze + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150), + ter(tecFROZEN)); + env.close(); + } + } + + void + testIOUFinishDoApply(FeatureBitset features) + { + testcase("IOU Finish Do Apply"); + using namespace test::jtx; + using namespace std::literals; + + // tecNO_LINE_INSUF_RESERVE: insufficient reserve to create line + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const acctReserve = env.current()->fees().accountReserve(0); + auto const incReserve = env.current()->fees().increment; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + env.fund(XRP(5000), alice, gw); + env.fund(acctReserve + (incReserve - 1), bob); + env.close(); + env(fset(gw, asfAllowTrustLineLocking)); + env.close(); + env.trust(USD(10'000), alice); + env.close(); + env(pay(gw, alice, USD(10'000))); + env.close(); + + auto const seq1 = env.seq(alice); + env(escrow::create(alice, bob, USD(1)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + // bob cannot finish because insufficient reserve to create line + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150), + ter(tecNO_LINE_INSUF_RESERVE)); + env.close(); + } + + // tecNO_LINE: alice submits; finish IOU not created + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + env.fund(XRP(5000), alice, bob, gw); + env.close(); + env(fset(gw, asfAllowTrustLineLocking)); + env.close(); + env.trust(USD(10'000), alice); + env.close(); + env(pay(gw, alice, USD(10'000))); + env.close(); + + auto const seq1 = env.seq(alice); + env(escrow::create(alice, bob, USD(1)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + // alice cannot finish because bob does not have a trustline + env(escrow::finish(alice, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150), + ter(tecNO_LINE)); + env.close(); + } + + // tecLIMIT_EXCEEDED: alice submits; IOU Limit < balance + amount + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + env.fund(XRP(5000), alice, bob, gw); + env.close(); + env(fset(gw, asfAllowTrustLineLocking)); + env.close(); + env.trust(USD(1000), alice, bob); + 
env.close(); + env(pay(gw, alice, USD(1000))); + env.close(); + + auto const seq1 = env.seq(alice); + env(escrow::create(alice, bob, USD(5)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + env.trust(USD(1), bob); + env.close(); + + // alice cannot finish because bobs limit is too low + env(escrow::finish(alice, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150), + ter(tecLIMIT_EXCEEDED)); + env.close(); + } + + // tesSUCCESS: bob submits; IOU Limit < balance + amount + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + env.fund(XRP(5000), alice, bob, gw); + env.close(); + env(fset(gw, asfAllowTrustLineLocking)); + env.close(); + env.trust(USD(1000), alice, bob); + env.close(); + env(pay(gw, alice, USD(1000))); + env.close(); + + auto const seq1 = env.seq(alice); + env(escrow::create(alice, bob, USD(5)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + env.trust(USD(1), bob); + env.close(); + + // bob can finish even if bobs limit is too low + auto const bobPreLimit = env.limit(bob, USD); + + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + // bobs limit is not changed + BEAST_EXPECT(env.limit(bob, USD) == bobPreLimit); + } + } + + void + testIOUCancelPreclaim(FeatureBitset features) + { + testcase("IOU Cancel Preclaim"); + using namespace test::jtx; + using namespace std::literals; + + // tecNO_AUTH: requireAuth set: account not authorized + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + auto const aliceUSD = alice["USD"]; + auto const bobUSD = bob["USD"]; + env.fund(XRP(5000), alice, bob, gw); + env(fset(gw, asfAllowTrustLineLocking)); + env(fset(gw, asfRequireAuth)); + env.close(); + env(trust(gw, aliceUSD(10'000)), txflags(tfSetfAuth)); + env(trust(gw, bobUSD(10'000)), txflags(tfSetfAuth)); + env.trust(USD(10'000), alice, bob); + env.close(); + env(pay(gw, alice, USD(10'000))); + env(pay(gw, bob, USD(10'000))); + env.close(); + + auto const seq1 = env.seq(alice); + env(escrow::create(alice, bob, USD(1)), + escrow::finish_time(env.now() + 1s), + escrow::cancel_time(env.now() + 2s), + fee(baseFee), + ter(tesSUCCESS)); + env.close(); + + env(pay(alice, gw, USD(9'999))); + env(trust(gw, aliceUSD(0)), txflags(tfSetfAuth)); + env(trust(alice, USD(0))); + env.close(); + + env.trust(USD(10'000), alice); + env.close(); + + // alice cannot cancel because she is not authorized + env(escrow::cancel(bob, alice, seq1), + fee(baseFee), + ter(tecNO_AUTH)); + env.close(); + } + } + + void + testIOUBalances(FeatureBitset features) + { + testcase("IOU Balances"); + + using namespace jtx; + using namespace std::chrono; + + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + env.fund(XRP(5000), alice, bob, gw); + env(fset(gw, asfAllowTrustLineLocking)); + 
env.close(); + env.trust(USD(10'000), alice, bob); + env.close(); + env(pay(gw, alice, USD(5000))); + env(pay(gw, bob, USD(5000))); + env.close(); + + auto const seq1 = env.seq(alice); + env(escrow::create(alice, bob, USD(1'000)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + auto const seq2 = env.seq(alice); + env(escrow::create(alice, bob, USD(1'000)), + escrow::condition(escrow::cb2), + escrow::finish_time(env.now() + 1s), + escrow::cancel_time(env.now() + 2s), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + env(escrow::cancel(bob, alice, seq2), ter(tesSUCCESS)); + env.close(); + } + + void + testIOUMetaAndOwnership(FeatureBitset features) + { + using namespace jtx; + using namespace std::chrono; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const carol = Account("carol"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + { + testcase("IOU Metadata to self"); + + Env env{*this, features}; + env.fund(XRP(5000), alice, bob, carol, gw); + env(fset(gw, asfAllowTrustLineLocking)); + env.close(); + env.trust(USD(10'000), alice, bob, carol); + env.close(); + env(pay(gw, alice, USD(5000))); + env(pay(gw, bob, USD(5000))); + env(pay(gw, carol, USD(5000))); + env.close(); + auto const aseq = env.seq(alice); + auto const bseq = env.seq(bob); + + env(escrow::create(alice, alice, USD(1'000)), + escrow::finish_time(env.now() + 1s), + escrow::cancel_time(env.now() + 500s)); + BEAST_EXPECT( + (*env.meta())[sfTransactionResult] == + static_cast<std::uint8_t>(tesSUCCESS)); + env.close(5s); + auto const aa = env.le(keylet::escrow(alice.id(), aseq)); + BEAST_EXPECT(aa); + { + ripple::Dir aod(*env.current(), keylet::ownerDir(alice.id())); + BEAST_EXPECT(std::distance(aod.begin(), aod.end()) == 2); + BEAST_EXPECT( + std::find(aod.begin(), aod.end(), aa) != aod.end()); + } + + { + ripple::Dir iod(*env.current(), keylet::ownerDir(gw.id())); + BEAST_EXPECT(std::distance(iod.begin(), iod.end()) == 4); + BEAST_EXPECT( + std::find(iod.begin(), iod.end(), aa) != iod.end()); + } + + env(escrow::create(bob, bob, USD(1'000)), + escrow::finish_time(env.now() + 1s), + escrow::cancel_time(env.now() + 2s)); + BEAST_EXPECT( + (*env.meta())[sfTransactionResult] == + static_cast<std::uint8_t>(tesSUCCESS)); + env.close(5s); + auto const bb = env.le(keylet::escrow(bob.id(), bseq)); + BEAST_EXPECT(bb); + + { + ripple::Dir bod(*env.current(), keylet::ownerDir(bob.id())); + BEAST_EXPECT(std::distance(bod.begin(), bod.end()) == 2); + BEAST_EXPECT( + std::find(bod.begin(), bod.end(), bb) != bod.end()); + } + + { + ripple::Dir iod(*env.current(), keylet::ownerDir(gw.id())); + BEAST_EXPECT(std::distance(iod.begin(), iod.end()) == 5); + BEAST_EXPECT( + std::find(iod.begin(), iod.end(), bb) != iod.end()); + } + + env.close(5s); + env(escrow::finish(alice, alice, aseq)); + { + BEAST_EXPECT(!env.le(keylet::escrow(alice.id(), aseq))); + BEAST_EXPECT( + (*env.meta())[sfTransactionResult] == + static_cast<std::uint8_t>(tesSUCCESS)); + + ripple::Dir aod(*env.current(), keylet::ownerDir(alice.id())); + BEAST_EXPECT(std::distance(aod.begin(), aod.end()) == 1); + BEAST_EXPECT( + std::find(aod.begin(), aod.end(), aa) == aod.end()); + + ripple::Dir bod(*env.current(), keylet::ownerDir(bob.id())); + BEAST_EXPECT(std::distance(bod.begin(), bod.end()) == 2); + BEAST_EXPECT( 
+ std::find(bod.begin(), bod.end(), bb) != bod.end()); + + ripple::Dir iod(*env.current(), keylet::ownerDir(gw.id())); + BEAST_EXPECT(std::distance(iod.begin(), iod.end()) == 4); + BEAST_EXPECT( + std::find(iod.begin(), iod.end(), bb) != iod.end()); + } + + env.close(5s); + env(escrow::cancel(bob, bob, bseq)); + { + BEAST_EXPECT(!env.le(keylet::escrow(bob.id(), bseq))); + BEAST_EXPECT( + (*env.meta())[sfTransactionResult] == + static_cast<std::uint8_t>(tesSUCCESS)); + + ripple::Dir bod(*env.current(), keylet::ownerDir(bob.id())); + BEAST_EXPECT(std::distance(bod.begin(), bod.end()) == 1); + BEAST_EXPECT( + std::find(bod.begin(), bod.end(), bb) == bod.end()); + + ripple::Dir iod(*env.current(), keylet::ownerDir(gw.id())); + BEAST_EXPECT(std::distance(iod.begin(), iod.end()) == 3); + BEAST_EXPECT( + std::find(iod.begin(), iod.end(), bb) == iod.end()); + } + } + { + testcase("IOU Metadata to other"); + + Env env{*this, features}; + env.fund(XRP(5000), alice, bob, carol, gw); + env(fset(gw, asfAllowTrustLineLocking)); + env.close(); + env.trust(USD(10'000), alice, bob, carol); + env.close(); + env(pay(gw, alice, USD(5000))); + env(pay(gw, bob, USD(5000))); + env(pay(gw, carol, USD(5000))); + env.close(); + auto const aseq = env.seq(alice); + auto const bseq = env.seq(bob); + + env(escrow::create(alice, bob, USD(1'000)), + escrow::finish_time(env.now() + 1s)); + BEAST_EXPECT( + (*env.meta())[sfTransactionResult] == + static_cast<std::uint8_t>(tesSUCCESS)); + env.close(5s); + env(escrow::create(bob, carol, USD(1'000)), + escrow::finish_time(env.now() + 1s), + escrow::cancel_time(env.now() + 2s)); + BEAST_EXPECT( + (*env.meta())[sfTransactionResult] == + static_cast<std::uint8_t>(tesSUCCESS)); + env.close(5s); + + auto const ab = env.le(keylet::escrow(alice.id(), aseq)); + BEAST_EXPECT(ab); + + auto const bc = env.le(keylet::escrow(bob.id(), bseq)); + BEAST_EXPECT(bc); + + { + ripple::Dir aod(*env.current(), keylet::ownerDir(alice.id())); + BEAST_EXPECT(std::distance(aod.begin(), aod.end()) == 2); + BEAST_EXPECT( + std::find(aod.begin(), aod.end(), ab) != aod.end()); + + ripple::Dir bod(*env.current(), keylet::ownerDir(bob.id())); + BEAST_EXPECT(std::distance(bod.begin(), bod.end()) == 3); + BEAST_EXPECT( + std::find(bod.begin(), bod.end(), ab) != bod.end()); + BEAST_EXPECT( + std::find(bod.begin(), bod.end(), bc) != bod.end()); + + ripple::Dir cod(*env.current(), keylet::ownerDir(carol.id())); + BEAST_EXPECT(std::distance(cod.begin(), cod.end()) == 2); + BEAST_EXPECT( + std::find(cod.begin(), cod.end(), bc) != cod.end()); + + ripple::Dir iod(*env.current(), keylet::ownerDir(gw.id())); + BEAST_EXPECT(std::distance(iod.begin(), iod.end()) == 5); + BEAST_EXPECT( + std::find(iod.begin(), iod.end(), ab) != iod.end()); + BEAST_EXPECT( + std::find(iod.begin(), iod.end(), bc) != iod.end()); + } + + env.close(5s); + env(escrow::finish(alice, alice, aseq)); + { + BEAST_EXPECT(!env.le(keylet::escrow(alice.id(), aseq))); + BEAST_EXPECT(env.le(keylet::escrow(bob.id(), bseq))); + + ripple::Dir aod(*env.current(), keylet::ownerDir(alice.id())); + BEAST_EXPECT(std::distance(aod.begin(), aod.end()) == 1); + BEAST_EXPECT( + std::find(aod.begin(), aod.end(), ab) == aod.end()); + + ripple::Dir bod(*env.current(), keylet::ownerDir(bob.id())); + BEAST_EXPECT(std::distance(bod.begin(), bod.end()) == 2); + BEAST_EXPECT( + std::find(bod.begin(), bod.end(), ab) == bod.end()); + BEAST_EXPECT( + std::find(bod.begin(), bod.end(), bc) != bod.end()); + + ripple::Dir cod(*env.current(), keylet::ownerDir(carol.id())); + BEAST_EXPECT(std::distance(cod.begin(), 
cod.end()) == 2); + + ripple::Dir iod(*env.current(), keylet::ownerDir(gw.id())); + BEAST_EXPECT(std::distance(iod.begin(), iod.end()) == 4); + BEAST_EXPECT( + std::find(iod.begin(), iod.end(), ab) == iod.end()); + BEAST_EXPECT( + std::find(iod.begin(), iod.end(), bc) != iod.end()); + } + + env.close(5s); + env(escrow::cancel(bob, bob, bseq)); + { + BEAST_EXPECT(!env.le(keylet::escrow(alice.id(), aseq))); + BEAST_EXPECT(!env.le(keylet::escrow(bob.id(), bseq))); + + ripple::Dir aod(*env.current(), keylet::ownerDir(alice.id())); + BEAST_EXPECT(std::distance(aod.begin(), aod.end()) == 1); + BEAST_EXPECT( + std::find(aod.begin(), aod.end(), ab) == aod.end()); + + ripple::Dir bod(*env.current(), keylet::ownerDir(bob.id())); + BEAST_EXPECT(std::distance(bod.begin(), bod.end()) == 1); + BEAST_EXPECT( + std::find(bod.begin(), bod.end(), ab) == bod.end()); + BEAST_EXPECT( + std::find(bod.begin(), bod.end(), bc) == bod.end()); + + ripple::Dir cod(*env.current(), keylet::ownerDir(carol.id())); + BEAST_EXPECT(std::distance(cod.begin(), cod.end()) == 1); + + ripple::Dir iod(*env.current(), keylet::ownerDir(gw.id())); + BEAST_EXPECT(std::distance(iod.begin(), iod.end()) == 3); + BEAST_EXPECT( + std::find(iod.begin(), iod.end(), ab) == iod.end()); + BEAST_EXPECT( + std::find(iod.begin(), iod.end(), bc) == iod.end()); + } + } + + { + testcase("IOU Metadata to issuer"); + + Env env{*this, features}; + env.fund(XRP(5000), alice, carol, gw); + env(fset(gw, asfAllowTrustLineLocking)); + env.close(); + env.trust(USD(10'000), alice, carol); + env.close(); + env(pay(gw, alice, USD(5000))); + env(pay(gw, carol, USD(5000))); + env.close(); + auto const aseq = env.seq(alice); + + env(escrow::create(alice, gw, USD(1'000)), + escrow::finish_time(env.now() + 1s)); + + BEAST_EXPECT( + (*env.meta())[sfTransactionResult] == + static_cast<std::uint8_t>(tesSUCCESS)); + env.close(5s); + env(escrow::create(gw, carol, USD(1'000)), + escrow::finish_time(env.now() + 1s), + escrow::cancel_time(env.now() + 2s), + ter(tecNO_PERMISSION)); + env.close(5s); + + auto const ag = env.le(keylet::escrow(alice.id(), aseq)); + BEAST_EXPECT(ag); + + { + ripple::Dir aod(*env.current(), keylet::ownerDir(alice.id())); + BEAST_EXPECT(std::distance(aod.begin(), aod.end()) == 2); + BEAST_EXPECT( + std::find(aod.begin(), aod.end(), ag) != aod.end()); + + ripple::Dir cod(*env.current(), keylet::ownerDir(carol.id())); + BEAST_EXPECT(std::distance(cod.begin(), cod.end()) == 1); + + ripple::Dir iod(*env.current(), keylet::ownerDir(gw.id())); + BEAST_EXPECT(std::distance(iod.begin(), iod.end()) == 3); + BEAST_EXPECT( + std::find(iod.begin(), iod.end(), ag) != iod.end()); + } + + env.close(5s); + env(escrow::finish(alice, alice, aseq)); + { + BEAST_EXPECT(!env.le(keylet::escrow(alice.id(), aseq))); + + ripple::Dir aod(*env.current(), keylet::ownerDir(alice.id())); + BEAST_EXPECT(std::distance(aod.begin(), aod.end()) == 1); + BEAST_EXPECT( + std::find(aod.begin(), aod.end(), ag) == aod.end()); + + ripple::Dir cod(*env.current(), keylet::ownerDir(carol.id())); + BEAST_EXPECT(std::distance(cod.begin(), cod.end()) == 1); + + ripple::Dir iod(*env.current(), keylet::ownerDir(gw.id())); + BEAST_EXPECT(std::distance(iod.begin(), iod.end()) == 2); + BEAST_EXPECT( + std::find(iod.begin(), iod.end(), ag) == iod.end()); + } + } + } + + void + testIOURippleState(FeatureBitset features) + { + testcase("IOU RippleState"); + using namespace test::jtx; + using namespace std::literals; + + struct TestAccountData + { + Account src; + Account dst; + Account gw; + bool hasTrustline; + 
bool negative; + }; + + std::array<TestAccountData, 8> tests = {{ + // src > dst && src > issuer && dst no trustline + {Account("alice2"), Account("bob0"), Account{"gw0"}, false, true}, + // src < dst && src < issuer && dst no trustline + {Account("carol0"), Account("dan1"), Account{"gw1"}, false, false}, + // dst > src && dst > issuer && dst no trustline + {Account("dan1"), Account("alice2"), Account{"gw0"}, false, true}, + // dst < src && dst < issuer && dst no trustline + {Account("bob0"), Account("carol0"), Account{"gw1"}, false, false}, + // src > dst && src > issuer && dst has trustline + {Account("alice2"), Account("bob0"), Account{"gw0"}, true, true}, + // src < dst && src < issuer && dst has trustline + {Account("carol0"), Account("dan1"), Account{"gw1"}, true, false}, + // dst > src && dst > issuer && dst has trustline + {Account("dan1"), Account("alice2"), Account{"gw0"}, true, true}, + // dst < src && dst < issuer && dst has trustline + {Account("bob0"), Account("carol0"), Account{"gw1"}, true, false}, + }}; + + for (auto const& t : tests) + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const USD = t.gw["USD"]; + env.fund(XRP(5000), t.src, t.dst, t.gw); + env(fset(t.gw, asfAllowTrustLineLocking)); + env.close(); + + if (t.hasTrustline) + env.trust(USD(100'000), t.src, t.dst); + else + env.trust(USD(100'000), t.src); + env.close(); + + env(pay(t.gw, t.src, USD(10'000))); + if (t.hasTrustline) + env(pay(t.gw, t.dst, USD(10'000))); + env.close(); + + // src can create escrow + auto const seq1 = env.seq(t.src); + auto const delta = USD(1'000); + env(escrow::create(t.src, t.dst, delta), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150)); + env.close(); + + // dst can finish escrow + auto const preSrc = env.balance(t.src, USD); + auto const preDst = env.balance(t.dst, USD); + + env(escrow::finish(t.dst, t.src, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150)); + env.close(); + + BEAST_EXPECT(env.balance(t.src, USD) == preSrc); + BEAST_EXPECT(env.balance(t.dst, USD) == preDst + delta); + } + } + + void + testIOUGateway(FeatureBitset features) + { + testcase("IOU Gateway"); + using namespace test::jtx; + using namespace std::literals; + + struct TestAccountData + { + Account src; + Account dst; + bool hasTrustline; + }; + + // issuer is source + { + auto const gw = Account{"gateway"}; + auto const alice = Account{"alice"}; + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const USD = gw["USD"]; + env.fund(XRP(5000), alice, gw); + env(fset(gw, asfAllowTrustLineLocking)); + env.close(); + env.trust(USD(100'000), alice); + env.close(); + + env(pay(gw, alice, USD(10'000))); + env.close(); + + // issuer cannot create escrow + env(escrow::create(gw, alice, USD(1'000)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecNO_PERMISSION)); + env.close(); + } + + std::array<TestAccountData, 4> gwDstTests = {{ + // src > dst && src > issuer && dst has trustline + {Account("alice2"), Account{"gw0"}, true}, + // src < dst && src < issuer && dst has trustline + {Account("carol0"), Account{"gw1"}, true}, + // dst > src && dst > issuer && dst has trustline + {Account("dan1"), Account{"gw0"}, true}, + // dst < src && dst < issuer && dst has trustline + {Account("bob0"), Account{"gw1"}, true}, + }}; + + // issuer is destination + for (auto const& t : gwDstTests) + { + Env env{*this, features}; + auto const baseFee = 
env.current()->fees().base; + auto const USD = t.dst["USD"]; + env.fund(XRP(5000), t.dst, t.src); + env(fset(t.dst, asfAllowTrustLineLocking)); + env.close(); + + env.trust(USD(100'000), t.src); + env.close(); + + env(pay(t.dst, t.src, USD(10'000))); + env.close(); + + // issuer can receive escrow + auto const seq1 = env.seq(t.src); + auto const preSrc = env.balance(t.src, USD); + env(escrow::create(t.src, t.dst, USD(1'000)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150)); + env.close(); + + // issuer can finish escrow, no dest trustline + env(escrow::finish(t.dst, t.src, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150)); + env.close(); + auto const preAmount = 10'000; + BEAST_EXPECT(preSrc == USD(preAmount)); + auto const postAmount = 9000; + BEAST_EXPECT(env.balance(t.src, USD) == USD(postAmount)); + BEAST_EXPECT(env.balance(t.dst, USD) == USD(0)); + } + + // issuer is source and destination + { + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + env.fund(XRP(5000), gw); + env(fset(gw, asfAllowTrustLineLocking)); + env.close(); + + // issuer cannot receive escrow + env(escrow::create(gw, gw, USD(1'000)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecNO_PERMISSION)); + env.close(); + } + } + + void + testIOULockedRate(FeatureBitset features) + { + testcase("IOU Locked Rate"); + using namespace test::jtx; + using namespace std::literals; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const carol = Account("carol"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + + // test locked rate + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + env.fund(XRP(10'000), alice, bob, gw); + env(fset(gw, asfAllowTrustLineLocking)); + env(rate(gw, 1.25)); + env.close(); + env.trust(USD(100'000), alice); + env.trust(USD(100'000), bob); + env.close(); + env(pay(gw, alice, USD(10'000))); + env(pay(gw, bob, USD(10'000))); + env.close(); + + // alice can create escrow w/ xfer rate + auto const preAlice = env.balance(alice, USD); + auto const seq1 = env.seq(alice); + auto const delta = USD(125); + env(escrow::create(alice, bob, delta), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150)); + env.close(); + auto const transferRate = escrow::rate(env, alice, seq1); + BEAST_EXPECT( + transferRate.value == std::uint32_t(1'000'000'000 * 1.25)); + + // bob can finish escrow + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150)); + env.close(); + + BEAST_EXPECT(env.balance(alice, USD) == preAlice - delta); + BEAST_EXPECT(env.balance(bob, USD) == USD(10'100)); + } + // test rate change - higher + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + env.fund(XRP(10'000), alice, bob, gw); + env(fset(gw, asfAllowTrustLineLocking)); + env(rate(gw, 1.25)); + env.close(); + env.trust(USD(100'000), alice); + env.trust(USD(100'000), bob); + env.close(); + env(pay(gw, alice, USD(10'000))); + env(pay(gw, bob, USD(10'000))); + env.close(); + + // alice can create escrow w/ xfer rate + auto const preAlice = env.balance(alice, USD); + auto const seq1 = env.seq(alice); + auto const delta = USD(125); + env(escrow::create(alice, bob, delta), + 
escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150)); + env.close(); + auto transferRate = escrow::rate(env, alice, seq1); + BEAST_EXPECT( + transferRate.value == std::uint32_t(1'000'000'000 * 1.25)); + + // issuer changes rate higher + env(rate(gw, 1.26)); + env.close(); + + // bob can finish escrow - rate unchanged + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150)); + env.close(); + + BEAST_EXPECT(env.balance(alice, USD) == preAlice - delta); + BEAST_EXPECT(env.balance(bob, USD) == USD(10'100)); + } + + // test rate change - lower + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + env.fund(XRP(10'000), alice, bob, gw); + env(fset(gw, asfAllowTrustLineLocking)); + env(rate(gw, 1.25)); + env.close(); + env.trust(USD(100'000), alice); + env.trust(USD(100'000), bob); + env.close(); + env(pay(gw, alice, USD(10'000))); + env(pay(gw, bob, USD(10'000))); + env.close(); + + // alice can create escrow w/ xfer rate + auto const preAlice = env.balance(alice, USD); + auto const seq1 = env.seq(alice); + auto const delta = USD(125); + env(escrow::create(alice, bob, delta), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150)); + env.close(); + auto transferRate = escrow::rate(env, alice, seq1); + BEAST_EXPECT( + transferRate.value == std::uint32_t(1'000'000'000 * 1.25)); + + // issuer changes rate lower + env(rate(gw, 1.00)); + env.close(); + + // bob can finish escrow - rate changed + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150)); + env.close(); + + BEAST_EXPECT(env.balance(alice, USD) == preAlice - delta); + BEAST_EXPECT(env.balance(bob, USD) == USD(10125)); + } + + // test cancel doesnt charge rate + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + env.fund(XRP(10'000), alice, bob, gw); + env(fset(gw, asfAllowTrustLineLocking)); + env(rate(gw, 1.25)); + env.close(); + env.trust(USD(100'000), alice); + env.trust(USD(100'000), bob); + env.close(); + env(pay(gw, alice, USD(10'000))); + env(pay(gw, bob, USD(10'000))); + env.close(); + + // alice can create escrow w/ xfer rate + auto const preAlice = env.balance(alice, USD); + auto const seq1 = env.seq(alice); + auto const delta = USD(125); + env(escrow::create(alice, bob, delta), + escrow::finish_time(env.now() + 1s), + escrow::cancel_time(env.now() + 3s), + fee(baseFee)); + env.close(); + auto transferRate = escrow::rate(env, alice, seq1); + BEAST_EXPECT( + transferRate.value == std::uint32_t(1'000'000'000 * 1.25)); + + // issuer changes rate lower + env(rate(gw, 1.00)); + env.close(); + + // alice can cancel escrow - rate is not charged + env(escrow::cancel(alice, alice, seq1), fee(baseFee)); + env.close(); + + BEAST_EXPECT(env.balance(alice, USD) == preAlice); + BEAST_EXPECT(env.balance(bob, USD) == USD(10000)); + } + } + + void + testIOULimitAmount(FeatureBitset features) + { + testcase("IOU Limit"); + using namespace test::jtx; + using namespace std::literals; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + + // test LimitAmount + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + env.fund(XRP(1'000), alice, bob, gw); + env(fset(gw, asfAllowTrustLineLocking)); + env.close(); + env.trust(USD(10'000), alice, bob); 
+ env.close(); + env(pay(gw, alice, USD(1'000))); + env(pay(gw, bob, USD(1'000))); + env.close(); + + // alice can create escrow + auto seq1 = env.seq(alice); + auto const delta = USD(125); + env(escrow::create(alice, bob, delta), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150)); + env.close(); + + // bob can finish + auto const preBobLimit = env.limit(bob, USD); + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150)); + env.close(); + auto const postBobLimit = env.limit(bob, USD); + // bobs limit is NOT changed + BEAST_EXPECT(postBobLimit == preBobLimit); + } + } + + void + testIOURequireAuth(FeatureBitset features) + { + testcase("IOU Require Auth"); + using namespace test::jtx; + using namespace std::literals; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const carol = Account("carol"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + + auto const aliceUSD = alice["USD"]; + auto const bobUSD = bob["USD"]; + + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + env.fund(XRP(1'000), alice, bob, gw); + env(fset(gw, asfAllowTrustLineLocking)); + env(fset(gw, asfRequireAuth)); + env.close(); + env(trust(gw, aliceUSD(10'000)), txflags(tfSetfAuth)); + env(trust(alice, USD(10'000))); + env(trust(bob, USD(10'000))); + env.close(); + env(pay(gw, alice, USD(1'000))); + env.close(); + + // alice cannot create escrow - fails without auth + auto seq1 = env.seq(alice); + auto const delta = USD(125); + env(escrow::create(alice, bob, delta), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecNO_AUTH)); + env.close(); + + // set auth on bob + env(trust(gw, bobUSD(10'000)), txflags(tfSetfAuth)); + env(trust(bob, USD(10'000))); + env.close(); + env(pay(gw, bob, USD(1'000))); + env.close(); + + // alice can create escrow - bob has auth + seq1 = env.seq(alice); + env(escrow::create(alice, bob, delta), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150)); + env.close(); + + // bob can finish + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150)); + env.close(); + } + + void + testIOUFreeze(FeatureBitset features) + { + testcase("IOU Freeze"); + using namespace test::jtx; + using namespace std::literals; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const carol = Account("carol"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + + // test Global Freeze + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + env.fund(XRP(10'000), alice, bob, gw); + env(fset(gw, asfAllowTrustLineLocking)); + env.close(); + env.trust(USD(100'000), alice); + env.trust(USD(100'000), bob); + env.close(); + env(pay(gw, alice, USD(10'000))); + env(pay(gw, bob, USD(10'000))); + env.close(); + env(fset(gw, asfGlobalFreeze)); + env.close(); + + // setup transaction + auto seq1 = env.seq(alice); + auto const delta = USD(125); + + // create escrow fails - frozen trustline + env(escrow::create(alice, bob, delta), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecFROZEN)); + env.close(); + + // clear global freeze + env(fclear(gw, asfGlobalFreeze)); + env.close(); + + // create escrow success + seq1 = env.seq(alice); + 
env(escrow::create(alice, bob, delta), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150)); + env.close(); + + // set global freeze + env(fset(gw, asfGlobalFreeze)); + env.close(); + + // bob finish escrow success regardless of frozen assets + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150)); + env.close(); + + // clear global freeze + env(fclear(gw, asfGlobalFreeze)); + env.close(); + + // create escrow success + seq1 = env.seq(alice); + env(escrow::create(alice, bob, delta), + escrow::condition(escrow::cb1), + escrow::cancel_time(env.now() + 1s), + fee(baseFee * 150)); + env.close(); + + // set global freeze + env(fset(gw, asfGlobalFreeze)); + env.close(); + + // bob cancel escrow success regardless of frozen assets + env(escrow::cancel(bob, alice, seq1), fee(baseFee)); + env.close(); + } + + // test Individual Freeze + { + // Env Setup + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + env.fund(XRP(10'000), alice, bob, gw); + env(fset(gw, asfAllowTrustLineLocking)); + env.close(); + env(trust(alice, USD(100'000))); + env(trust(bob, USD(100'000))); + env.close(); + env(pay(gw, alice, USD(10'000))); + env(pay(gw, bob, USD(10'000))); + env.close(); + + // set freeze on alice trustline + env(trust(gw, USD(10'000), alice, tfSetFreeze)); + env.close(); + + // setup transaction + auto seq1 = env.seq(alice); + auto const delta = USD(125); + + // create escrow fails - frozen trustline + env(escrow::create(alice, bob, delta), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecFROZEN)); + env.close(); + + // clear freeze on alice trustline + env(trust(gw, USD(10'000), alice, tfClearFreeze)); + env.close(); + + // create escrow success + seq1 = env.seq(alice); + env(escrow::create(alice, bob, delta), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150)); + env.close(); + + // set freeze on bob trustline + env(trust(gw, USD(10'000), bob, tfSetFreeze)); + env.close(); + + // bob finish escrow success regardless of frozen assets + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150)); + env.close(); + + // reset freeze on bob and alice trustline + env(trust(gw, USD(10'000), alice, tfClearFreeze)); + env(trust(gw, USD(10'000), bob, tfClearFreeze)); + env.close(); + + // create escrow success + seq1 = env.seq(alice); + env(escrow::create(alice, bob, delta), + escrow::condition(escrow::cb1), + escrow::cancel_time(env.now() + 1s), + fee(baseFee * 150)); + env.close(); + + // set freeze on bob trustline + env(trust(gw, USD(10'000), bob, tfSetFreeze)); + env.close(); + + // bob cancel escrow success regardless of frozen assets + env(escrow::cancel(bob, alice, seq1), fee(baseFee)); + env.close(); + } + + // test Deep Freeze + { + // Env Setup + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + env.fund(XRP(10'000), alice, bob, gw); + env(fset(gw, asfAllowTrustLineLocking)); + env.close(); + env(trust(alice, USD(100'000))); + env(trust(bob, USD(100'000))); + env.close(); + env(pay(gw, alice, USD(10'000))); + env(pay(gw, bob, USD(10'000))); + env.close(); + + // set freeze on alice trustline + env(trust(gw, USD(10'000), alice, tfSetFreeze | tfSetDeepFreeze)); + env.close(); + + // setup transaction + auto seq1 = env.seq(alice); + auto const delta = USD(125); + + 
// create escrow fails - frozen trustline + env(escrow::create(alice, bob, delta), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecFROZEN)); + env.close(); + + // clear freeze on alice trustline + env(trust( + gw, USD(10'000), alice, tfClearFreeze | tfClearDeepFreeze)); + env.close(); + + // create escrow success + seq1 = env.seq(alice); + env(escrow::create(alice, bob, delta), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150)); + env.close(); + + // set freeze on bob trustline + env(trust(gw, USD(10'000), bob, tfSetFreeze | tfSetDeepFreeze)); + env.close(); + + // bob finish escrow fails because of deep frozen assets + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150), + ter(tecFROZEN)); + env.close(); + + // reset freeze on alice and bob trustline + env(trust( + gw, USD(10'000), alice, tfClearFreeze | tfClearDeepFreeze)); + env(trust(gw, USD(10'000), bob, tfClearFreeze | tfClearDeepFreeze)); + env.close(); + + // create escrow success + seq1 = env.seq(alice); + env(escrow::create(alice, bob, delta), + escrow::condition(escrow::cb1), + escrow::cancel_time(env.now() + 1s), + fee(baseFee * 150)); + env.close(); + + // set freeze on bob trustline + env(trust(gw, USD(10'000), bob, tfSetFreeze | tfSetDeepFreeze)); + env.close(); + + // bob cancel escrow success regardless of deep frozen assets + env(escrow::cancel(bob, alice, seq1), + fee(baseFee), + ter(tesSUCCESS)); + env.close(); + } + } + void + testIOUINSF(FeatureBitset features) + { + testcase("IOU Insufficient Funds"); + using namespace test::jtx; + using namespace std::literals; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const carol = Account("carol"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + { + // test tecPATH_PARTIAL + // ie. has 10'000, escrow 1'000 then try to pay 10'000 + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + env.fund(XRP(10'000), alice, bob, gw); + env(fset(gw, asfAllowTrustLineLocking)); + env.close(); + env.trust(USD(100'000), alice); + env.trust(USD(100'000), bob); + env.close(); + env(pay(gw, alice, USD(10'000))); + env(pay(gw, bob, USD(10'000))); + env.close(); + + // create escrow success + auto const delta = USD(1'000); + env(escrow::create(alice, bob, delta), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150)); + env.close(); + env(pay(alice, gw, USD(10'000)), ter(tecPATH_PARTIAL)); + } + { + // test tecINSUFFICIENT_FUNDS + // ie. 
has 10'000 escrow 1'000 then try to escrow 10'000 + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + env.fund(XRP(10'000), alice, bob, gw); + env(fset(gw, asfAllowTrustLineLocking)); + env.close(); + env.trust(USD(100'000), alice); + env.trust(USD(100'000), bob); + env.close(); + env(pay(gw, alice, USD(10'000))); + env(pay(gw, bob, USD(10'000))); + env.close(); + + auto const delta = USD(1'000); + env(escrow::create(alice, bob, delta), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150)); + env.close(); + + env(escrow::create(alice, bob, USD(10'000)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecINSUFFICIENT_FUNDS)); + env.close(); + } + } + + void + testIOUPrecisionLoss(FeatureBitset features) + { + testcase("IOU Precision Loss"); + using namespace test::jtx; + using namespace std::literals; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + + // test min create precision loss + { + Env env(*this, features); + auto const baseFee = env.current()->fees().base; + env.fund(XRP(10'000), alice, bob, gw); + env(fset(gw, asfAllowTrustLineLocking)); + env.close(); + env.trust(USD(100000000000000000), alice); + env.trust(USD(100000000000000000), bob); + env.close(); + env(pay(gw, alice, USD(10000000000000000))); + env(pay(gw, bob, USD(1))); + env.close(); + + // alice cannot create escrow for 1/10 iou - precision loss + env(escrow::create(alice, bob, USD(1)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecPRECISION_LOSS)); + env.close(); + + auto const seq1 = env.seq(alice); + // alice can create escrow for 1'000 iou + env(escrow::create(alice, bob, USD(1'000)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150)); + env.close(); + + // bob finish escrow success + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150)); + env.close(); + } + } + + void + testMPTEnablement(FeatureBitset features) + { + testcase("MPT Enablement"); + + using namespace jtx; + using namespace std::chrono; + + for (bool const withTokenEscrow : {false, true}) + { + auto const amend = + withTokenEscrow ? features : features - featureTokenEscrow; + Env env{*this, amend}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + env.fund(XRP(5000), bob); + + MPTTester mptGw(env, gw, {.holders = {alice}}); + mptGw.create( + {.ownerCount = 1, + .holderCount = 0, + .flags = tfMPTCanEscrow | tfMPTCanTransfer}); + mptGw.authorize({.account = alice}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(10'000))); + env.close(); + + auto const createResult = + withTokenEscrow ? ter(tesSUCCESS) : ter(temBAD_AMOUNT); + auto const finishResult = + withTokenEscrow ? 
ter(tesSUCCESS) : ter(tecNO_TARGET); + + auto const seq1 = env.seq(alice); + env(escrow::create(alice, bob, MPT(1'000)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + createResult); + env.close(); + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150), + finishResult); + env.close(); + auto const seq2 = env.seq(alice); + env(escrow::create(alice, bob, MPT(1'000)), + escrow::condition(escrow::cb2), + escrow::finish_time(env.now() + 1s), + escrow::cancel_time(env.now() + 2s), + fee(baseFee * 150), + createResult); + env.close(); + env(escrow::cancel(bob, alice, seq2), finishResult); + env.close(); + } + } + + void + testMPTCreatePreflight(FeatureBitset features) + { + testcase("MPT Create Preflight"); + using namespace test::jtx; + using namespace std::literals; + + for (bool const withMPT : {true, false}) + { + auto const amend = + withMPT ? features : features - featureMPTokensV1; + Env env{*this, amend}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + env.fund(XRP(1'000), alice, bob, gw); + + Json::Value jv = escrow::create(alice, bob, XRP(1)); + jv.removeMember(jss::Amount); + jv[jss::Amount][jss::mpt_issuance_id] = + "00000004A407AF5856CCF3C42619DAA925813FC955C72983"; + jv[jss::Amount][jss::value] = "-1"; + + auto const result = withMPT ? ter(temBAD_AMOUNT) : ter(temDISABLED); + env(jv, + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + result); + env.close(); + } + + // temBAD_AMOUNT: amount < 0 + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + + MPTTester mptGw(env, gw, {.holders = {alice, bob}}); + mptGw.create( + {.ownerCount = 1, + .holderCount = 0, + .flags = tfMPTCanEscrow | tfMPTCanTransfer}); + mptGw.authorize({.account = alice}); + mptGw.authorize({.account = bob}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(10'000))); + env(pay(gw, bob, MPT(10'000))); + env.close(); + + env(escrow::create(alice, bob, MPT(-1)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(temBAD_AMOUNT)); + env.close(); + } + } + + void + testMPTCreatePreclaim(FeatureBitset features) + { + testcase("MPT Create Preclaim"); + using namespace test::jtx; + using namespace std::literals; + + // tecNO_PERMISSION: issuer is the same as the account + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const gw = Account("gw"); + + MPTTester mptGw(env, gw, {.holders = {alice}}); + mptGw.create( + {.ownerCount = 1, + .holderCount = 0, + .flags = tfMPTCanEscrow | tfMPTCanTransfer}); + mptGw.authorize({.account = alice}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(10'000))); + env.close(); + + env(escrow::create(gw, alice, MPT(1)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecNO_PERMISSION)); + env.close(); + } + + // tecOBJECT_NOT_FOUND: mpt does not exist + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + env.fund(XRP(10'000), alice, bob, gw); + 
env.close(); + + auto const mpt = ripple::test::jtx::MPT( + alice.name(), makeMptID(env.seq(alice), alice)); + Json::Value jv = escrow::create(alice, bob, mpt(2)); + jv[jss::Amount][jss::mpt_issuance_id] = + "00000004A407AF5856CCF3C42619DAA925813FC955C72983"; + env(jv, + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecOBJECT_NOT_FOUND)); + env.close(); + } + + // tecNO_PERMISSION: tfMPTCanEscrow is not enabled + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + + MPTTester mptGw(env, gw, {.holders = {alice, bob}}); + mptGw.create( + {.ownerCount = 1, .holderCount = 0, .flags = tfMPTCanTransfer}); + mptGw.authorize({.account = alice}); + mptGw.authorize({.account = bob}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(10'000))); + env(pay(gw, bob, MPT(10'000))); + env.close(); + + env(escrow::create(alice, bob, MPT(3)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecNO_PERMISSION)); + env.close(); + } + + // tecOBJECT_NOT_FOUND: account does not have the mpt + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + + MPTTester mptGw(env, gw, {.holders = {alice, bob}}); + mptGw.create( + {.ownerCount = 1, + .holderCount = 0, + .flags = tfMPTCanEscrow | tfMPTCanTransfer}); + auto const MPT = mptGw["MPT"]; + + env(escrow::create(alice, bob, MPT(4)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecOBJECT_NOT_FOUND)); + env.close(); + } + + // tecNO_AUTH: requireAuth set: account not authorized + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + + MPTTester mptGw(env, gw, {.holders = {alice, bob}}); + mptGw.create( + {.ownerCount = 1, + .holderCount = 0, + .flags = + tfMPTCanEscrow | tfMPTCanTransfer | tfMPTRequireAuth}); + mptGw.authorize({.account = alice}); + mptGw.authorize({.account = gw, .holder = alice}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(10'000))); + env.close(); + + // unauthorize account + mptGw.authorize( + {.account = gw, .holder = alice, .flags = tfMPTUnauthorize}); + + env(escrow::create(alice, bob, MPT(5)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecNO_AUTH)); + env.close(); + } + + // tecNO_AUTH: requireAuth set: dest not authorized + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + + MPTTester mptGw(env, gw, {.holders = {alice, bob}}); + mptGw.create( + {.ownerCount = 1, + .holderCount = 0, + .flags = + tfMPTCanEscrow | tfMPTCanTransfer | tfMPTRequireAuth}); + mptGw.authorize({.account = alice}); + mptGw.authorize({.account = gw, .holder = alice}); + mptGw.authorize({.account = bob}); + mptGw.authorize({.account = gw, .holder = bob}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(10'000))); + env(pay(gw, bob, MPT(10'000))); + env.close(); + + // unauthorize dest + mptGw.authorize( + {.account = gw, .holder = bob, .flags = tfMPTUnauthorize}); + + 
env(escrow::create(alice, bob, MPT(6)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecNO_AUTH)); + env.close(); + } + + // tecLOCKED: issuer has locked the account + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + + MPTTester mptGw(env, gw, {.holders = {alice, bob}}); + mptGw.create( + {.ownerCount = 1, + .holderCount = 0, + .flags = tfMPTCanEscrow | tfMPTCanTransfer | tfMPTCanLock}); + mptGw.authorize({.account = alice}); + mptGw.authorize({.account = bob}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(10'000))); + env(pay(gw, bob, MPT(10'000))); + env.close(); + + // lock account + mptGw.set({.account = gw, .holder = alice, .flags = tfMPTLock}); + + env(escrow::create(alice, bob, MPT(7)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecLOCKED)); + env.close(); + } + + // tecLOCKED: issuer has locked the dest + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + + MPTTester mptGw(env, gw, {.holders = {alice, bob}}); + mptGw.create( + {.ownerCount = 1, + .holderCount = 0, + .flags = tfMPTCanEscrow | tfMPTCanTransfer | tfMPTCanLock}); + mptGw.authorize({.account = alice}); + mptGw.authorize({.account = bob}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(10'000))); + env(pay(gw, bob, MPT(10'000))); + env.close(); + + // lock dest + mptGw.set({.account = gw, .holder = bob, .flags = tfMPTLock}); + + env(escrow::create(alice, bob, MPT(8)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecLOCKED)); + env.close(); + } + + // tecNO_AUTH: mpt cannot be transferred + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + + MPTTester mptGw(env, gw, {.holders = {alice, bob}}); + mptGw.create( + {.ownerCount = 1, .holderCount = 0, .flags = tfMPTCanEscrow}); + mptGw.authorize({.account = alice}); + mptGw.authorize({.account = bob}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(10'000))); + env(pay(gw, bob, MPT(10'000))); + env.close(); + + env(escrow::create(alice, bob, MPT(9)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecNO_AUTH)); + env.close(); + } + + // tecINSUFFICIENT_FUNDS: spendable amount is zero + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + + MPTTester mptGw(env, gw, {.holders = {alice, bob}}); + mptGw.create( + {.ownerCount = 1, + .holderCount = 0, + .flags = tfMPTCanEscrow | tfMPTCanTransfer}); + mptGw.authorize({.account = alice}); + mptGw.authorize({.account = bob}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(10))); + env(pay(gw, bob, MPT(10))); + env.close(); + + env(escrow::create(alice, bob, MPT(11)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecINSUFFICIENT_FUNDS)); + env.close(); + } + + // tecINSUFFICIENT_FUNDS: spendable amount is less than the amount + { + Env env{*this, features}; + auto const 
baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + + MPTTester mptGw(env, gw, {.holders = {alice, bob}}); + mptGw.create( + {.ownerCount = 1, + .holderCount = 0, + .flags = tfMPTCanEscrow | tfMPTCanTransfer}); + mptGw.authorize({.account = alice}); + mptGw.authorize({.account = bob}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(10))); + env(pay(gw, bob, MPT(10))); + env.close(); + + env(escrow::create(alice, bob, MPT(11)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecINSUFFICIENT_FUNDS)); + env.close(); + } + } + + void + testMPTFinishPreclaim(FeatureBitset features) + { + testcase("MPT Finish Preclaim"); + using namespace test::jtx; + using namespace std::literals; + + // tecNO_AUTH: requireAuth set: dest not authorized + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + + MPTTester mptGw(env, gw, {.holders = {alice, bob}}); + mptGw.create( + {.ownerCount = 1, + .holderCount = 0, + .flags = + tfMPTCanEscrow | tfMPTCanTransfer | tfMPTRequireAuth}); + mptGw.authorize({.account = alice}); + mptGw.authorize({.account = gw, .holder = alice}); + mptGw.authorize({.account = bob}); + mptGw.authorize({.account = gw, .holder = bob}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(10'000))); + env(pay(gw, bob, MPT(10'000))); + env.close(); + + auto const seq1 = env.seq(alice); + env(escrow::create(alice, bob, MPT(10)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + // unauthorize dest + mptGw.authorize( + {.account = gw, .holder = bob, .flags = tfMPTUnauthorize}); + + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150), + ter(tecNO_AUTH)); + env.close(); + } + + // tecLOCKED: issuer has locked the dest + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + + MPTTester mptGw(env, gw, {.holders = {alice, bob}}); + mptGw.create( + {.ownerCount = 1, + .holderCount = 0, + .flags = tfMPTCanEscrow | tfMPTCanTransfer | tfMPTCanLock}); + mptGw.authorize({.account = alice}); + mptGw.authorize({.account = bob}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(10'000))); + env(pay(gw, bob, MPT(10'000))); + env.close(); + + auto const seq1 = env.seq(alice); + env(escrow::create(alice, bob, MPT(8)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + // lock dest + mptGw.set({.account = gw, .holder = bob, .flags = tfMPTLock}); + + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150), + ter(tecLOCKED)); + env.close(); + } + } + + void + testMPTFinishDoApply(FeatureBitset features) + { + testcase("MPT Finish Do Apply"); + using namespace test::jtx; + using namespace std::literals; + + // tecINSUFFICIENT_RESERVE: insufficient reserve to create MPT + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const acctReserve = env.current()->fees().accountReserve(0); + auto const incReserve = 
env.current()->fees().increment; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + env.fund(acctReserve + (incReserve - 1), bob); + env.close(); + + MPTTester mptGw(env, gw, {.holders = {alice}}); + mptGw.create( + {.ownerCount = 1, + .holderCount = 0, + .flags = tfMPTCanEscrow | tfMPTCanTransfer}); + mptGw.authorize({.account = alice}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(10'000))); + env.close(); + + auto const seq1 = env.seq(alice); + env(escrow::create(alice, bob, MPT(10)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150), + ter(tecINSUFFICIENT_RESERVE)); + env.close(); + } + + // tesSUCCESS: bob submits; finish MPT created + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + env.fund(XRP(10'000), bob); + env.close(); + + MPTTester mptGw(env, gw, {.holders = {alice}}); + mptGw.create( + {.ownerCount = 1, + .holderCount = 0, + .flags = tfMPTCanEscrow | tfMPTCanTransfer}); + mptGw.authorize({.account = alice}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(10'000))); + env.close(); + + auto const seq1 = env.seq(alice); + env(escrow::create(alice, bob, MPT(10)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + } + + // tecNO_PERMISSION: carol submits; finish MPT not created + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const carol = Account("carol"); + auto const gw = Account("gw"); + env.fund(XRP(10'000), bob, carol); + env.close(); + + MPTTester mptGw(env, gw, {.holders = {alice}}); + mptGw.create( + {.ownerCount = 1, + .holderCount = 0, + .flags = tfMPTCanEscrow | tfMPTCanTransfer}); + mptGw.authorize({.account = alice}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(10'000))); + env.close(); + + auto const seq1 = env.seq(alice); + env(escrow::create(alice, bob, MPT(10)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + env(escrow::finish(carol, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150), + ter(tecNO_PERMISSION)); + env.close(); + } + } + + void + testMPTCancelPreclaim(FeatureBitset features) + { + testcase("MPT Cancel Preclaim"); + using namespace test::jtx; + using namespace std::literals; + + // tecNO_AUTH: requireAuth set: account not authorized + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + + MPTTester mptGw(env, gw, {.holders = {alice, bob}}); + mptGw.create( + {.ownerCount = 1, + .holderCount = 0, + .flags = + tfMPTCanEscrow | tfMPTCanTransfer | tfMPTRequireAuth}); + mptGw.authorize({.account = alice}); + mptGw.authorize({.account = gw, .holder = alice}); + 
mptGw.authorize({.account = bob}); + mptGw.authorize({.account = gw, .holder = bob}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(10'000))); + env(pay(gw, bob, MPT(10'000))); + env.close(); + + auto const seq1 = env.seq(alice); + env(escrow::create(alice, bob, MPT(10)), + escrow::cancel_time(env.now() + 2s), + escrow::condition(escrow::cb1), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + // unauthorize account + mptGw.authorize( + {.account = gw, .holder = alice, .flags = tfMPTUnauthorize}); + + env(escrow::cancel(bob, alice, seq1), ter(tecNO_AUTH)); + env.close(); + } + } + + void + testMPTBalances(FeatureBitset features) + { + testcase("MPT Balances"); + + using namespace jtx; + using namespace std::chrono; + + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const carol = Account("carol"); + auto const gw = Account("gw"); + env.fund(XRP(5000), bob); + + MPTTester mptGw(env, gw, {.holders = {alice, carol}}); + mptGw.create( + {.ownerCount = 1, + .holderCount = 0, + .flags = tfMPTCanEscrow | tfMPTCanTransfer}); + mptGw.authorize({.account = alice}); + mptGw.authorize({.account = carol}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(10'000))); + env(pay(gw, carol, MPT(10'000))); + env.close(); + + auto outstandingMPT = env.balance(gw, MPT); + + // Create & Finish Escrow + auto const seq1 = env.seq(alice); + { + auto const preAliceMPT = env.balance(alice, MPT); + auto const preBobMPT = env.balance(bob, MPT); + env(escrow::create(alice, bob, MPT(1'000)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + BEAST_EXPECT(env.balance(alice, MPT) == preAliceMPT - MPT(1'000)); + BEAST_EXPECT(mptEscrowed(env, alice, MPT) == 1'000); + BEAST_EXPECT(env.balance(bob, MPT) == preBobMPT); + BEAST_EXPECT(mptEscrowed(env, bob, MPT) == 0); + BEAST_EXPECT(env.balance(gw, MPT) == outstandingMPT); + BEAST_EXPECT(issuerMPTEscrowed(env, MPT) == 1'000); + } + { + auto const preAliceMPT = env.balance(alice, MPT); + auto const preBobMPT = env.balance(bob, MPT); + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + BEAST_EXPECT(env.balance(alice, MPT) == preAliceMPT); + BEAST_EXPECT(mptEscrowed(env, alice, MPT) == 0); + BEAST_EXPECT(env.balance(bob, MPT) == preBobMPT + MPT(1'000)); + BEAST_EXPECT(mptEscrowed(env, bob, MPT) == 0); + BEAST_EXPECT(env.balance(gw, MPT) == outstandingMPT); + BEAST_EXPECT(issuerMPTEscrowed(env, MPT) == 0); + } + + // Create & Cancel Escrow + auto const seq2 = env.seq(alice); + { + auto const preAliceMPT = env.balance(alice, MPT); + auto const preBobMPT = env.balance(bob, MPT); + env(escrow::create(alice, bob, MPT(1'000)), + escrow::condition(escrow::cb2), + escrow::finish_time(env.now() + 1s), + escrow::cancel_time(env.now() + 2s), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + BEAST_EXPECT(env.balance(alice, MPT) == preAliceMPT - MPT(1'000)); + BEAST_EXPECT(mptEscrowed(env, alice, MPT) == 1'000); + BEAST_EXPECT(env.balance(bob, MPT) == preBobMPT); + BEAST_EXPECT(mptEscrowed(env, bob, MPT) == 0); + BEAST_EXPECT(env.balance(gw, MPT) == outstandingMPT); + BEAST_EXPECT(issuerMPTEscrowed(env, MPT) == 1'000); + } + { + auto const preAliceMPT = env.balance(alice, MPT); + auto const preBobMPT = env.balance(bob, MPT); + 
env(escrow::cancel(bob, alice, seq2), ter(tesSUCCESS)); + env.close(); + + BEAST_EXPECT(env.balance(alice, MPT) == preAliceMPT + MPT(1'000)); + BEAST_EXPECT(mptEscrowed(env, alice, MPT) == 0); + BEAST_EXPECT(env.balance(bob, MPT) == preBobMPT); + BEAST_EXPECT(mptEscrowed(env, bob, MPT) == 0); + BEAST_EXPECT(env.balance(gw, MPT) == outstandingMPT); + BEAST_EXPECT(issuerMPTEscrowed(env, MPT) == 0); + } + + // Self Escrow Create & Finish + { + auto const seq = env.seq(alice); + auto const preAliceMPT = env.balance(alice, MPT); + env(escrow::create(alice, alice, MPT(1'000)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + BEAST_EXPECT(env.balance(alice, MPT) == preAliceMPT - MPT(1'000)); + BEAST_EXPECT(mptEscrowed(env, alice, MPT) == 1'000); + BEAST_EXPECT(env.balance(gw, MPT) == outstandingMPT); + BEAST_EXPECT(issuerMPTEscrowed(env, MPT) == 1'000); + + env(escrow::finish(alice, alice, seq), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + BEAST_EXPECT(env.balance(alice, MPT) == preAliceMPT); + BEAST_EXPECT(mptEscrowed(env, alice, MPT) == 0); + BEAST_EXPECT(env.balance(gw, MPT) == outstandingMPT); + BEAST_EXPECT(issuerMPTEscrowed(env, MPT) == 0); + } + + // Self Escrow Create & Cancel + { + auto const seq = env.seq(alice); + auto const preAliceMPT = env.balance(alice, MPT); + env(escrow::create(alice, alice, MPT(1'000)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + escrow::cancel_time(env.now() + 2s), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + BEAST_EXPECT(env.balance(alice, MPT) == preAliceMPT - MPT(1'000)); + BEAST_EXPECT(mptEscrowed(env, alice, MPT) == 1'000); + BEAST_EXPECT(env.balance(gw, MPT) == outstandingMPT); + BEAST_EXPECT(issuerMPTEscrowed(env, MPT) == 1'000); + + env(escrow::cancel(alice, alice, seq), ter(tesSUCCESS)); + env.close(); + + BEAST_EXPECT(env.balance(alice, MPT) == preAliceMPT); + BEAST_EXPECT(mptEscrowed(env, alice, MPT) == 0); + BEAST_EXPECT(env.balance(gw, MPT) == outstandingMPT); + BEAST_EXPECT(issuerMPTEscrowed(env, MPT) == 0); + } + + // Multiple Escrows + { + auto const preAliceMPT = env.balance(alice, MPT); + auto const preBobMPT = env.balance(bob, MPT); + auto const preCarolMPT = env.balance(carol, MPT); + env(escrow::create(alice, bob, MPT(1'000)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + env(escrow::create(carol, bob, MPT(1'000)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + BEAST_EXPECT(env.balance(alice, MPT) == preAliceMPT - MPT(1'000)); + BEAST_EXPECT(mptEscrowed(env, alice, MPT) == 1'000); + BEAST_EXPECT(env.balance(bob, MPT) == preBobMPT); + BEAST_EXPECT(mptEscrowed(env, bob, MPT) == 0); + BEAST_EXPECT(env.balance(carol, MPT) == preCarolMPT - MPT(1'000)); + BEAST_EXPECT(mptEscrowed(env, carol, MPT) == 1'000); + BEAST_EXPECT(env.balance(gw, MPT) == outstandingMPT); + BEAST_EXPECT(issuerMPTEscrowed(env, MPT) == 2'000); + } + + // Max MPT Amount Issued (Escrow 1 MPT) + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + + MPTTester mptGw(env, gw, {.holders = {alice, bob}}); + mptGw.create( + {.ownerCount = 1, + .holderCount = 0, + 
.flags = tfMPTCanEscrow | tfMPTCanTransfer}); + mptGw.authorize({.account = alice}); + mptGw.authorize({.account = bob}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(maxMPTokenAmount))); + env.close(); + + auto const preAliceMPT = env.balance(alice, MPT); + auto const preBobMPT = env.balance(bob, MPT); + auto const outstandingMPT = env.balance(gw, MPT); + + auto const seq1 = env.seq(alice); + env(escrow::create(alice, bob, MPT(1)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150)); + env.close(); + + BEAST_EXPECT(env.balance(alice, MPT) == preAliceMPT - MPT(1)); + BEAST_EXPECT(mptEscrowed(env, alice, MPT) == 1); + BEAST_EXPECT(env.balance(bob, MPT) == preBobMPT); + BEAST_EXPECT(mptEscrowed(env, bob, MPT) == 0); + BEAST_EXPECT(env.balance(gw, MPT) == outstandingMPT); + BEAST_EXPECT(issuerMPTEscrowed(env, MPT) == 1); + + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + BEAST_EXPECT(env.balance(alice, MPT) == preAliceMPT - MPT(1)); + BEAST_EXPECT(mptEscrowed(env, alice, MPT) == 0); + BEAST_EXPECT(!env.le(keylet::mptoken(MPT.mpt(), alice)) + ->isFieldPresent(sfLockedAmount)); + BEAST_EXPECT(env.balance(bob, MPT) == preBobMPT + MPT(1)); + BEAST_EXPECT(mptEscrowed(env, bob, MPT) == 0); + BEAST_EXPECT(env.balance(gw, MPT) == outstandingMPT); + BEAST_EXPECT(issuerMPTEscrowed(env, MPT) == 0); + BEAST_EXPECT(!env.le(keylet::mptIssuance(MPT.mpt())) + ->isFieldPresent(sfLockedAmount)); + } + + // Max MPT Amount Issued (Escrow Max MPT) + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + + MPTTester mptGw(env, gw, {.holders = {alice, bob}}); + mptGw.create( + {.ownerCount = 1, + .holderCount = 0, + .flags = tfMPTCanEscrow | tfMPTCanTransfer}); + mptGw.authorize({.account = alice}); + mptGw.authorize({.account = bob}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(maxMPTokenAmount))); + env.close(); + + auto const preAliceMPT = env.balance(alice, MPT); + auto const preBobMPT = env.balance(bob, MPT); + auto const outstandingMPT = env.balance(gw, MPT); + + // Escrow Max MPT - 10 + auto const seq1 = env.seq(alice); + env(escrow::create(alice, bob, MPT(maxMPTokenAmount - 10)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150)); + env.close(); + + // Escrow 10 MPT + auto const seq2 = env.seq(alice); + env(escrow::create(alice, bob, MPT(10)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150)); + env.close(); + + BEAST_EXPECT( + env.balance(alice, MPT) == preAliceMPT - MPT(maxMPTokenAmount)); + BEAST_EXPECT(mptEscrowed(env, alice, MPT) == maxMPTokenAmount); + BEAST_EXPECT(env.balance(bob, MPT) == preBobMPT); + BEAST_EXPECT(mptEscrowed(env, bob, MPT) == 0); + BEAST_EXPECT(env.balance(gw, MPT) == outstandingMPT); + BEAST_EXPECT(issuerMPTEscrowed(env, MPT) == maxMPTokenAmount); + + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + env(escrow::finish(bob, alice, seq2), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + BEAST_EXPECT( + env.balance(alice, MPT) == preAliceMPT - MPT(maxMPTokenAmount)); + 
BEAST_EXPECT(mptEscrowed(env, alice, MPT) == 0); + BEAST_EXPECT( + env.balance(bob, MPT) == preBobMPT + MPT(maxMPTokenAmount)); + BEAST_EXPECT(mptEscrowed(env, bob, MPT) == 0); + BEAST_EXPECT(env.balance(gw, MPT) == outstandingMPT); + BEAST_EXPECT(issuerMPTEscrowed(env, MPT) == 0); + } + } + + void + testMPTMetaAndOwnership(FeatureBitset features) + { + using namespace jtx; + using namespace std::chrono; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const carol = Account("carol"); + auto const gw = Account{"gateway"}; + { + testcase("MPT Metadata to self"); + + Env env{*this, features}; + MPTTester mptGw(env, gw, {.holders = {alice, bob}}); + mptGw.create( + {.ownerCount = 1, + .holderCount = 0, + .flags = tfMPTCanEscrow | tfMPTCanTransfer}); + mptGw.authorize({.account = alice}); + mptGw.authorize({.account = bob}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(10'000))); + env(pay(gw, bob, MPT(10'000))); + env.close(); + auto const aseq = env.seq(alice); + auto const bseq = env.seq(bob); + + env(escrow::create(alice, alice, MPT(1'000)), + escrow::finish_time(env.now() + 1s), + escrow::cancel_time(env.now() + 500s)); + BEAST_EXPECT( + (*env.meta())[sfTransactionResult] == + static_cast<std::uint8_t>(tesSUCCESS)); + env.close(5s); + auto const aa = env.le(keylet::escrow(alice.id(), aseq)); + BEAST_EXPECT(aa); + { + ripple::Dir aod(*env.current(), keylet::ownerDir(alice.id())); + BEAST_EXPECT(std::distance(aod.begin(), aod.end()) == 2); + BEAST_EXPECT( + std::find(aod.begin(), aod.end(), aa) != aod.end()); + } + + { + ripple::Dir iod(*env.current(), keylet::ownerDir(gw.id())); + BEAST_EXPECT(std::distance(iod.begin(), iod.end()) == 1); + BEAST_EXPECT( + std::find(iod.begin(), iod.end(), aa) == iod.end()); + } + + env(escrow::create(bob, bob, MPT(1'000)), + escrow::finish_time(env.now() + 1s), + escrow::cancel_time(env.now() + 2s)); + BEAST_EXPECT( + (*env.meta())[sfTransactionResult] == + static_cast<std::uint8_t>(tesSUCCESS)); + env.close(5s); + auto const bb = env.le(keylet::escrow(bob.id(), bseq)); + BEAST_EXPECT(bb); + + { + ripple::Dir bod(*env.current(), keylet::ownerDir(bob.id())); + BEAST_EXPECT(std::distance(bod.begin(), bod.end()) == 2); + BEAST_EXPECT( + std::find(bod.begin(), bod.end(), bb) != bod.end()); + } + + env.close(5s); + env(escrow::finish(alice, alice, aseq)); + { + BEAST_EXPECT(!env.le(keylet::escrow(alice.id(), aseq))); + BEAST_EXPECT( + (*env.meta())[sfTransactionResult] == + static_cast<std::uint8_t>(tesSUCCESS)); + + ripple::Dir aod(*env.current(), keylet::ownerDir(alice.id())); + BEAST_EXPECT(std::distance(aod.begin(), aod.end()) == 1); + BEAST_EXPECT( + std::find(aod.begin(), aod.end(), aa) == aod.end()); + + ripple::Dir bod(*env.current(), keylet::ownerDir(bob.id())); + BEAST_EXPECT(std::distance(bod.begin(), bod.end()) == 2); + BEAST_EXPECT( + std::find(bod.begin(), bod.end(), bb) != bod.end()); + } + + env.close(5s); + env(escrow::cancel(bob, bob, bseq)); + { + BEAST_EXPECT(!env.le(keylet::escrow(bob.id(), bseq))); + BEAST_EXPECT( + (*env.meta())[sfTransactionResult] == + static_cast<std::uint8_t>(tesSUCCESS)); + + ripple::Dir bod(*env.current(), keylet::ownerDir(bob.id())); + BEAST_EXPECT(std::distance(bod.begin(), bod.end()) == 1); + BEAST_EXPECT( + std::find(bod.begin(), bod.end(), bb) == bod.end()); + } + } + + { + testcase("MPT Metadata to other"); + + Env env{*this, features}; + MPTTester mptGw(env, gw, {.holders = {alice, bob, carol}}); + mptGw.create( + {.ownerCount = 1, + .holderCount = 0, + .flags = tfMPTCanEscrow | tfMPTCanTransfer}); + 
mptGw.authorize({.account = alice}); + mptGw.authorize({.account = bob}); + mptGw.authorize({.account = carol}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(10'000))); + env(pay(gw, bob, MPT(10'000))); + env(pay(gw, carol, MPT(10'000))); + env.close(); + auto const aseq = env.seq(alice); + auto const bseq = env.seq(bob); + + env(escrow::create(alice, bob, MPT(1'000)), + escrow::finish_time(env.now() + 1s)); + BEAST_EXPECT( + (*env.meta())[sfTransactionResult] == + static_cast<std::uint8_t>(tesSUCCESS)); + env.close(5s); + env(escrow::create(bob, carol, MPT(1'000)), + escrow::finish_time(env.now() + 1s), + escrow::cancel_time(env.now() + 2s)); + BEAST_EXPECT( + (*env.meta())[sfTransactionResult] == + static_cast<std::uint8_t>(tesSUCCESS)); + env.close(5s); + + auto const ab = env.le(keylet::escrow(alice.id(), aseq)); + BEAST_EXPECT(ab); + + auto const bc = env.le(keylet::escrow(bob.id(), bseq)); + BEAST_EXPECT(bc); + + { + ripple::Dir aod(*env.current(), keylet::ownerDir(alice.id())); + BEAST_EXPECT(std::distance(aod.begin(), aod.end()) == 2); + BEAST_EXPECT( + std::find(aod.begin(), aod.end(), ab) != aod.end()); + + ripple::Dir bod(*env.current(), keylet::ownerDir(bob.id())); + BEAST_EXPECT(std::distance(bod.begin(), bod.end()) == 3); + BEAST_EXPECT( + std::find(bod.begin(), bod.end(), ab) != bod.end()); + BEAST_EXPECT( + std::find(bod.begin(), bod.end(), bc) != bod.end()); + + ripple::Dir cod(*env.current(), keylet::ownerDir(carol.id())); + BEAST_EXPECT(std::distance(cod.begin(), cod.end()) == 2); + BEAST_EXPECT( + std::find(cod.begin(), cod.end(), bc) != cod.end()); + } + + env.close(5s); + env(escrow::finish(alice, alice, aseq)); + { + BEAST_EXPECT(!env.le(keylet::escrow(alice.id(), aseq))); + BEAST_EXPECT(env.le(keylet::escrow(bob.id(), bseq))); + + ripple::Dir aod(*env.current(), keylet::ownerDir(alice.id())); + BEAST_EXPECT(std::distance(aod.begin(), aod.end()) == 1); + BEAST_EXPECT( + std::find(aod.begin(), aod.end(), ab) == aod.end()); + + ripple::Dir bod(*env.current(), keylet::ownerDir(bob.id())); + BEAST_EXPECT(std::distance(bod.begin(), bod.end()) == 2); + BEAST_EXPECT( + std::find(bod.begin(), bod.end(), ab) == bod.end()); + BEAST_EXPECT( + std::find(bod.begin(), bod.end(), bc) != bod.end()); + + ripple::Dir cod(*env.current(), keylet::ownerDir(carol.id())); + BEAST_EXPECT(std::distance(cod.begin(), cod.end()) == 2); + } + + env.close(5s); + env(escrow::cancel(bob, bob, bseq)); + { + BEAST_EXPECT(!env.le(keylet::escrow(alice.id(), aseq))); + BEAST_EXPECT(!env.le(keylet::escrow(bob.id(), bseq))); + + ripple::Dir aod(*env.current(), keylet::ownerDir(alice.id())); + BEAST_EXPECT(std::distance(aod.begin(), aod.end()) == 1); + BEAST_EXPECT( + std::find(aod.begin(), aod.end(), ab) == aod.end()); + + ripple::Dir bod(*env.current(), keylet::ownerDir(bob.id())); + BEAST_EXPECT(std::distance(bod.begin(), bod.end()) == 1); + BEAST_EXPECT( + std::find(bod.begin(), bod.end(), ab) == bod.end()); + BEAST_EXPECT( + std::find(bod.begin(), bod.end(), bc) == bod.end()); + + ripple::Dir cod(*env.current(), keylet::ownerDir(carol.id())); + BEAST_EXPECT(std::distance(cod.begin(), cod.end()) == 1); + } + } + } + + void + testMPTGateway(FeatureBitset features) + { + testcase("MPT Gateway Balances"); + using namespace test::jtx; + using namespace std::literals; + + // issuer is dest; alice w/ authorization + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const gw = Account("gw"); + + MPTTester mptGw(env, gw, {.holders = {alice}}); + 
mptGw.create( + {.ownerCount = 1, + .holderCount = 0, + .flags = tfMPTCanEscrow | tfMPTCanTransfer}); + mptGw.authorize({.account = alice}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(10'000))); + env.close(); + + // issuer can be destination + auto const seq1 = env.seq(alice); + auto const preAliceMPT = env.balance(alice, MPT); + auto const preOutstanding = env.balance(gw, MPT); + auto const preEscrowed = issuerMPTEscrowed(env, MPT); + BEAST_EXPECT(preOutstanding == MPT(10'000)); + BEAST_EXPECT(preEscrowed == 0); + + env(escrow::create(alice, gw, MPT(1'000)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150)); + env.close(); + + BEAST_EXPECT(env.balance(alice, MPT) == preAliceMPT - MPT(1'000)); + BEAST_EXPECT(mptEscrowed(env, alice, MPT) == 1'000); + BEAST_EXPECT(env.balance(gw, MPT) == preOutstanding); + BEAST_EXPECT(issuerMPTEscrowed(env, MPT) == preEscrowed + 1'000); + + // issuer (dest) can finish escrow + env(escrow::finish(gw, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150)); + env.close(); + + BEAST_EXPECT(env.balance(alice, MPT) == preAliceMPT - MPT(1'000)); + BEAST_EXPECT(mptEscrowed(env, alice, MPT) == 0); + BEAST_EXPECT(env.balance(gw, MPT) == preOutstanding - MPT(1'000)); + BEAST_EXPECT(issuerMPTEscrowed(env, MPT) == preEscrowed); + } + } + + void + testMPTLockedRate(FeatureBitset features) + { + testcase("MPT Locked Rate"); + using namespace test::jtx; + using namespace std::literals; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const carol = Account("carol"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + + // test locked rate: finish + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + + MPTTester mptGw(env, gw, {.holders = {alice, bob}}); + mptGw.create( + {.transferFee = 25000, + .ownerCount = 1, + .holderCount = 0, + .flags = tfMPTCanEscrow | tfMPTCanTransfer}); + mptGw.authorize({.account = alice}); + mptGw.authorize({.account = bob}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(10'000))); + env(pay(gw, bob, MPT(10'000))); + env.close(); + + // alice can create escrow w/ xfer rate + auto const preAlice = env.balance(alice, MPT); + auto const seq1 = env.seq(alice); + auto const delta = MPT(125); + env(escrow::create(alice, bob, MPT(125)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150)); + env.close(); + auto const transferRate = escrow::rate(env, alice, seq1); + BEAST_EXPECT( + transferRate.value == std::uint32_t(1'000'000'000 * 1.25)); + + // bob can finish escrow + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150)); + env.close(); + + BEAST_EXPECT(env.balance(alice, MPT) == preAlice - delta); + BEAST_EXPECT(env.balance(bob, MPT) == MPT(10'100)); + } + + // test locked rate: cancel + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + + MPTTester mptGw(env, gw, {.holders = {alice, bob}}); + mptGw.create( + {.transferFee = 25000, + .ownerCount = 1, + .holderCount = 0, + .flags = tfMPTCanEscrow | tfMPTCanTransfer}); + mptGw.authorize({.account = alice}); + mptGw.authorize({.account = 
bob}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(10'000))); + env(pay(gw, bob, MPT(10'000))); + env.close(); + + // alice can create escrow w/ xfer rate + auto const preAlice = env.balance(alice, MPT); + auto const preBob = env.balance(bob, MPT); + auto const seq1 = env.seq(alice); + auto const delta = MPT(125); + env(escrow::create(alice, bob, MPT(125)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + escrow::cancel_time(env.now() + 3s), + fee(baseFee * 150)); + env.close(); + auto const transferRate = escrow::rate(env, alice, seq1); + BEAST_EXPECT( + transferRate.value == std::uint32_t(1'000'000'000 * 1.25)); + + // alice can cancel escrow + env(escrow::cancel(alice, alice, seq1), fee(baseFee)); + env.close(); + + BEAST_EXPECT(env.balance(alice, MPT) == preAlice); + BEAST_EXPECT(env.balance(bob, MPT) == preBob); + } + } + + void + testMPTRequireAuth(FeatureBitset features) + { + testcase("MPT Require Auth"); + using namespace test::jtx; + using namespace std::literals; + + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + + MPTTester mptGw(env, gw, {.holders = {alice, bob}}); + mptGw.create( + {.ownerCount = 1, + .holderCount = 0, + .flags = tfMPTCanEscrow | tfMPTCanTransfer | tfMPTRequireAuth}); + mptGw.authorize({.account = alice}); + mptGw.authorize({.account = gw, .holder = alice}); + mptGw.authorize({.account = bob}); + mptGw.authorize({.account = gw, .holder = bob}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(10'000))); + env.close(); + + auto seq = env.seq(alice); + auto const delta = MPT(125); + // alice can create escrow - is authorized + env(escrow::create(alice, bob, MPT(100)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150)); + env.close(); + + // bob can finish escrow - is authorized + env(escrow::finish(bob, alice, seq), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150)); + env.close(); + } + + void + testMPTLock(FeatureBitset features) + { + testcase("MPT Lock"); + using namespace test::jtx; + using namespace std::literals; + + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + + MPTTester mptGw(env, gw, {.holders = {alice, bob}}); + mptGw.create( + {.ownerCount = 1, + .holderCount = 0, + .flags = tfMPTCanEscrow | tfMPTCanTransfer | tfMPTCanLock}); + mptGw.authorize({.account = alice}); + mptGw.authorize({.account = bob}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(10'000))); + env(pay(gw, bob, MPT(10'000))); + env.close(); + + // alice create escrow + auto seq1 = env.seq(alice); + env(escrow::create(alice, bob, MPT(100)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + escrow::cancel_time(env.now() + 2s), + fee(baseFee * 150)); + env.close(); + + // lock account & dest + mptGw.set({.account = gw, .holder = alice, .flags = tfMPTLock}); + mptGw.set({.account = gw, .holder = bob, .flags = tfMPTLock}); + + // bob cannot finish + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150), + ter(tecLOCKED)); + env.close(); + + // bob can cancel + env(escrow::cancel(bob, alice, seq1)); + env.close(); + } + + void + testMPTCanTransfer(FeatureBitset features) + { 
+ testcase("MPT Can Transfer"); + using namespace test::jtx; + using namespace std::literals; + + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + + MPTTester mptGw(env, gw, {.holders = {alice, bob}}); + mptGw.create( + {.ownerCount = 1, .holderCount = 0, .flags = tfMPTCanEscrow}); + mptGw.authorize({.account = alice}); + mptGw.authorize({.account = bob}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(10'000))); + env(pay(gw, bob, MPT(10'000))); + env.close(); + + // alice cannot create escrow to non issuer + env(escrow::create(alice, bob, MPT(100)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + escrow::cancel_time(env.now() + 2s), + fee(baseFee * 150), + ter(tecNO_AUTH)); + env.close(); + + // Escrow Create & Finish + { + // alice an create escrow to issuer + auto seq = env.seq(alice); + env(escrow::create(alice, gw, MPT(100)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150)); + env.close(); + + // gw can finish + env(escrow::finish(gw, alice, seq), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150)); + env.close(); + } + + // Escrow Create & Cancel + { + // alice an create escrow to issuer + auto seq = env.seq(alice); + env(escrow::create(alice, gw, MPT(100)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + escrow::cancel_time(env.now() + 2s), + fee(baseFee * 150)); + env.close(); + + // alice can cancel + env(escrow::cancel(alice, alice, seq)); + env.close(); + } + } + + void + testMPTDestroy(FeatureBitset features) + { + testcase("MPT Destroy"); + using namespace test::jtx; + using namespace std::literals; + + // tecHAS_OBLIGATIONS: issuer cannot destroy issuance + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + + MPTTester mptGw(env, gw, {.holders = {alice, bob}}); + mptGw.create( + {.ownerCount = 1, + .holderCount = 0, + .flags = tfMPTCanEscrow | tfMPTCanTransfer}); + mptGw.authorize({.account = alice}); + mptGw.authorize({.account = bob}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(10'000))); + env(pay(gw, bob, MPT(10'000))); + env.close(); + + auto const seq1 = env.seq(alice); + env(escrow::create(alice, bob, MPT(10)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150)); + env.close(); + + env(pay(alice, gw, MPT(9'990))); + env(pay(bob, gw, MPT(10'000))); + BEAST_EXPECT(env.balance(alice, MPT) == MPT(0)); + BEAST_EXPECT(mptEscrowed(env, alice, MPT) == 10); + BEAST_EXPECT(env.balance(bob, MPT) == MPT(0)); + BEAST_EXPECT(mptEscrowed(env, bob, MPT) == 0); + mptGw.authorize({.account = bob, .flags = tfMPTUnauthorize}); + mptGw.destroy( + {.id = mptGw.issuanceID(), + .ownerCount = 1, + .err = tecHAS_OBLIGATIONS}); + + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + env(pay(bob, gw, MPT(10))); + mptGw.destroy({.id = mptGw.issuanceID(), .ownerCount = 0}); + } + + // tecHAS_OBLIGATIONS: holder cannot destroy mptoken + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = 
Account("gw"); + env.fund(XRP(10'000), bob); + env.close(); + + MPTTester mptGw(env, gw, {.holders = {alice}}); + mptGw.create( + {.ownerCount = 1, + .holderCount = 0, + .flags = tfMPTCanEscrow | tfMPTCanTransfer}); + mptGw.authorize({.account = alice}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(10'000))); + env.close(); + + auto const seq1 = env.seq(alice); + env(escrow::create(alice, bob, MPT(10)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + env(pay(alice, gw, MPT(9'990))); + env.close(); + + BEAST_EXPECT(env.balance(alice, MPT) == MPT(0)); + BEAST_EXPECT(mptEscrowed(env, alice, MPT) == 10); + mptGw.authorize( + {.account = alice, + .flags = tfMPTUnauthorize, + .err = tecHAS_OBLIGATIONS}); + + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + BEAST_EXPECT(env.balance(alice, MPT) == MPT(0)); + BEAST_EXPECT(mptEscrowed(env, alice, MPT) == 0); + mptGw.authorize({.account = alice, .flags = tfMPTUnauthorize}); + BEAST_EXPECT(!env.le(keylet::mptoken(MPT.mpt(), alice))); + } + } + + void + testIOUWithFeats(FeatureBitset features) + { + testIOUEnablement(features); + testIOUAllowLockingFlag(features); + testIOUCreatePreflight(features); + testIOUCreatePreclaim(features); + testIOUFinishPreclaim(features); + testIOUFinishDoApply(features); + testIOUCancelPreclaim(features); + testIOUBalances(features); + testIOUMetaAndOwnership(features); + testIOURippleState(features); + testIOUGateway(features); + testIOULockedRate(features); + testIOULimitAmount(features); + testIOURequireAuth(features); + testIOUFreeze(features); + testIOUINSF(features); + testIOUPrecisionLoss(features); + } + + void + testMPTWithFeats(FeatureBitset features) + { + testMPTEnablement(features); + testMPTCreatePreflight(features); + testMPTCreatePreclaim(features); + testMPTFinishPreclaim(features); + testMPTFinishDoApply(features); + testMPTCancelPreclaim(features); + testMPTBalances(features); + testMPTMetaAndOwnership(features); + testMPTGateway(features); + testMPTLockedRate(features); + testMPTRequireAuth(features); + testMPTLock(features); + testMPTCanTransfer(features); + testMPTDestroy(features); + } + +public: + void + run() override + { + using namespace test::jtx; + FeatureBitset const all{supported_amendments()}; + testIOUWithFeats(all); + testMPTWithFeats(all); + } +}; + +BEAST_DEFINE_TESTSUITE(EscrowToken, app, ripple); + +} // namespace test +} // namespace ripple diff --git a/src/test/app/Escrow_test.cpp b/src/test/app/Escrow_test.cpp index 1129019aab..aa86ad338e 100644 --- a/src/test/app/Escrow_test.cpp +++ b/src/test/app/Escrow_test.cpp @@ -35,81 +35,53 @@ namespace test { struct Escrow_test : public beast::unit_test::suite { - // A PreimageSha256 fulfillments and its associated condition. - std::array const fb1 = {{0xA0, 0x02, 0x80, 0x00}}; - - std::array const cb1 = { - {0xA0, 0x25, 0x80, 0x20, 0xE3, 0xB0, 0xC4, 0x42, 0x98, 0xFC, - 0x1C, 0x14, 0x9A, 0xFB, 0xF4, 0xC8, 0x99, 0x6F, 0xB9, 0x24, - 0x27, 0xAE, 0x41, 0xE4, 0x64, 0x9B, 0x93, 0x4C, 0xA4, 0x95, - 0x99, 0x1B, 0x78, 0x52, 0xB8, 0x55, 0x81, 0x01, 0x00}}; - - // Another PreimageSha256 fulfillments and its associated condition. 
- std::array const fb2 = { - {0xA0, 0x05, 0x80, 0x03, 0x61, 0x61, 0x61}}; - - std::array const cb2 = { - {0xA0, 0x25, 0x80, 0x20, 0x98, 0x34, 0x87, 0x6D, 0xCF, 0xB0, - 0x5C, 0xB1, 0x67, 0xA5, 0xC2, 0x49, 0x53, 0xEB, 0xA5, 0x8C, - 0x4A, 0xC8, 0x9B, 0x1A, 0xDF, 0x57, 0xF2, 0x8F, 0x2F, 0x9D, - 0x09, 0xAF, 0x10, 0x7E, 0xE8, 0xF0, 0x81, 0x01, 0x03}}; - - // Another PreimageSha256 fulfillment and its associated condition. - std::array const fb3 = { - {0xA0, 0x06, 0x80, 0x04, 0x6E, 0x69, 0x6B, 0x62}}; - - std::array const cb3 = { - {0xA0, 0x25, 0x80, 0x20, 0x6E, 0x4C, 0x71, 0x45, 0x30, 0xC0, - 0xA4, 0x26, 0x8B, 0x3F, 0xA6, 0x3B, 0x1B, 0x60, 0x6F, 0x2D, - 0x26, 0x4A, 0x2D, 0x85, 0x7B, 0xE8, 0xA0, 0x9C, 0x1D, 0xFD, - 0x57, 0x0D, 0x15, 0x85, 0x8B, 0xD4, 0x81, 0x01, 0x04}}; - void - testEnablement() + testEnablement(FeatureBitset features) { testcase("Enablement"); using namespace jtx; using namespace std::chrono; - Env env(*this); + Env env(*this, features); auto const baseFee = env.current()->fees().base; env.fund(XRP(5000), "alice", "bob"); - env(escrow("alice", "bob", XRP(1000)), finish_time(env.now() + 1s)); + env(escrow::create("alice", "bob", XRP(1000)), + escrow::finish_time(env.now() + 1s)); env.close(); auto const seq1 = env.seq("alice"); - env(escrow("alice", "bob", XRP(1000)), - condition(cb1), - finish_time(env.now() + 1s), + env(escrow::create("alice", "bob", XRP(1000)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), fee(baseFee * 150)); env.close(); - env(finish("bob", "alice", seq1), - condition(cb1), - fulfillment(fb1), + env(escrow::finish("bob", "alice", seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), fee(baseFee * 150)); auto const seq2 = env.seq("alice"); - env(escrow("alice", "bob", XRP(1000)), - condition(cb2), - finish_time(env.now() + 1s), - cancel_time(env.now() + 2s), + env(escrow::create("alice", "bob", XRP(1000)), + escrow::condition(escrow::cb2), + escrow::finish_time(env.now() + 1s), + escrow::cancel_time(env.now() + 2s), fee(baseFee * 150)); env.close(); - env(cancel("bob", "alice", seq2), fee(baseFee * 150)); + env(escrow::cancel("bob", "alice", seq2), fee(baseFee * 150)); } void - testTiming() + testTiming(FeatureBitset features) { using namespace jtx; using namespace std::chrono; { testcase("Timing: Finish Only"); - Env env(*this); + Env env(*this, features); auto const baseFee = env.current()->fees().base; env.fund(XRP(5000), "alice", "bob"); env.close(); @@ -118,21 +90,22 @@ struct Escrow_test : public beast::unit_test::suite auto const ts = env.now() + 97s; auto const seq = env.seq("alice"); - env(escrow("alice", "bob", XRP(1000)), finish_time(ts)); + env(escrow::create("alice", "bob", XRP(1000)), + escrow::finish_time(ts)); // Advance the ledger, verifying that the finish won't complete // prematurely. 
for (; env.now() < ts; env.close()) - env(finish("bob", "alice", seq), + env(escrow::finish("bob", "alice", seq), fee(baseFee * 150), ter(tecNO_PERMISSION)); - env(finish("bob", "alice", seq), fee(baseFee * 150)); + env(escrow::finish("bob", "alice", seq), fee(baseFee * 150)); } { testcase("Timing: Cancel Only"); - Env env(*this); + Env env(*this, features); auto const baseFee = env.current()->fees().base; env.fund(XRP(5000), "alice", "bob"); env.close(); @@ -141,31 +114,31 @@ struct Escrow_test : public beast::unit_test::suite auto const ts = env.now() + 117s; auto const seq = env.seq("alice"); - env(escrow("alice", "bob", XRP(1000)), - condition(cb1), - cancel_time(ts)); + env(escrow::create("alice", "bob", XRP(1000)), + escrow::condition(escrow::cb1), + escrow::cancel_time(ts)); // Advance the ledger, verifying that the cancel won't complete // prematurely. for (; env.now() < ts; env.close()) - env(cancel("bob", "alice", seq), + env(escrow::cancel("bob", "alice", seq), fee(baseFee * 150), ter(tecNO_PERMISSION)); // Verify that a finish won't work anymore. - env(finish("bob", "alice", seq), - condition(cb1), - fulfillment(fb1), + env(escrow::finish("bob", "alice", seq), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), fee(baseFee * 150), ter(tecNO_PERMISSION)); // Verify that the cancel will succeed - env(cancel("bob", "alice", seq), fee(baseFee * 150)); + env(escrow::cancel("bob", "alice", seq), fee(baseFee * 150)); } { testcase("Timing: Finish and Cancel -> Finish"); - Env env(*this); + Env env(*this, features); auto const baseFee = env.current()->fees().base; env.fund(XRP(5000), "alice", "bob"); env.close(); @@ -175,34 +148,34 @@ struct Escrow_test : public beast::unit_test::suite auto const cts = env.now() + 192s; auto const seq = env.seq("alice"); - env(escrow("alice", "bob", XRP(1000)), - finish_time(fts), - cancel_time(cts)); + env(escrow::create("alice", "bob", XRP(1000)), + escrow::finish_time(fts), + escrow::cancel_time(cts)); // Advance the ledger, verifying that the finish and cancel won't // complete prematurely. for (; env.now() < fts; env.close()) { - env(finish("bob", "alice", seq), + env(escrow::finish("bob", "alice", seq), fee(baseFee * 150), ter(tecNO_PERMISSION)); - env(cancel("bob", "alice", seq), + env(escrow::cancel("bob", "alice", seq), fee(baseFee * 150), ter(tecNO_PERMISSION)); } // Verify that a cancel still won't work - env(cancel("bob", "alice", seq), + env(escrow::cancel("bob", "alice", seq), fee(baseFee * 150), ter(tecNO_PERMISSION)); // And verify that a finish will - env(finish("bob", "alice", seq), fee(baseFee * 150)); + env(escrow::finish("bob", "alice", seq), fee(baseFee * 150)); } { testcase("Timing: Finish and Cancel -> Cancel"); - Env env(*this); + Env env(*this, features); auto const baseFee = env.current()->fees().base; env.fund(XRP(5000), "alice", "bob"); env.close(); @@ -212,18 +185,18 @@ struct Escrow_test : public beast::unit_test::suite auto const cts = env.now() + 184s; auto const seq = env.seq("alice"); - env(escrow("alice", "bob", XRP(1000)), - finish_time(fts), - cancel_time(cts)); + env(escrow::create("alice", "bob", XRP(1000)), + escrow::finish_time(fts), + escrow::cancel_time(cts)); // Advance the ledger, verifying that the finish and cancel won't // complete prematurely. 
for (; env.now() < fts; env.close()) { - env(finish("bob", "alice", seq), + env(escrow::finish("bob", "alice", seq), fee(baseFee * 150), ter(tecNO_PERMISSION)); - env(cancel("bob", "alice", seq), + env(escrow::cancel("bob", "alice", seq), fee(baseFee * 150), ter(tecNO_PERMISSION)); } @@ -231,30 +204,30 @@ struct Escrow_test : public beast::unit_test::suite // Continue advancing, verifying that the cancel won't complete // prematurely. At this point a finish would succeed. for (; env.now() < cts; env.close()) - env(cancel("bob", "alice", seq), + env(escrow::cancel("bob", "alice", seq), fee(baseFee * 150), ter(tecNO_PERMISSION)); // Verify that finish will no longer work, since we are past the // cancel activation time. - env(finish("bob", "alice", seq), + env(escrow::finish("bob", "alice", seq), fee(baseFee * 150), ter(tecNO_PERMISSION)); // And verify that a cancel will succeed. - env(cancel("bob", "alice", seq), fee(baseFee * 150)); + env(escrow::cancel("bob", "alice", seq), fee(baseFee * 150)); } } void - testTags() + testTags(FeatureBitset features) { testcase("Tags"); using namespace jtx; using namespace std::chrono; - Env env(*this); + Env env(*this, features); auto const alice = Account("alice"); auto const bob = Account("bob"); @@ -264,15 +237,15 @@ struct Escrow_test : public beast::unit_test::suite // Check to make sure that we correctly detect if tags are really // required: env(fset(bob, asfRequireDest)); - env(escrow(alice, bob, XRP(1000)), - finish_time(env.now() + 1s), + env(escrow::create(alice, bob, XRP(1000)), + escrow::finish_time(env.now() + 1s), ter(tecDST_TAG_NEEDED)); // set source and dest tags auto const seq = env.seq(alice); - env(escrow(alice, bob, XRP(1000)), - finish_time(env.now() + 1s), + env(escrow::create(alice, bob, XRP(1000)), + escrow::finish_time(env.now() + 1s), stag(1), dtag(2)); @@ -283,7 +256,7 @@ struct Escrow_test : public beast::unit_test::suite } void - testDisallowXRP() + testDisallowXRP(FeatureBitset features) { testcase("Disallow XRP"); @@ -292,27 +265,28 @@ struct Escrow_test : public beast::unit_test::suite { // Respect the "asfDisallowXRP" account flag: - Env env(*this, supported_amendments() - featureDepositAuth); + Env env(*this, features - featureDepositAuth); env.fund(XRP(5000), "bob", "george"); env(fset("george", asfDisallowXRP)); - env(escrow("bob", "george", XRP(10)), - finish_time(env.now() + 1s), + env(escrow::create("bob", "george", XRP(10)), + escrow::finish_time(env.now() + 1s), ter(tecNO_TARGET)); } { // Ignore the "asfDisallowXRP" account flag, which we should // have been doing before. 
- Env env(*this); + Env env(*this, features); env.fund(XRP(5000), "bob", "george"); env(fset("george", asfDisallowXRP)); - env(escrow("bob", "george", XRP(10)), finish_time(env.now() + 1s)); + env(escrow::create("bob", "george", XRP(10)), + escrow::finish_time(env.now() + 1s)); } } void - test1571() + test1571(FeatureBitset features) { using namespace jtx; using namespace std::chrono; @@ -328,11 +302,11 @@ struct Escrow_test : public beast::unit_test::suite // Creating an escrow without a finish time and finishing it // is allowed without fix1571: auto const seq1 = env.seq("alice"); - env(escrow("alice", "bob", XRP(100)), - cancel_time(env.now() + 1s), + env(escrow::create("alice", "bob", XRP(100)), + escrow::cancel_time(env.now() + 1s), fee(baseFee * 150)); env.close(); - env(finish("carol", "alice", seq1), fee(baseFee * 150)); + env(escrow::finish("carol", "alice", seq1), fee(baseFee * 150)); BEAST_EXPECT(env.balance("bob") == XRP(5100)); env.close(); @@ -340,14 +314,14 @@ struct Escrow_test : public beast::unit_test::suite // Creating an escrow without a finish time and a condition is // also allowed without fix1571: auto const seq2 = env.seq("alice"); - env(escrow("alice", "bob", XRP(100)), - cancel_time(env.now() + 1s), - condition(cb1), + env(escrow::create("alice", "bob", XRP(100)), + escrow::cancel_time(env.now() + 1s), + escrow::condition(escrow::cb1), fee(baseFee * 150)); env.close(); - env(finish("carol", "alice", seq2), - condition(cb1), - fulfillment(fb1), + env(escrow::finish("carol", "alice", seq2), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), fee(baseFee * 150)); BEAST_EXPECT(env.balance("bob") == XRP(5200)); } @@ -355,117 +329,125 @@ struct Escrow_test : public beast::unit_test::suite { testcase("Implied Finish Time (with fix1571)"); - Env env(*this); + Env env(*this, features); auto const baseFee = env.current()->fees().base; env.fund(XRP(5000), "alice", "bob", "carol"); env.close(); // Creating an escrow with only a cancel time is not allowed: - env(escrow("alice", "bob", XRP(100)), - cancel_time(env.now() + 90s), + env(escrow::create("alice", "bob", XRP(100)), + escrow::cancel_time(env.now() + 90s), fee(baseFee * 150), ter(temMALFORMED)); // Creating an escrow with only a cancel time and a condition is // allowed: auto const seq = env.seq("alice"); - env(escrow("alice", "bob", XRP(100)), - cancel_time(env.now() + 90s), - condition(cb1), + env(escrow::create("alice", "bob", XRP(100)), + escrow::cancel_time(env.now() + 90s), + escrow::condition(escrow::cb1), fee(baseFee * 150)); env.close(); - env(finish("carol", "alice", seq), - condition(cb1), - fulfillment(fb1), + env(escrow::finish("carol", "alice", seq), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), fee(baseFee * 150)); BEAST_EXPECT(env.balance("bob") == XRP(5100)); } } void - testFails() + testFails(FeatureBitset features) { testcase("Failure Cases"); using namespace jtx; using namespace std::chrono; - Env env(*this); + Env env(*this, features); auto const baseFee = env.current()->fees().base; - env.fund(XRP(5000), "alice", "bob"); + env.fund(XRP(5000), "alice", "bob", "gw"); env.close(); // Finish time is in the past - env(escrow("alice", "bob", XRP(1000)), - finish_time(env.now() - 5s), + env(escrow::create("alice", "bob", XRP(1000)), + escrow::finish_time(env.now() - 5s), ter(tecNO_PERMISSION)); // Cancel time is in the past - env(escrow("alice", "bob", XRP(1000)), - condition(cb1), - cancel_time(env.now() - 5s), + env(escrow::create("alice", "bob", XRP(1000)), + 
escrow::condition(escrow::cb1), + escrow::cancel_time(env.now() - 5s), ter(tecNO_PERMISSION)); // no destination account - env(escrow("alice", "carol", XRP(1000)), - finish_time(env.now() + 1s), + env(escrow::create("alice", "carol", XRP(1000)), + escrow::finish_time(env.now() + 1s), ter(tecNO_DST)); env.fund(XRP(5000), "carol"); // Using non-XRP: - env(escrow("alice", "carol", Account("alice")["USD"](500)), - finish_time(env.now() + 1s), - ter(temBAD_AMOUNT)); + bool const withTokenEscrow = + env.current()->rules().enabled(featureTokenEscrow); + { + // tecNO_PERMISSION: token escrow is enabled but the issuer did not + // set the asfAllowTrustLineLocking flag + auto const txResult = + withTokenEscrow ? ter(tecNO_PERMISSION) : ter(temBAD_AMOUNT); + env(escrow::create("alice", "carol", Account("alice")["USD"](500)), + escrow::finish_time(env.now() + 5s), + txResult); + } // Sending zero or no XRP: - env(escrow("alice", "carol", XRP(0)), - finish_time(env.now() + 1s), + env(escrow::create("alice", "carol", XRP(0)), + escrow::finish_time(env.now() + 1s), ter(temBAD_AMOUNT)); - env(escrow("alice", "carol", XRP(-1000)), - finish_time(env.now() + 1s), + env(escrow::create("alice", "carol", XRP(-1000)), + escrow::finish_time(env.now() + 1s), ter(temBAD_AMOUNT)); // Fail if neither CancelAfter nor FinishAfter are specified: - env(escrow("alice", "carol", XRP(1)), ter(temBAD_EXPIRATION)); + env(escrow::create("alice", "carol", XRP(1)), ter(temBAD_EXPIRATION)); // Fail if neither a FinishTime nor a condition are attached: - env(escrow("alice", "carol", XRP(1)), - cancel_time(env.now() + 1s), + env(escrow::create("alice", "carol", XRP(1)), + escrow::cancel_time(env.now() + 1s), ter(temMALFORMED)); // Fail if FinishAfter has already passed: - env(escrow("alice", "carol", XRP(1)), - finish_time(env.now() - 1s), + env(escrow::create("alice", "carol", XRP(1)), + escrow::finish_time(env.now() - 1s), ter(tecNO_PERMISSION)); // If both CancelAfter and FinishAfter are set, then CancelAfter must // be strictly later than FinishAfter. - env(escrow("alice", "carol", XRP(1)), - condition(cb1), - finish_time(env.now() + 10s), - cancel_time(env.now() + 10s), + env(escrow::create("alice", "carol", XRP(1)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 10s), + escrow::cancel_time(env.now() + 10s), ter(temBAD_EXPIRATION)); - env(escrow("alice", "carol", XRP(1)), - condition(cb1), - finish_time(env.now() + 10s), - cancel_time(env.now() + 5s), + env(escrow::create("alice", "carol", XRP(1)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 10s), + escrow::cancel_time(env.now() + 5s), ter(temBAD_EXPIRATION)); // Carol now requires the use of a destination tag env(fset("carol", asfRequireDest)); // missing destination tag - env(escrow("alice", "carol", XRP(1)), - condition(cb1), - cancel_time(env.now() + 1s), + env(escrow::create("alice", "carol", XRP(1)), + escrow::condition(escrow::cb1), + escrow::cancel_time(env.now() + 1s), ter(tecDST_TAG_NEEDED)); // Success! 
- env(escrow("alice", "carol", XRP(1)), - condition(cb1), - cancel_time(env.now() + 1s), + env(escrow::create("alice", "carol", XRP(1)), + escrow::condition(escrow::cb1), + escrow::cancel_time(env.now() + 1s), dtag(1)); { // Fail if the sender wants to send more than he has: @@ -474,29 +456,29 @@ struct Escrow_test : public beast::unit_test::suite drops(env.current()->fees().increment); env.fund(accountReserve + accountIncrement + XRP(50), "daniel"); - env(escrow("daniel", "bob", XRP(51)), - finish_time(env.now() + 1s), + env(escrow::create("daniel", "bob", XRP(51)), + escrow::finish_time(env.now() + 1s), ter(tecUNFUNDED)); env.fund(accountReserve + accountIncrement + XRP(50), "evan"); - env(escrow("evan", "bob", XRP(50)), - finish_time(env.now() + 1s), + env(escrow::create("evan", "bob", XRP(50)), + escrow::finish_time(env.now() + 1s), ter(tecUNFUNDED)); env.fund(accountReserve, "frank"); - env(escrow("frank", "bob", XRP(1)), - finish_time(env.now() + 1s), + env(escrow::create("frank", "bob", XRP(1)), + escrow::finish_time(env.now() + 1s), ter(tecINSUFFICIENT_RESERVE)); } { // Specify incorrect sequence number env.fund(XRP(5000), "hannah"); auto const seq = env.seq("hannah"); - env(escrow("hannah", "hannah", XRP(10)), - finish_time(env.now() + 1s), + env(escrow::create("hannah", "hannah", XRP(10)), + escrow::finish_time(env.now() + 1s), fee(150 * baseFee)); env.close(); - env(finish("hannah", "hannah", seq + 7), + env(escrow::finish("hannah", "hannah", seq + 7), fee(150 * baseFee), ter(tecNO_TARGET)); } @@ -505,18 +487,19 @@ struct Escrow_test : public beast::unit_test::suite env.fund(XRP(5000), "ivan"); auto const seq = env.seq("ivan"); - env(escrow("ivan", "ivan", XRP(10)), finish_time(env.now() + 1s)); + env(escrow::create("ivan", "ivan", XRP(10)), + escrow::finish_time(env.now() + 1s)); env.close(); - env(finish("ivan", "ivan", seq), - condition(cb1), - fulfillment(fb1), + env(escrow::finish("ivan", "ivan", seq), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), fee(150 * baseFee), ter(tecCRYPTOCONDITION_ERROR)); } } void - testLockup() + testLockup(FeatureBitset features) { testcase("Lockup"); @@ -525,49 +508,50 @@ struct Escrow_test : public beast::unit_test::suite { // Unconditional - Env env(*this); + Env env(*this, features); auto const baseFee = env.current()->fees().base; env.fund(XRP(5000), "alice", "bob"); auto const seq = env.seq("alice"); - env(escrow("alice", "alice", XRP(1000)), - finish_time(env.now() + 5s)); + env(escrow::create("alice", "alice", XRP(1000)), + escrow::finish_time(env.now() + 5s)); env.require(balance("alice", XRP(4000) - drops(baseFee))); // Not enough time has elapsed for a finish and canceling isn't // possible. - env(cancel("bob", "alice", seq), ter(tecNO_PERMISSION)); - env(finish("bob", "alice", seq), ter(tecNO_PERMISSION)); + env(escrow::cancel("bob", "alice", seq), ter(tecNO_PERMISSION)); + env(escrow::finish("bob", "alice", seq), ter(tecNO_PERMISSION)); env.close(); // Cancel continues to not be possible - env(cancel("bob", "alice", seq), ter(tecNO_PERMISSION)); + env(escrow::cancel("bob", "alice", seq), ter(tecNO_PERMISSION)); // Finish should succeed. Verify funds. - env(finish("bob", "alice", seq)); + env(escrow::finish("bob", "alice", seq)); env.require(balance("alice", XRP(5000) - drops(baseFee))); } { // Unconditionally pay from Alice to Bob. Zelda (neither source nor // destination) signs all cancels and finishes. This shows that // Escrow will make a payment to Bob with no intervention from Bob. 
- Env env(*this); + Env env(*this, features); auto const baseFee = env.current()->fees().base; env.fund(XRP(5000), "alice", "bob", "zelda"); auto const seq = env.seq("alice"); - env(escrow("alice", "bob", XRP(1000)), finish_time(env.now() + 5s)); + env(escrow::create("alice", "bob", XRP(1000)), + escrow::finish_time(env.now() + 5s)); env.require(balance("alice", XRP(4000) - drops(baseFee))); // Not enough time has elapsed for a finish and canceling isn't // possible. - env(cancel("zelda", "alice", seq), ter(tecNO_PERMISSION)); - env(finish("zelda", "alice", seq), ter(tecNO_PERMISSION)); + env(escrow::cancel("zelda", "alice", seq), ter(tecNO_PERMISSION)); + env(escrow::finish("zelda", "alice", seq), ter(tecNO_PERMISSION)); env.close(); // Cancel continues to not be possible - env(cancel("zelda", "alice", seq), ter(tecNO_PERMISSION)); + env(escrow::cancel("zelda", "alice", seq), ter(tecNO_PERMISSION)); // Finish should succeed. Verify funds. - env(finish("zelda", "alice", seq)); + env(escrow::finish("zelda", "alice", seq)); env.close(); env.require(balance("alice", XRP(4000) - drops(baseFee))); @@ -576,7 +560,7 @@ struct Escrow_test : public beast::unit_test::suite } { // Bob sets DepositAuth so only Bob can finish the escrow. - Env env(*this); + Env env(*this, features); auto const baseFee = env.current()->fees().base; env.fund(XRP(5000), "alice", "bob", "zelda"); @@ -584,27 +568,28 @@ struct Escrow_test : public beast::unit_test::suite env.close(); auto const seq = env.seq("alice"); - env(escrow("alice", "bob", XRP(1000)), finish_time(env.now() + 5s)); + env(escrow::create("alice", "bob", XRP(1000)), + escrow::finish_time(env.now() + 5s)); env.require(balance("alice", XRP(4000) - drops(baseFee))); // Not enough time has elapsed for a finish and canceling isn't // possible. - env(cancel("zelda", "alice", seq), ter(tecNO_PERMISSION)); - env(cancel("alice", "alice", seq), ter(tecNO_PERMISSION)); - env(cancel("bob", "alice", seq), ter(tecNO_PERMISSION)); - env(finish("zelda", "alice", seq), ter(tecNO_PERMISSION)); - env(finish("alice", "alice", seq), ter(tecNO_PERMISSION)); - env(finish("bob", "alice", seq), ter(tecNO_PERMISSION)); + env(escrow::cancel("zelda", "alice", seq), ter(tecNO_PERMISSION)); + env(escrow::cancel("alice", "alice", seq), ter(tecNO_PERMISSION)); + env(escrow::cancel("bob", "alice", seq), ter(tecNO_PERMISSION)); + env(escrow::finish("zelda", "alice", seq), ter(tecNO_PERMISSION)); + env(escrow::finish("alice", "alice", seq), ter(tecNO_PERMISSION)); + env(escrow::finish("bob", "alice", seq), ter(tecNO_PERMISSION)); env.close(); // Cancel continues to not be possible. Finish will only succeed for // Bob, because of DepositAuth. 
- env(cancel("zelda", "alice", seq), ter(tecNO_PERMISSION)); - env(cancel("alice", "alice", seq), ter(tecNO_PERMISSION)); - env(cancel("bob", "alice", seq), ter(tecNO_PERMISSION)); - env(finish("zelda", "alice", seq), ter(tecNO_PERMISSION)); - env(finish("alice", "alice", seq), ter(tecNO_PERMISSION)); - env(finish("bob", "alice", seq)); + env(escrow::cancel("zelda", "alice", seq), ter(tecNO_PERMISSION)); + env(escrow::cancel("alice", "alice", seq), ter(tecNO_PERMISSION)); + env(escrow::cancel("bob", "alice", seq), ter(tecNO_PERMISSION)); + env(escrow::finish("zelda", "alice", seq), ter(tecNO_PERMISSION)); + env(escrow::finish("alice", "alice", seq), ter(tecNO_PERMISSION)); + env(escrow::finish("bob", "alice", seq)); env.close(); env.require(balance("alice", XRP(4000) - (baseFee * 5))); @@ -614,7 +599,7 @@ struct Escrow_test : public beast::unit_test::suite { // Bob sets DepositAuth but preauthorizes Zelda, so Zelda can // finish the escrow. - Env env(*this); + Env env(*this, features); auto const baseFee = env.current()->fees().base; env.fund(XRP(5000), "alice", "bob", "zelda"); @@ -624,15 +609,16 @@ struct Escrow_test : public beast::unit_test::suite env.close(); auto const seq = env.seq("alice"); - env(escrow("alice", "bob", XRP(1000)), finish_time(env.now() + 5s)); + env(escrow::create("alice", "bob", XRP(1000)), + escrow::finish_time(env.now() + 5s)); env.require(balance("alice", XRP(4000) - drops(baseFee))); env.close(); // DepositPreauth allows Finish to succeed for either Zelda or // Bob. But Finish won't succeed for Alice since she is not // preauthorized. - env(finish("alice", "alice", seq), ter(tecNO_PERMISSION)); - env(finish("zelda", "alice", seq)); + env(escrow::finish("alice", "alice", seq), ter(tecNO_PERMISSION)); + env(escrow::finish("zelda", "alice", seq)); env.close(); env.require(balance("alice", XRP(4000) - (baseFee * 2))); @@ -641,93 +627,97 @@ struct Escrow_test : public beast::unit_test::suite } { // Conditional - Env env(*this); + Env env(*this, features); auto const baseFee = env.current()->fees().base; env.fund(XRP(5000), "alice", "bob"); auto const seq = env.seq("alice"); - env(escrow("alice", "alice", XRP(1000)), - condition(cb2), - finish_time(env.now() + 5s)); + env(escrow::create("alice", "alice", XRP(1000)), + escrow::condition(escrow::cb2), + escrow::finish_time(env.now() + 5s)); env.require(balance("alice", XRP(4000) - drops(baseFee))); // Not enough time has elapsed for a finish and canceling isn't // possible. - env(cancel("alice", "alice", seq), ter(tecNO_PERMISSION)); - env(cancel("bob", "alice", seq), ter(tecNO_PERMISSION)); - env(finish("alice", "alice", seq), ter(tecNO_PERMISSION)); - env(finish("alice", "alice", seq), - condition(cb2), - fulfillment(fb2), + env(escrow::cancel("alice", "alice", seq), ter(tecNO_PERMISSION)); + env(escrow::cancel("bob", "alice", seq), ter(tecNO_PERMISSION)); + env(escrow::finish("alice", "alice", seq), ter(tecNO_PERMISSION)); + env(escrow::finish("alice", "alice", seq), + escrow::condition(escrow::cb2), + escrow::fulfillment(escrow::fb2), fee(150 * baseFee), ter(tecNO_PERMISSION)); - env(finish("bob", "alice", seq), ter(tecNO_PERMISSION)); - env(finish("bob", "alice", seq), - condition(cb2), - fulfillment(fb2), + env(escrow::finish("bob", "alice", seq), ter(tecNO_PERMISSION)); + env(escrow::finish("bob", "alice", seq), + escrow::condition(escrow::cb2), + escrow::fulfillment(escrow::fb2), fee(150 * baseFee), ter(tecNO_PERMISSION)); env.close(); // Cancel continues to not be possible. 
Finish is possible but // requires the fulfillment associated with the escrow. - env(cancel("alice", "alice", seq), ter(tecNO_PERMISSION)); - env(cancel("bob", "alice", seq), ter(tecNO_PERMISSION)); - env(finish("bob", "alice", seq), ter(tecCRYPTOCONDITION_ERROR)); - env(finish("alice", "alice", seq), ter(tecCRYPTOCONDITION_ERROR)); + env(escrow::cancel("alice", "alice", seq), ter(tecNO_PERMISSION)); + env(escrow::cancel("bob", "alice", seq), ter(tecNO_PERMISSION)); + env(escrow::finish("bob", "alice", seq), + ter(tecCRYPTOCONDITION_ERROR)); + env(escrow::finish("alice", "alice", seq), + ter(tecCRYPTOCONDITION_ERROR)); env.close(); - env(finish("bob", "alice", seq), - condition(cb2), - fulfillment(fb2), + env(escrow::finish("bob", "alice", seq), + escrow::condition(escrow::cb2), + escrow::fulfillment(escrow::fb2), fee(150 * baseFee)); } { // Self-escrowed conditional with DepositAuth. - Env env(*this); + Env env(*this, features); auto const baseFee = env.current()->fees().base; env.fund(XRP(5000), "alice", "bob"); auto const seq = env.seq("alice"); - env(escrow("alice", "alice", XRP(1000)), - condition(cb3), - finish_time(env.now() + 5s)); + env(escrow::create("alice", "alice", XRP(1000)), + escrow::condition(escrow::cb3), + escrow::finish_time(env.now() + 5s)); env.require(balance("alice", XRP(4000) - drops(baseFee))); env.close(); // Finish is now possible but requires the cryptocondition. - env(finish("bob", "alice", seq), ter(tecCRYPTOCONDITION_ERROR)); - env(finish("alice", "alice", seq), ter(tecCRYPTOCONDITION_ERROR)); + env(escrow::finish("bob", "alice", seq), + ter(tecCRYPTOCONDITION_ERROR)); + env(escrow::finish("alice", "alice", seq), + ter(tecCRYPTOCONDITION_ERROR)); // Enable deposit authorization. After this only Alice can finish // the escrow. env(fset("alice", asfDepositAuth)); env.close(); - env(finish("alice", "alice", seq), - condition(cb2), - fulfillment(fb2), + env(escrow::finish("alice", "alice", seq), + escrow::condition(escrow::cb2), + escrow::fulfillment(escrow::fb2), fee(150 * baseFee), ter(tecCRYPTOCONDITION_ERROR)); - env(finish("bob", "alice", seq), - condition(cb3), - fulfillment(fb3), + env(escrow::finish("bob", "alice", seq), + escrow::condition(escrow::cb3), + escrow::fulfillment(escrow::fb3), fee(150 * baseFee), ter(tecNO_PERMISSION)); - env(finish("alice", "alice", seq), - condition(cb3), - fulfillment(fb3), + env(escrow::finish("alice", "alice", seq), + escrow::condition(escrow::cb3), + escrow::fulfillment(escrow::fb3), fee(150 * baseFee)); } { // Self-escrowed conditional with DepositAuth and DepositPreauth. - Env env(*this); + Env env(*this, features); auto const baseFee = env.current()->fees().base; env.fund(XRP(5000), "alice", "bob", "zelda"); auto const seq = env.seq("alice"); - env(escrow("alice", "alice", XRP(1000)), - condition(cb3), - finish_time(env.now() + 5s)); + env(escrow::create("alice", "alice", XRP(1000)), + escrow::condition(escrow::cb3), + escrow::finish_time(env.now() + 5s)); env.require(balance("alice", XRP(4000) - drops(baseFee))); env.close(); @@ -737,34 +727,37 @@ struct Escrow_test : public beast::unit_test::suite env.close(); // Finish is now possible but requires the cryptocondition. 
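            // Put differently: a finish in these DepositAuth blocks must clear two
            // independent checks: the supplied fulfillment must satisfy the stored
            // condition (otherwise tecCRYPTOCONDITION_ERROR), and the finishing
            // account must be allowed to deposit to the destination under
            // DepositAuth/DepositPreauth (otherwise tecNO_PERMISSION).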
- env(finish("alice", "alice", seq), ter(tecCRYPTOCONDITION_ERROR)); - env(finish("bob", "alice", seq), ter(tecCRYPTOCONDITION_ERROR)); - env(finish("zelda", "alice", seq), ter(tecCRYPTOCONDITION_ERROR)); + env(escrow::finish("alice", "alice", seq), + ter(tecCRYPTOCONDITION_ERROR)); + env(escrow::finish("bob", "alice", seq), + ter(tecCRYPTOCONDITION_ERROR)); + env(escrow::finish("zelda", "alice", seq), + ter(tecCRYPTOCONDITION_ERROR)); // Alice enables deposit authorization. After this only Alice or // Zelda (because Zelda is preauthorized) can finish the escrow. env(fset("alice", asfDepositAuth)); env.close(); - env(finish("alice", "alice", seq), - condition(cb2), - fulfillment(fb2), + env(escrow::finish("alice", "alice", seq), + escrow::condition(escrow::cb2), + escrow::fulfillment(escrow::fb2), fee(150 * baseFee), ter(tecCRYPTOCONDITION_ERROR)); - env(finish("bob", "alice", seq), - condition(cb3), - fulfillment(fb3), + env(escrow::finish("bob", "alice", seq), + escrow::condition(escrow::cb3), + escrow::fulfillment(escrow::fb3), fee(150 * baseFee), ter(tecNO_PERMISSION)); - env(finish("zelda", "alice", seq), - condition(cb3), - fulfillment(fb3), + env(escrow::finish("zelda", "alice", seq), + escrow::condition(escrow::cb3), + escrow::fulfillment(escrow::fb3), fee(150 * baseFee)); } } void - testEscrowConditions() + testEscrowConditions(FeatureBitset features) { testcase("Escrow with CryptoConditions"); @@ -772,126 +765,127 @@ struct Escrow_test : public beast::unit_test::suite using namespace std::chrono; { // Test cryptoconditions - Env env(*this); + Env env(*this, features); auto const baseFee = env.current()->fees().base; env.fund(XRP(5000), "alice", "bob", "carol"); auto const seq = env.seq("alice"); BEAST_EXPECT((*env.le("alice"))[sfOwnerCount] == 0); - env(escrow("alice", "carol", XRP(1000)), - condition(cb1), - cancel_time(env.now() + 1s)); + env(escrow::create("alice", "carol", XRP(1000)), + escrow::condition(escrow::cb1), + escrow::cancel_time(env.now() + 1s)); BEAST_EXPECT((*env.le("alice"))[sfOwnerCount] == 1); env.require(balance("alice", XRP(4000) - drops(baseFee))); env.require(balance("carol", XRP(5000))); - env(cancel("bob", "alice", seq), ter(tecNO_PERMISSION)); + env(escrow::cancel("bob", "alice", seq), ter(tecNO_PERMISSION)); BEAST_EXPECT((*env.le("alice"))[sfOwnerCount] == 1); // Attempt to finish without a fulfillment - env(finish("bob", "alice", seq), ter(tecCRYPTOCONDITION_ERROR)); + env(escrow::finish("bob", "alice", seq), + ter(tecCRYPTOCONDITION_ERROR)); BEAST_EXPECT((*env.le("alice"))[sfOwnerCount] == 1); // Attempt to finish with a condition instead of a fulfillment - env(finish("bob", "alice", seq), - condition(cb1), - fulfillment(cb1), + env(escrow::finish("bob", "alice", seq), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::cb1), fee(150 * baseFee), ter(tecCRYPTOCONDITION_ERROR)); BEAST_EXPECT((*env.le("alice"))[sfOwnerCount] == 1); - env(finish("bob", "alice", seq), - condition(cb1), - fulfillment(cb2), + env(escrow::finish("bob", "alice", seq), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::cb2), fee(150 * baseFee), ter(tecCRYPTOCONDITION_ERROR)); BEAST_EXPECT((*env.le("alice"))[sfOwnerCount] == 1); - env(finish("bob", "alice", seq), - condition(cb1), - fulfillment(cb3), + env(escrow::finish("bob", "alice", seq), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::cb3), fee(150 * baseFee), ter(tecCRYPTOCONDITION_ERROR)); BEAST_EXPECT((*env.le("alice"))[sfOwnerCount] == 1); // Attempt to finish with an incorrect 
condition and various // combinations of correct and incorrect fulfillments. - env(finish("bob", "alice", seq), - condition(cb2), - fulfillment(fb1), + env(escrow::finish("bob", "alice", seq), + escrow::condition(escrow::cb2), + escrow::fulfillment(escrow::fb1), fee(150 * baseFee), ter(tecCRYPTOCONDITION_ERROR)); BEAST_EXPECT((*env.le("alice"))[sfOwnerCount] == 1); - env(finish("bob", "alice", seq), - condition(cb2), - fulfillment(fb2), + env(escrow::finish("bob", "alice", seq), + escrow::condition(escrow::cb2), + escrow::fulfillment(escrow::fb2), fee(150 * baseFee), ter(tecCRYPTOCONDITION_ERROR)); BEAST_EXPECT((*env.le("alice"))[sfOwnerCount] == 1); - env(finish("bob", "alice", seq), - condition(cb2), - fulfillment(fb3), + env(escrow::finish("bob", "alice", seq), + escrow::condition(escrow::cb2), + escrow::fulfillment(escrow::fb3), fee(150 * baseFee), ter(tecCRYPTOCONDITION_ERROR)); BEAST_EXPECT((*env.le("alice"))[sfOwnerCount] == 1); // Attempt to finish with the correct condition & fulfillment - env(finish("bob", "alice", seq), - condition(cb1), - fulfillment(fb1), + env(escrow::finish("bob", "alice", seq), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), fee(150 * baseFee)); // SLE removed on finish BEAST_EXPECT(!env.le(keylet::escrow(Account("alice").id(), seq))); BEAST_EXPECT((*env.le("alice"))[sfOwnerCount] == 0); env.require(balance("carol", XRP(6000))); - env(cancel("bob", "alice", seq), ter(tecNO_TARGET)); + env(escrow::cancel("bob", "alice", seq), ter(tecNO_TARGET)); BEAST_EXPECT((*env.le("alice"))[sfOwnerCount] == 0); - env(cancel("bob", "carol", 1), ter(tecNO_TARGET)); + env(escrow::cancel("bob", "carol", 1), ter(tecNO_TARGET)); } { // Test cancel when condition is present - Env env(*this); + Env env(*this, features); auto const baseFee = env.current()->fees().base; env.fund(XRP(5000), "alice", "bob", "carol"); auto const seq = env.seq("alice"); BEAST_EXPECT((*env.le("alice"))[sfOwnerCount] == 0); - env(escrow("alice", "carol", XRP(1000)), - condition(cb2), - cancel_time(env.now() + 1s)); + env(escrow::create("alice", "carol", XRP(1000)), + escrow::condition(escrow::cb2), + escrow::cancel_time(env.now() + 1s)); env.close(); env.require(balance("alice", XRP(4000) - drops(baseFee))); // balance restored on cancel - env(cancel("bob", "alice", seq)); + env(escrow::cancel("bob", "alice", seq)); env.require(balance("alice", XRP(5000) - drops(baseFee))); // SLE removed on cancel BEAST_EXPECT(!env.le(keylet::escrow(Account("alice").id(), seq))); } { - Env env(*this); + Env env(*this, features); auto const baseFee = env.current()->fees().base; env.fund(XRP(5000), "alice", "bob", "carol"); env.close(); auto const seq = env.seq("alice"); - env(escrow("alice", "carol", XRP(1000)), - condition(cb3), - cancel_time(env.now() + 1s)); + env(escrow::create("alice", "carol", XRP(1000)), + escrow::condition(escrow::cb3), + escrow::cancel_time(env.now() + 1s)); BEAST_EXPECT((*env.le("alice"))[sfOwnerCount] == 1); // cancel fails before expiration - env(cancel("bob", "alice", seq), ter(tecNO_PERMISSION)); + env(escrow::cancel("bob", "alice", seq), ter(tecNO_PERMISSION)); BEAST_EXPECT((*env.le("alice"))[sfOwnerCount] == 1); env.close(); // finish fails after expiration - env(finish("bob", "alice", seq), - condition(cb3), - fulfillment(fb3), + env(escrow::finish("bob", "alice", seq), + escrow::condition(escrow::cb3), + escrow::fulfillment(escrow::fb3), fee(150 * baseFee), ter(tecNO_PERMISSION)); BEAST_EXPECT((*env.le("alice"))[sfOwnerCount] == 1); env.require(balance("carol", 
XRP(5000))); } { // Test long & short conditions during creation - Env env(*this); + Env env(*this, features); env.fund(XRP(5000), "alice", "bob", "carol"); std::vector v; - v.resize(cb1.size() + 2, 0x78); - std::memcpy(v.data() + 1, cb1.data(), cb1.size()); + v.resize(escrow::cb1.size() + 2, 0x78); + std::memcpy(v.data() + 1, escrow::cb1.data(), escrow::cb1.size()); auto const p = v.data(); auto const s = v.size(); @@ -900,63 +894,63 @@ struct Escrow_test : public beast::unit_test::suite // All these are expected to fail, because the // condition we pass in is malformed in some way - env(escrow("alice", "carol", XRP(1000)), - condition(Slice{p, s}), - cancel_time(ts), + env(escrow::create("alice", "carol", XRP(1000)), + escrow::condition(Slice{p, s}), + escrow::cancel_time(ts), ter(temMALFORMED)); - env(escrow("alice", "carol", XRP(1000)), - condition(Slice{p, s - 1}), - cancel_time(ts), + env(escrow::create("alice", "carol", XRP(1000)), + escrow::condition(Slice{p, s - 1}), + escrow::cancel_time(ts), ter(temMALFORMED)); - env(escrow("alice", "carol", XRP(1000)), - condition(Slice{p, s - 2}), - cancel_time(ts), + env(escrow::create("alice", "carol", XRP(1000)), + escrow::condition(Slice{p, s - 2}), + escrow::cancel_time(ts), ter(temMALFORMED)); - env(escrow("alice", "carol", XRP(1000)), - condition(Slice{p + 1, s - 1}), - cancel_time(ts), + env(escrow::create("alice", "carol", XRP(1000)), + escrow::condition(Slice{p + 1, s - 1}), + escrow::cancel_time(ts), ter(temMALFORMED)); - env(escrow("alice", "carol", XRP(1000)), - condition(Slice{p + 1, s - 3}), - cancel_time(ts), + env(escrow::create("alice", "carol", XRP(1000)), + escrow::condition(Slice{p + 1, s - 3}), + escrow::cancel_time(ts), ter(temMALFORMED)); - env(escrow("alice", "carol", XRP(1000)), - condition(Slice{p + 2, s - 2}), - cancel_time(ts), + env(escrow::create("alice", "carol", XRP(1000)), + escrow::condition(Slice{p + 2, s - 2}), + escrow::cancel_time(ts), ter(temMALFORMED)); - env(escrow("alice", "carol", XRP(1000)), - condition(Slice{p + 2, s - 3}), - cancel_time(ts), + env(escrow::create("alice", "carol", XRP(1000)), + escrow::condition(Slice{p + 2, s - 3}), + escrow::cancel_time(ts), ter(temMALFORMED)); auto const seq = env.seq("alice"); auto const baseFee = env.current()->fees().base; - env(escrow("alice", "carol", XRP(1000)), - condition(Slice{p + 1, s - 2}), - cancel_time(ts), + env(escrow::create("alice", "carol", XRP(1000)), + escrow::condition(Slice{p + 1, s - 2}), + escrow::cancel_time(ts), fee(10 * baseFee)); - env(finish("bob", "alice", seq), - condition(cb1), - fulfillment(fb1), + env(escrow::finish("bob", "alice", seq), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), fee(150 * baseFee)); env.require(balance("alice", XRP(4000) - drops(10 * baseFee))); env.require(balance("bob", XRP(5000) - drops(150 * baseFee))); env.require(balance("carol", XRP(6000))); } { // Test long and short conditions & fulfillments during finish - Env env(*this); + Env env(*this, features); env.fund(XRP(5000), "alice", "bob", "carol"); std::vector cv; - cv.resize(cb2.size() + 2, 0x78); - std::memcpy(cv.data() + 1, cb2.data(), cb2.size()); + cv.resize(escrow::cb2.size() + 2, 0x78); + std::memcpy(cv.data() + 1, escrow::cb2.data(), escrow::cb2.size()); auto const cp = cv.data(); auto const cs = cv.size(); std::vector fv; - fv.resize(fb2.size() + 2, 0x13); - std::memcpy(fv.data() + 1, fb2.data(), fb2.size()); + fv.resize(escrow::fb2.size() + 2, 0x13); + std::memcpy(fv.data() + 1, escrow::fb2.data(), escrow::fb2.size()); 
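            // Note: cv and fv hold the genuine cb2/fb2 bytes with one junk byte
            // (0x78 or 0x13) prepended and one appended, so only the window
            // Slice{data + 1, size - 2} reproduces the original blob; every other
            // offset/length combination used below is malformed by construction.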
auto const fp = fv.data(); auto const fs = fv.size(); @@ -965,180 +959,182 @@ struct Escrow_test : public beast::unit_test::suite // All these are expected to fail, because the // condition we pass in is malformed in some way - env(escrow("alice", "carol", XRP(1000)), - condition(Slice{cp, cs}), - cancel_time(ts), + env(escrow::create("alice", "carol", XRP(1000)), + escrow::condition(Slice{cp, cs}), + escrow::cancel_time(ts), ter(temMALFORMED)); - env(escrow("alice", "carol", XRP(1000)), - condition(Slice{cp, cs - 1}), - cancel_time(ts), + env(escrow::create("alice", "carol", XRP(1000)), + escrow::condition(Slice{cp, cs - 1}), + escrow::cancel_time(ts), ter(temMALFORMED)); - env(escrow("alice", "carol", XRP(1000)), - condition(Slice{cp, cs - 2}), - cancel_time(ts), + env(escrow::create("alice", "carol", XRP(1000)), + escrow::condition(Slice{cp, cs - 2}), + escrow::cancel_time(ts), ter(temMALFORMED)); - env(escrow("alice", "carol", XRP(1000)), - condition(Slice{cp + 1, cs - 1}), - cancel_time(ts), + env(escrow::create("alice", "carol", XRP(1000)), + escrow::condition(Slice{cp + 1, cs - 1}), + escrow::cancel_time(ts), ter(temMALFORMED)); - env(escrow("alice", "carol", XRP(1000)), - condition(Slice{cp + 1, cs - 3}), - cancel_time(ts), + env(escrow::create("alice", "carol", XRP(1000)), + escrow::condition(Slice{cp + 1, cs - 3}), + escrow::cancel_time(ts), ter(temMALFORMED)); - env(escrow("alice", "carol", XRP(1000)), - condition(Slice{cp + 2, cs - 2}), - cancel_time(ts), + env(escrow::create("alice", "carol", XRP(1000)), + escrow::condition(Slice{cp + 2, cs - 2}), + escrow::cancel_time(ts), ter(temMALFORMED)); - env(escrow("alice", "carol", XRP(1000)), - condition(Slice{cp + 2, cs - 3}), - cancel_time(ts), + env(escrow::create("alice", "carol", XRP(1000)), + escrow::condition(Slice{cp + 2, cs - 3}), + escrow::cancel_time(ts), ter(temMALFORMED)); auto const seq = env.seq("alice"); auto const baseFee = env.current()->fees().base; - env(escrow("alice", "carol", XRP(1000)), - condition(Slice{cp + 1, cs - 2}), - cancel_time(ts), + env(escrow::create("alice", "carol", XRP(1000)), + escrow::condition(Slice{cp + 1, cs - 2}), + escrow::cancel_time(ts), fee(10 * baseFee)); // Now, try to fulfill using the same sequence of // malformed conditions. 
- env(finish("bob", "alice", seq), - condition(Slice{cp, cs}), - fulfillment(Slice{fp, fs}), + env(escrow::finish("bob", "alice", seq), + escrow::condition(Slice{cp, cs}), + escrow::fulfillment(Slice{fp, fs}), fee(150 * baseFee), ter(tecCRYPTOCONDITION_ERROR)); - env(finish("bob", "alice", seq), - condition(Slice{cp, cs - 1}), - fulfillment(Slice{fp, fs}), + env(escrow::finish("bob", "alice", seq), + escrow::condition(Slice{cp, cs - 1}), + escrow::fulfillment(Slice{fp, fs}), fee(150 * baseFee), ter(tecCRYPTOCONDITION_ERROR)); - env(finish("bob", "alice", seq), - condition(Slice{cp, cs - 2}), - fulfillment(Slice{fp, fs}), + env(escrow::finish("bob", "alice", seq), + escrow::condition(Slice{cp, cs - 2}), + escrow::fulfillment(Slice{fp, fs}), fee(150 * baseFee), ter(tecCRYPTOCONDITION_ERROR)); - env(finish("bob", "alice", seq), - condition(Slice{cp + 1, cs - 1}), - fulfillment(Slice{fp, fs}), + env(escrow::finish("bob", "alice", seq), + escrow::condition(Slice{cp + 1, cs - 1}), + escrow::fulfillment(Slice{fp, fs}), fee(150 * baseFee), ter(tecCRYPTOCONDITION_ERROR)); - env(finish("bob", "alice", seq), - condition(Slice{cp + 1, cs - 3}), - fulfillment(Slice{fp, fs}), + env(escrow::finish("bob", "alice", seq), + escrow::condition(Slice{cp + 1, cs - 3}), + escrow::fulfillment(Slice{fp, fs}), fee(150 * baseFee), ter(tecCRYPTOCONDITION_ERROR)); - env(finish("bob", "alice", seq), - condition(Slice{cp + 2, cs - 2}), - fulfillment(Slice{fp, fs}), + env(escrow::finish("bob", "alice", seq), + escrow::condition(Slice{cp + 2, cs - 2}), + escrow::fulfillment(Slice{fp, fs}), fee(150 * baseFee), ter(tecCRYPTOCONDITION_ERROR)); - env(finish("bob", "alice", seq), - condition(Slice{cp + 2, cs - 3}), - fulfillment(Slice{fp, fs}), + env(escrow::finish("bob", "alice", seq), + escrow::condition(Slice{cp + 2, cs - 3}), + escrow::fulfillment(Slice{fp, fs}), fee(150 * baseFee), ter(tecCRYPTOCONDITION_ERROR)); // Now, using the correct condition, try malformed fulfillments: - env(finish("bob", "alice", seq), - condition(Slice{cp + 1, cs - 2}), - fulfillment(Slice{fp, fs}), + env(escrow::finish("bob", "alice", seq), + escrow::condition(Slice{cp + 1, cs - 2}), + escrow::fulfillment(Slice{fp, fs}), fee(150 * baseFee), ter(tecCRYPTOCONDITION_ERROR)); - env(finish("bob", "alice", seq), - condition(Slice{cp + 1, cs - 2}), - fulfillment(Slice{fp, fs - 1}), + env(escrow::finish("bob", "alice", seq), + escrow::condition(Slice{cp + 1, cs - 2}), + escrow::fulfillment(Slice{fp, fs - 1}), fee(150 * baseFee), ter(tecCRYPTOCONDITION_ERROR)); - env(finish("bob", "alice", seq), - condition(Slice{cp + 1, cs - 2}), - fulfillment(Slice{fp, fs - 2}), + env(escrow::finish("bob", "alice", seq), + escrow::condition(Slice{cp + 1, cs - 2}), + escrow::fulfillment(Slice{fp, fs - 2}), fee(150 * baseFee), ter(tecCRYPTOCONDITION_ERROR)); - env(finish("bob", "alice", seq), - condition(Slice{cp + 1, cs - 2}), - fulfillment(Slice{fp + 1, fs - 1}), + env(escrow::finish("bob", "alice", seq), + escrow::condition(Slice{cp + 1, cs - 2}), + escrow::fulfillment(Slice{fp + 1, fs - 1}), fee(150 * baseFee), ter(tecCRYPTOCONDITION_ERROR)); - env(finish("bob", "alice", seq), - condition(Slice{cp + 1, cs - 2}), - fulfillment(Slice{fp + 1, fs - 3}), + env(escrow::finish("bob", "alice", seq), + escrow::condition(Slice{cp + 1, cs - 2}), + escrow::fulfillment(Slice{fp + 1, fs - 3}), fee(150 * baseFee), ter(tecCRYPTOCONDITION_ERROR)); - env(finish("bob", "alice", seq), - condition(Slice{cp + 1, cs - 2}), - fulfillment(Slice{fp + 1, fs - 3}), + env(escrow::finish("bob", 
"alice", seq), + escrow::condition(Slice{cp + 1, cs - 2}), + escrow::fulfillment(Slice{fp + 1, fs - 3}), fee(150 * baseFee), ter(tecCRYPTOCONDITION_ERROR)); - env(finish("bob", "alice", seq), - condition(Slice{cp + 1, cs - 2}), - fulfillment(Slice{fp + 2, fs - 2}), + env(escrow::finish("bob", "alice", seq), + escrow::condition(Slice{cp + 1, cs - 2}), + escrow::fulfillment(Slice{fp + 2, fs - 2}), fee(150 * baseFee), ter(tecCRYPTOCONDITION_ERROR)); - env(finish("bob", "alice", seq), - condition(Slice{cp + 1, cs - 2}), - fulfillment(Slice{fp + 2, fs - 3}), + env(escrow::finish("bob", "alice", seq), + escrow::condition(Slice{cp + 1, cs - 2}), + escrow::fulfillment(Slice{fp + 2, fs - 3}), fee(150 * baseFee), ter(tecCRYPTOCONDITION_ERROR)); // Now try for the right one - env(finish("bob", "alice", seq), - condition(cb2), - fulfillment(fb2), + env(escrow::finish("bob", "alice", seq), + escrow::condition(escrow::cb2), + escrow::fulfillment(escrow::fb2), fee(150 * baseFee)); env.require(balance("alice", XRP(4000) - drops(10 * baseFee))); env.require(balance("carol", XRP(6000))); } { // Test empty condition during creation and // empty condition & fulfillment during finish - Env env(*this); + Env env(*this, features); env.fund(XRP(5000), "alice", "bob", "carol"); - env(escrow("alice", "carol", XRP(1000)), - condition(Slice{}), - cancel_time(env.now() + 1s), + env(escrow::create("alice", "carol", XRP(1000)), + escrow::condition(Slice{}), + escrow::cancel_time(env.now() + 1s), ter(temMALFORMED)); auto const seq = env.seq("alice"); auto const baseFee = env.current()->fees().base; - env(escrow("alice", "carol", XRP(1000)), - condition(cb3), - cancel_time(env.now() + 1s)); + env(escrow::create("alice", "carol", XRP(1000)), + escrow::condition(escrow::cb3), + escrow::cancel_time(env.now() + 1s)); - env(finish("bob", "alice", seq), - condition(Slice{}), - fulfillment(Slice{}), + env(escrow::finish("bob", "alice", seq), + escrow::condition(Slice{}), + escrow::fulfillment(Slice{}), fee(150 * baseFee), ter(tecCRYPTOCONDITION_ERROR)); - env(finish("bob", "alice", seq), - condition(cb3), - fulfillment(Slice{}), + env(escrow::finish("bob", "alice", seq), + escrow::condition(escrow::cb3), + escrow::fulfillment(Slice{}), fee(150 * baseFee), ter(tecCRYPTOCONDITION_ERROR)); - env(finish("bob", "alice", seq), - condition(Slice{}), - fulfillment(fb3), + env(escrow::finish("bob", "alice", seq), + escrow::condition(Slice{}), + escrow::fulfillment(escrow::fb3), fee(150 * baseFee), ter(tecCRYPTOCONDITION_ERROR)); // Assemble finish that is missing the Condition or the Fulfillment // since either both must be present, or neither can: - env(finish("bob", "alice", seq), condition(cb3), ter(temMALFORMED)); - env(finish("bob", "alice", seq), - fulfillment(fb3), + env(escrow::finish("bob", "alice", seq), + escrow::condition(escrow::cb3), + ter(temMALFORMED)); + env(escrow::finish("bob", "alice", seq), + escrow::fulfillment(escrow::fb3), ter(temMALFORMED)); // Now finish it. 
- env(finish("bob", "alice", seq), - condition(cb3), - fulfillment(fb3), + env(escrow::finish("bob", "alice", seq), + escrow::condition(escrow::cb3), + escrow::fulfillment(escrow::fb3), fee(150 * baseFee)); env.require(balance("carol", XRP(6000))); env.require(balance("alice", XRP(4000) - drops(baseFee))); } { // Test a condition other than PreimageSha256, which // would require a separate amendment - Env env(*this); + Env env(*this, features); env.fund(XRP(5000), "alice", "bob"); std::array cb = { @@ -1150,15 +1146,15 @@ struct Escrow_test : public beast::unit_test::suite // FIXME: this transaction should, eventually, return temDISABLED // instead of temMALFORMED. - env(escrow("alice", "bob", XRP(1000)), - condition(cb), - cancel_time(env.now() + 1s), + env(escrow::create("alice", "bob", XRP(1000)), + escrow::condition(cb), + escrow::cancel_time(env.now() + 1s), ter(temMALFORMED)); } } void - testMetaAndOwnership() + testMetaAndOwnership(FeatureBitset features) { using namespace jtx; using namespace std::chrono; @@ -1170,14 +1166,14 @@ struct Escrow_test : public beast::unit_test::suite { testcase("Metadata to self"); - Env env(*this); + Env env(*this, features); env.fund(XRP(5000), alice, bruce, carol); auto const aseq = env.seq(alice); auto const bseq = env.seq(bruce); - env(escrow(alice, alice, XRP(1000)), - finish_time(env.now() + 1s), - cancel_time(env.now() + 500s)); + env(escrow::create(alice, alice, XRP(1000)), + escrow::finish_time(env.now() + 1s), + escrow::cancel_time(env.now() + 500s)); BEAST_EXPECT( (*env.meta())[sfTransactionResult] == static_cast(tesSUCCESS)); @@ -1192,9 +1188,9 @@ struct Escrow_test : public beast::unit_test::suite std::find(aod.begin(), aod.end(), aa) != aod.end()); } - env(escrow(bruce, bruce, XRP(1000)), - finish_time(env.now() + 1s), - cancel_time(env.now() + 2s)); + env(escrow::create(bruce, bruce, XRP(1000)), + escrow::finish_time(env.now() + 1s), + escrow::cancel_time(env.now() + 2s)); BEAST_EXPECT( (*env.meta())[sfTransactionResult] == static_cast(tesSUCCESS)); @@ -1210,7 +1206,7 @@ struct Escrow_test : public beast::unit_test::suite } env.close(5s); - env(finish(alice, alice, aseq)); + env(escrow::finish(alice, alice, aseq)); { BEAST_EXPECT(!env.le(keylet::escrow(alice.id(), aseq))); BEAST_EXPECT( @@ -1229,7 +1225,7 @@ struct Escrow_test : public beast::unit_test::suite } env.close(5s); - env(cancel(bruce, bruce, bseq)); + env(escrow::cancel(bruce, bruce, bseq)); { BEAST_EXPECT(!env.le(keylet::escrow(bruce.id(), bseq))); BEAST_EXPECT( @@ -1245,19 +1241,20 @@ struct Escrow_test : public beast::unit_test::suite { testcase("Metadata to other"); - Env env(*this); + Env env(*this, features); env.fund(XRP(5000), alice, bruce, carol); auto const aseq = env.seq(alice); auto const bseq = env.seq(bruce); - env(escrow(alice, bruce, XRP(1000)), finish_time(env.now() + 1s)); + env(escrow::create(alice, bruce, XRP(1000)), + escrow::finish_time(env.now() + 1s)); BEAST_EXPECT( (*env.meta())[sfTransactionResult] == static_cast(tesSUCCESS)); env.close(5s); - env(escrow(bruce, carol, XRP(1000)), - finish_time(env.now() + 1s), - cancel_time(env.now() + 2s)); + env(escrow::create(bruce, carol, XRP(1000)), + escrow::finish_time(env.now() + 1s), + escrow::cancel_time(env.now() + 2s)); BEAST_EXPECT( (*env.meta())[sfTransactionResult] == static_cast(tesSUCCESS)); @@ -1289,7 +1286,7 @@ struct Escrow_test : public beast::unit_test::suite } env.close(5s); - env(finish(alice, alice, aseq)); + env(escrow::finish(alice, alice, aseq)); { 
BEAST_EXPECT(!env.le(keylet::escrow(alice.id(), aseq))); BEAST_EXPECT(env.le(keylet::escrow(bruce.id(), bseq))); @@ -1311,7 +1308,7 @@ struct Escrow_test : public beast::unit_test::suite } env.close(5s); - env(cancel(bruce, bruce, bseq)); + env(escrow::cancel(bruce, bruce, bseq)); { BEAST_EXPECT(!env.le(keylet::escrow(alice.id(), aseq))); BEAST_EXPECT(!env.le(keylet::escrow(bruce.id(), bseq))); @@ -1335,13 +1332,13 @@ struct Escrow_test : public beast::unit_test::suite } void - testConsequences() + testConsequences(FeatureBitset features) { testcase("Consequences"); using namespace jtx; using namespace std::chrono; - Env env(*this); + Env env(*this, features); auto const baseFee = env.current()->fees().base; env.memoize("alice"); @@ -1350,8 +1347,8 @@ struct Escrow_test : public beast::unit_test::suite { auto const jtx = env.jt( - escrow("alice", "carol", XRP(1000)), - finish_time(env.now() + 1s), + escrow::create("alice", "carol", XRP(1000)), + escrow::finish_time(env.now() + 1s), seq(1), fee(baseFee)); auto const pf = preflight( @@ -1368,7 +1365,7 @@ struct Escrow_test : public beast::unit_test::suite { auto const jtx = - env.jt(cancel("bob", "alice", 3), seq(1), fee(baseFee)); + env.jt(escrow::cancel("bob", "alice", 3), seq(1), fee(baseFee)); auto const pf = preflight( env.app(), env.current()->rules(), @@ -1383,7 +1380,7 @@ struct Escrow_test : public beast::unit_test::suite { auto const jtx = - env.jt(finish("bob", "alice", 3), seq(1), fee(baseFee)); + env.jt(escrow::finish("bob", "alice", 3), seq(1), fee(baseFee)); auto const pf = preflight( env.app(), env.current()->rules(), @@ -1398,7 +1395,7 @@ struct Escrow_test : public beast::unit_test::suite } void - testEscrowWithTickets() + testEscrowWithTickets(FeatureBitset features) { testcase("Escrow with tickets"); @@ -1409,7 +1406,7 @@ struct Escrow_test : public beast::unit_test::suite { // Create escrow and finish using tickets. - Env env(*this); + Env env(*this, features); auto const baseFee = env.current()->fees().base; env.fund(XRP(5000), alice, bob); env.close(); @@ -1437,8 +1434,8 @@ struct Escrow_test : public beast::unit_test::suite auto const ts = env.now() + 97s; std::uint32_t const escrowSeq = aliceTicket; - env(escrow(alice, bob, XRP(1000)), - finish_time(ts), + env(escrow::create(alice, bob, XRP(1000)), + escrow::finish_time(ts), ticket::use(aliceTicket)); BEAST_EXPECT(env.seq(alice) == aliceRootSeq); env.require(tickets(alice, 0)); @@ -1448,7 +1445,7 @@ struct Escrow_test : public beast::unit_test::suite // prematurely. Note that each tec consumes one of bob's tickets. for (; env.now() < ts; env.close()) { - env(finish(bob, alice, escrowSeq), + env(escrow::finish(bob, alice, escrowSeq), fee(150 * baseFee), ticket::use(--bobTicket), ter(tecNO_PERMISSION)); @@ -1456,13 +1453,13 @@ struct Escrow_test : public beast::unit_test::suite } // bob tries to re-use a ticket, which is rejected. - env(finish(bob, alice, escrowSeq), + env(escrow::finish(bob, alice, escrowSeq), fee(150 * baseFee), ticket::use(bobTicket), ter(tefNO_TICKET)); // bob uses one of his remaining tickets. Success! - env(finish(bob, alice, escrowSeq), + env(escrow::finish(bob, alice, escrowSeq), fee(150 * baseFee), ticket::use(--bobTicket)); env.close(); @@ -1470,7 +1467,7 @@ struct Escrow_test : public beast::unit_test::suite } { // Create escrow and cancel using tickets. 
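            // The escrow transactions in this block consume pre-created Tickets
            // instead of the account root sequence, which is why the checks below
            // expect env.seq(alice) and env.seq(bob) to remain at aliceRootSeq and
            // bobRootSeq; note that even a tec result consumes its Ticket.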
- Env env(*this); + Env env(*this, features); auto const baseFee = env.current()->fees().base; env.fund(XRP(5000), alice, bob); env.close(); @@ -1497,9 +1494,9 @@ struct Escrow_test : public beast::unit_test::suite auto const ts = env.now() + 117s; std::uint32_t const escrowSeq = aliceTicket; - env(escrow(alice, bob, XRP(1000)), - condition(cb1), - cancel_time(ts), + env(escrow::create(alice, bob, XRP(1000)), + escrow::condition(escrow::cb1), + escrow::cancel_time(ts), ticket::use(aliceTicket)); BEAST_EXPECT(env.seq(alice) == aliceRootSeq); env.require(tickets(alice, 0)); @@ -1509,7 +1506,7 @@ struct Escrow_test : public beast::unit_test::suite // prematurely. for (; env.now() < ts; env.close()) { - env(cancel(bob, alice, escrowSeq), + env(escrow::cancel(bob, alice, escrowSeq), fee(150 * baseFee), ticket::use(bobTicket++), ter(tecNO_PERMISSION)); @@ -1517,16 +1514,16 @@ struct Escrow_test : public beast::unit_test::suite } // Verify that a finish won't work anymore. - env(finish(bob, alice, escrowSeq), - condition(cb1), - fulfillment(fb1), + env(escrow::finish(bob, alice, escrowSeq), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), fee(150 * baseFee), ticket::use(bobTicket++), ter(tecNO_PERMISSION)); BEAST_EXPECT(env.seq(bob) == bobRootSeq); // Verify that the cancel succeeds. - env(cancel(bob, alice, escrowSeq), + env(escrow::cancel(bob, alice, escrowSeq), fee(150 * baseFee), ticket::use(bobTicket++)); env.close(); @@ -1538,7 +1535,7 @@ struct Escrow_test : public beast::unit_test::suite } void - testCredentials() + testCredentials(FeatureBitset features) { testcase("Test with credentials"); @@ -1555,12 +1552,13 @@ struct Escrow_test : public beast::unit_test::suite { // Credentials amendment not enabled - Env env(*this, supported_amendments() - featureCredentials); + Env env(*this, features - featureCredentials); env.fund(XRP(5000), alice, bob); env.close(); auto const seq = env.seq(alice); - env(escrow(alice, bob, XRP(1000)), finish_time(env.now() + 1s)); + env(escrow::create(alice, bob, XRP(1000)), + escrow::finish_time(env.now() + 1s)); env.close(); env(fset(bob, asfDepositAuth)); @@ -1571,13 +1569,13 @@ struct Escrow_test : public beast::unit_test::suite std::string const credIdx = "48004829F915654A81B11C4AB8218D96FED67F209B58328A72314FB6EA288B" "E4"; - env(finish(bob, alice, seq), + env(escrow::finish(bob, alice, seq), credentials::ids({credIdx}), ter(temDISABLED)); } { - Env env(*this); + Env env(*this, features); env.fund(XRP(5000), alice, bob, carol, dillon, zelda); env.close(); @@ -1589,7 +1587,8 @@ struct Escrow_test : public beast::unit_test::suite std::string const credIdx = jv[jss::result][jss::index].asString(); auto const seq = env.seq(alice); - env(escrow(alice, bob, XRP(1000)), finish_time(env.now() + 50s)); + env(escrow::create(alice, bob, XRP(1000)), + escrow::finish_time(env.now() + 50s)); env.close(); // Bob require preauthorization @@ -1597,7 +1596,7 @@ struct Escrow_test : public beast::unit_test::suite env.close(); // Fail, credentials not accepted - env(finish(carol, alice, seq), + env(escrow::finish(carol, alice, seq), credentials::ids({credIdx}), ter(tecBAD_CREDENTIALS)); @@ -1607,12 +1606,12 @@ struct Escrow_test : public beast::unit_test::suite env.close(); // Fail, credentials doesn’t belong to root account - env(finish(dillon, alice, seq), + env(escrow::finish(dillon, alice, seq), credentials::ids({credIdx}), ter(tecBAD_CREDENTIALS)); // Fail, no depositPreauth - env(finish(carol, alice, seq), + env(escrow::finish(carol, alice, seq), 
credentials::ids({credIdx}), ter(tecNO_PERMISSION)); @@ -1621,7 +1620,7 @@ struct Escrow_test : public beast::unit_test::suite // Success env.close(); - env(finish(carol, alice, seq), credentials::ids({credIdx})); + env(escrow::finish(carol, alice, seq), credentials::ids({credIdx})); env.close(); } @@ -1629,7 +1628,7 @@ struct Escrow_test : public beast::unit_test::suite testcase("Escrow with credentials without depositPreauth"); using namespace std::chrono; - Env env(*this); + Env env(*this, features); env.fund(XRP(5000), alice, bob, carol, dillon, zelda); env.close(); @@ -1643,7 +1642,8 @@ struct Escrow_test : public beast::unit_test::suite std::string const credIdx = jv[jss::result][jss::index].asString(); auto const seq = env.seq(alice); - env(escrow(alice, bob, XRP(1000)), finish_time(env.now() + 50s)); + env(escrow::create(alice, bob, XRP(1000)), + escrow::finish_time(env.now() + 50s)); // time advance env.close(); env.close(); @@ -1653,7 +1653,7 @@ struct Escrow_test : public beast::unit_test::suite env.close(); // Succeed, Bob doesn't require preauthorization - env(finish(carol, alice, seq), credentials::ids({credIdx})); + env(escrow::finish(carol, alice, seq), credentials::ids({credIdx})); env.close(); { @@ -1669,7 +1669,8 @@ struct Escrow_test : public beast::unit_test::suite .asString(); auto const seq = env.seq(alice); - env(escrow(alice, bob, XRP(1000)), finish_time(env.now() + 1s)); + env(escrow::create(alice, bob, XRP(1000)), + escrow::finish_time(env.now() + 1s)); env.close(); // Bob require preauthorization @@ -1679,27 +1680,38 @@ struct Escrow_test : public beast::unit_test::suite env.close(); // Use any valid credentials if account == dst - env(finish(bob, alice, seq), credentials::ids({credIdxBob})); + env(escrow::finish(bob, alice, seq), + credentials::ids({credIdxBob})); env.close(); } } } + void + testWithFeats(FeatureBitset features) + { + testEnablement(features); + testTiming(features); + testTags(features); + testDisallowXRP(features); + test1571(features); + testFails(features); + testLockup(features); + testEscrowConditions(features); + testMetaAndOwnership(features); + testConsequences(features); + testEscrowWithTickets(features); + testCredentials(features); + } + +public: void run() override { - testEnablement(); - testTiming(); - testTags(); - testDisallowXRP(); - test1571(); - testFails(); - testLockup(); - testEscrowConditions(); - testMetaAndOwnership(); - testConsequences(); - testEscrowWithTickets(); - testCredentials(); + using namespace test::jtx; + FeatureBitset const all{supported_amendments()}; + testWithFeats(all); + testWithFeats(all - featureTokenEscrow); } }; diff --git a/src/test/app/MPToken_test.cpp b/src/test/app/MPToken_test.cpp index a6055d85f6..deee217aa8 100644 --- a/src/test/app/MPToken_test.cpp +++ b/src/test/app/MPToken_test.cpp @@ -1694,15 +1694,6 @@ class MPToken_test : public beast::unit_test::suite jv[jss::SendMax] = mpt.getJson(JsonOptions::none); test(jv, jss::SendMax.c_str()); } - // EscrowCreate - { - Json::Value jv; - jv[jss::TransactionType] = jss::EscrowCreate; - jv[jss::Account] = alice.human(); - jv[jss::Destination] = carol.human(); - jv[jss::Amount] = mpt.getJson(JsonOptions::none); - test(jv, jss::Amount.c_str()); - } // OfferCreate { Json::Value jv = offer(alice, USD(100), mpt); diff --git a/src/test/jtx.h b/src/test/jtx.h index fa67780cbd..4188910085 100644 --- a/src/test/jtx.h +++ b/src/test/jtx.h @@ -39,6 +39,7 @@ #include #include #include +#include #include #include #include diff --git a/src/test/jtx/Env.h 
b/src/test/jtx/Env.h index 042bda39a6..53417a6079 100644 --- a/src/test/jtx/Env.h +++ b/src/test/jtx/Env.h @@ -472,6 +472,15 @@ public: PrettyAmount balance(Account const& account, Issue const& issue) const; + PrettyAmount + balance(Account const& account, MPTIssue const& mptIssue) const; + + /** Returns the IOU limit on an account. + Returns 0 if the trust line does not exist. + */ + PrettyAmount + limit(Account const& account, Issue const& issue) const; + /** Return the number of objects owned by an account. * Returns 0 if the account does not exist. */ diff --git a/src/test/jtx/TestHelpers.h b/src/test/jtx/TestHelpers.h index ae46ea4fe3..d4a39b6498 100644 --- a/src/test/jtx/TestHelpers.h +++ b/src/test/jtx/TestHelpers.h @@ -233,127 +233,6 @@ expectLedgerEntryRoot( Account const& acct, STAmount const& expectedValue); -/* Escrow */ -/******************************************************************************/ - -Json::Value -escrow(AccountID const& account, AccountID const& to, STAmount const& amount); - -inline Json::Value -escrow(Account const& account, Account const& to, STAmount const& amount) -{ - return escrow(account.id(), to.id(), amount); -} - -Json::Value -finish(AccountID const& account, AccountID const& from, std::uint32_t seq); - -inline Json::Value -finish(Account const& account, Account const& from, std::uint32_t seq) -{ - return finish(account.id(), from.id(), seq); -} - -Json::Value -cancel(AccountID const& account, Account const& from, std::uint32_t seq); - -inline Json::Value -cancel(Account const& account, Account const& from, std::uint32_t seq) -{ - return cancel(account.id(), from, seq); -} - -std::array constexpr cb1 = { - {0xA0, 0x25, 0x80, 0x20, 0xE3, 0xB0, 0xC4, 0x42, 0x98, 0xFC, - 0x1C, 0x14, 0x9A, 0xFB, 0xF4, 0xC8, 0x99, 0x6F, 0xB9, 0x24, - 0x27, 0xAE, 0x41, 0xE4, 0x64, 0x9B, 0x93, 0x4C, 0xA4, 0x95, - 0x99, 0x1B, 0x78, 0x52, 0xB8, 0x55, 0x81, 0x01, 0x00}}; - -// A PreimageSha256 fulfillments and its associated condition. 
-std::array const fb1 = {{0xA0, 0x02, 0x80, 0x00}}; - -/** Set the "FinishAfter" time tag on a JTx */ -struct finish_time -{ -private: - NetClock::time_point value_; - -public: - explicit finish_time(NetClock::time_point const& value) : value_(value) - { - } - - void - operator()(Env&, JTx& jt) const - { - jt.jv[sfFinishAfter.jsonName] = value_.time_since_epoch().count(); - } -}; - -/** Set the "CancelAfter" time tag on a JTx */ -struct cancel_time -{ -private: - NetClock::time_point value_; - -public: - explicit cancel_time(NetClock::time_point const& value) : value_(value) - { - } - - void - operator()(jtx::Env&, jtx::JTx& jt) const - { - jt.jv[sfCancelAfter.jsonName] = value_.time_since_epoch().count(); - } -}; - -struct condition -{ -private: - std::string value_; - -public: - explicit condition(Slice const& cond) : value_(strHex(cond)) - { - } - - template - explicit condition(std::array const& c) - : condition(makeSlice(c)) - { - } - - void - operator()(Env&, JTx& jt) const - { - jt.jv[sfCondition.jsonName] = value_; - } -}; - -struct fulfillment -{ -private: - std::string value_; - -public: - explicit fulfillment(Slice condition) : value_(strHex(condition)) - { - } - - template - explicit fulfillment(std::array f) - : fulfillment(makeSlice(f)) - { - } - - void - operator()(Env&, JTx& jt) const - { - jt.jv[sfFulfillment.jsonName] = value_; - } -}; - /* Payment Channel */ /******************************************************************************/ diff --git a/src/test/jtx/escrow.h b/src/test/jtx/escrow.h new file mode 100644 index 0000000000..3147b44c65 --- /dev/null +++ b/src/test/jtx/escrow.h @@ -0,0 +1,185 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2019 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_TEST_JTX_ESCROW_H_INCLUDED +#define RIPPLE_TEST_JTX_ESCROW_H_INCLUDED + +#include +#include +#include +#include + +#include + +namespace ripple { +namespace test { +namespace jtx { + +/** Escrow operations. 
*/ +namespace escrow { + +Json::Value +create(AccountID const& account, AccountID const& to, STAmount const& amount); + +inline Json::Value +create(Account const& account, Account const& to, STAmount const& amount) +{ + return create(account.id(), to.id(), amount); +} + +Json::Value +finish(AccountID const& account, AccountID const& from, std::uint32_t seq); + +inline Json::Value +finish(Account const& account, Account const& from, std::uint32_t seq) +{ + return finish(account.id(), from.id(), seq); +} + +Json::Value +cancel(AccountID const& account, Account const& from, std::uint32_t seq); + +inline Json::Value +cancel(Account const& account, Account const& from, std::uint32_t seq) +{ + return cancel(account.id(), from, seq); +} + +Rate +rate(Env& env, Account const& account, std::uint32_t const& seq); + +// A PreimageSha256 fulfillments and its associated condition. +std::array const fb1 = {{0xA0, 0x02, 0x80, 0x00}}; + +std::array const cb1 = { + {0xA0, 0x25, 0x80, 0x20, 0xE3, 0xB0, 0xC4, 0x42, 0x98, 0xFC, + 0x1C, 0x14, 0x9A, 0xFB, 0xF4, 0xC8, 0x99, 0x6F, 0xB9, 0x24, + 0x27, 0xAE, 0x41, 0xE4, 0x64, 0x9B, 0x93, 0x4C, 0xA4, 0x95, + 0x99, 0x1B, 0x78, 0x52, 0xB8, 0x55, 0x81, 0x01, 0x00}}; + +// Another PreimageSha256 fulfillments and its associated condition. +std::array const fb2 = { + {0xA0, 0x05, 0x80, 0x03, 0x61, 0x61, 0x61}}; + +std::array const cb2 = { + {0xA0, 0x25, 0x80, 0x20, 0x98, 0x34, 0x87, 0x6D, 0xCF, 0xB0, + 0x5C, 0xB1, 0x67, 0xA5, 0xC2, 0x49, 0x53, 0xEB, 0xA5, 0x8C, + 0x4A, 0xC8, 0x9B, 0x1A, 0xDF, 0x57, 0xF2, 0x8F, 0x2F, 0x9D, + 0x09, 0xAF, 0x10, 0x7E, 0xE8, 0xF0, 0x81, 0x01, 0x03}}; + +// Another PreimageSha256 fulfillment and its associated condition. +std::array const fb3 = { + {0xA0, 0x06, 0x80, 0x04, 0x6E, 0x69, 0x6B, 0x62}}; + +std::array const cb3 = { + {0xA0, 0x25, 0x80, 0x20, 0x6E, 0x4C, 0x71, 0x45, 0x30, 0xC0, + 0xA4, 0x26, 0x8B, 0x3F, 0xA6, 0x3B, 0x1B, 0x60, 0x6F, 0x2D, + 0x26, 0x4A, 0x2D, 0x85, 0x7B, 0xE8, 0xA0, 0x9C, 0x1D, 0xFD, + 0x57, 0x0D, 0x15, 0x85, 0x8B, 0xD4, 0x81, 0x01, 0x04}}; + +/** Set the "FinishAfter" time tag on a JTx */ +struct finish_time +{ +private: + NetClock::time_point value_; + +public: + explicit finish_time(NetClock::time_point const& value) : value_(value) + { + } + + void + operator()(Env&, JTx& jt) const + { + jt.jv[sfFinishAfter.jsonName] = value_.time_since_epoch().count(); + } +}; + +/** Set the "CancelAfter" time tag on a JTx */ +struct cancel_time +{ +private: + NetClock::time_point value_; + +public: + explicit cancel_time(NetClock::time_point const& value) : value_(value) + { + } + + void + operator()(jtx::Env&, jtx::JTx& jt) const + { + jt.jv[sfCancelAfter.jsonName] = value_.time_since_epoch().count(); + } +}; + +struct condition +{ +private: + std::string value_; + +public: + explicit condition(Slice const& cond) : value_(strHex(cond)) + { + } + + template + explicit condition(std::array const& c) + : condition(makeSlice(c)) + { + } + + void + operator()(Env&, JTx& jt) const + { + jt.jv[sfCondition.jsonName] = value_; + } +}; + +struct fulfillment +{ +private: + std::string value_; + +public: + explicit fulfillment(Slice condition) : value_(strHex(condition)) + { + } + + template + explicit fulfillment(std::array f) + : fulfillment(makeSlice(f)) + { + } + + void + operator()(Env&, JTx& jt) const + { + jt.jv[sfFulfillment.jsonName] = value_; + } +}; + +} // namespace escrow + +} // namespace jtx + +} // namespace test +} // namespace ripple + +#endif diff --git a/src/test/jtx/flags.h b/src/test/jtx/flags.h index 
09e5dac52f..aa048c3e55 100644 --- a/src/test/jtx/flags.h +++ b/src/test/jtx/flags.h @@ -96,6 +96,9 @@ private: case asfDisallowIncomingTrustline: mask_ |= lsfDisallowIncomingTrustline; break; + case asfAllowTrustLineLocking: + mask_ |= lsfAllowTrustLineLocking; + break; default: Throw("unknown flag"); } diff --git a/src/test/jtx/impl/Env.cpp b/src/test/jtx/impl/Env.cpp index e45042e310..58d26da26e 100644 --- a/src/test/jtx/impl/Env.cpp +++ b/src/test/jtx/impl/Env.cpp @@ -199,6 +199,48 @@ Env::balance(Account const& account, Issue const& issue) const return {amount, lookup(issue.account).name()}; } +PrettyAmount +Env::balance(Account const& account, MPTIssue const& mptIssue) const +{ + MPTID const id = mptIssue.getMptID(); + if (!id) + return {STAmount(mptIssue, 0), account.name()}; + + AccountID const issuer = mptIssue.getIssuer(); + if (account.id() == issuer) + { + // Issuer balance + auto const sle = le(keylet::mptIssuance(id)); + if (!sle) + return {STAmount(mptIssue, 0), account.name()}; + + STAmount const amount{mptIssue, sle->getFieldU64(sfOutstandingAmount)}; + return {amount, lookup(issuer).name()}; + } + else + { + // Holder balance + auto const sle = le(keylet::mptoken(id, account)); + if (!sle) + return {STAmount(mptIssue, 0), account.name()}; + + STAmount const amount{mptIssue, sle->getFieldU64(sfMPTAmount)}; + return {amount, lookup(issuer).name()}; + } +} + +PrettyAmount +Env::limit(Account const& account, Issue const& issue) const +{ + auto const sle = le(keylet::line(account.id(), issue)); + if (!sle) + return {STAmount(issue, 0), account.name()}; + auto const aHigh = account.id() > issue.account; + if (sle && sle->isFieldPresent(aHigh ? sfLowLimit : sfHighLimit)) + return {(*sle)[aHigh ? sfLowLimit : sfHighLimit], account.name()}; + return {STAmount(issue, 0), account.name()}; +} + std::uint32_t Env::ownerCount(Account const& account) const { diff --git a/src/test/jtx/impl/TestHelpers.cpp b/src/test/jtx/impl/TestHelpers.cpp index cb8141b9f3..5f8c53877a 100644 --- a/src/test/jtx/impl/TestHelpers.cpp +++ b/src/test/jtx/impl/TestHelpers.cpp @@ -211,42 +211,6 @@ expectLedgerEntryRoot( return accountBalance(env, acct) == to_string(expectedValue.xrp()); } -/* Escrow */ -/******************************************************************************/ - -Json::Value -escrow(AccountID const& account, AccountID const& to, STAmount const& amount) -{ - Json::Value jv; - jv[jss::TransactionType] = jss::EscrowCreate; - jv[jss::Account] = to_string(account); - jv[jss::Destination] = to_string(to); - jv[jss::Amount] = amount.getJson(JsonOptions::none); - return jv; -} - -Json::Value -finish(AccountID const& account, AccountID const& from, std::uint32_t seq) -{ - Json::Value jv; - jv[jss::TransactionType] = jss::EscrowFinish; - jv[jss::Account] = to_string(account); - jv[sfOwner.jsonName] = to_string(from); - jv[sfOfferSequence.jsonName] = seq; - return jv; -} - -Json::Value -cancel(AccountID const& account, Account const& from, std::uint32_t seq) -{ - Json::Value jv; - jv[jss::TransactionType] = jss::EscrowCancel; - jv[jss::Account] = to_string(account); - jv[sfOwner.jsonName] = from.human(); - jv[sfOfferSequence.jsonName] = seq; - return jv; -} - /* Payment Channel */ /******************************************************************************/ Json::Value diff --git a/src/test/jtx/impl/escrow.cpp b/src/test/jtx/impl/escrow.cpp new file mode 100644 index 0000000000..a1ec6a3c5e --- /dev/null +++ b/src/test/jtx/impl/escrow.cpp @@ -0,0 +1,82 @@ 
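In connection with the asfAllowTrustLineLocking flag handled above (and the lsfAllowTrustLineLocking check added to Escrow.cpp below), a hedged sketch of the expected test pattern; names and amounts are illustrative, the standard jtx fixture is assumed, and the success of the final create assumes no other preclaim check intervenes:

    Env env(*this, features);
    Account const gw{"gw"};
    Account const alice{"alice"};
    Account const bob{"bob"};
    auto const USD = gw["USD"];

    env.fund(XRP(5000), gw, alice, bob);
    env(trust(alice, USD(1000)));
    env(pay(gw, alice, USD(500)));
    env.close();

    // Without the issuer opting in, an IOU escrow create is rejected in preclaim.
    env(escrow::create(alice, bob, USD(100)),
        escrow::finish_time(env.now() + 5s),
        ter(tecNO_PERMISSION));

    // The issuer allows its trust lines to be locked.
    env(fset(gw, asfAllowTrustLineLocking));
    env.close();

    env(escrow::create(alice, bob, USD(100)),
        escrow::finish_time(env.now() + 5s));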
+//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2019 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include + +#include +#include + +namespace ripple { +namespace test { +namespace jtx { + +/** Escrow operations. */ +namespace escrow { + +Json::Value +create(AccountID const& account, AccountID const& to, STAmount const& amount) +{ + Json::Value jv; + jv[jss::TransactionType] = jss::EscrowCreate; + jv[jss::Flags] = tfFullyCanonicalSig; + jv[jss::Account] = to_string(account); + jv[jss::Destination] = to_string(to); + jv[jss::Amount] = amount.getJson(JsonOptions::none); + return jv; +} + +Json::Value +finish(AccountID const& account, AccountID const& from, std::uint32_t seq) +{ + Json::Value jv; + jv[jss::TransactionType] = jss::EscrowFinish; + jv[jss::Flags] = tfFullyCanonicalSig; + jv[jss::Account] = to_string(account); + jv[sfOwner.jsonName] = to_string(from); + jv[sfOfferSequence.jsonName] = seq; + return jv; +} + +Json::Value +cancel(AccountID const& account, Account const& from, std::uint32_t seq) +{ + Json::Value jv; + jv[jss::TransactionType] = jss::EscrowCancel; + jv[jss::Flags] = tfFullyCanonicalSig; + jv[jss::Account] = to_string(account); + jv[sfOwner.jsonName] = from.human(); + jv[sfOfferSequence.jsonName] = seq; + return jv; +} + +Rate +rate(Env& env, Account const& account, std::uint32_t const& seq) +{ + auto const sle = env.le(keylet::escrow(account.id(), seq)); + if (sle->isFieldPresent(sfTransferRate)) + return ripple::Rate((*sle)[sfTransferRate]); + return Rate{0}; +} + +} // namespace escrow + +} // namespace jtx + +} // namespace test +} // namespace ripple diff --git a/src/test/ledger/Invariants_test.cpp b/src/test/ledger/Invariants_test.cpp index 76fcb34c7f..ebd2235cc9 100644 --- a/src/test/ledger/Invariants_test.cpp +++ b/src/test/ledger/Invariants_test.cpp @@ -731,21 +731,6 @@ class Invariants_test : public beast::unit_test::suite using namespace test::jtx; testcase << "no zero escrow"; - doInvariantCheck( - {{"Cannot return non-native STAmount as XRPAmount"}}, - [](Account const& A1, Account const& A2, ApplyContext& ac) { - // escrow with nonnative amount - auto const sle = ac.view().peek(keylet::account(A1.id())); - if (!sle) - return false; - auto sleNew = std::make_shared( - keylet::escrow(A1, (*sle)[sfSequence] + 2)); - STAmount nonNative(A2["USD"](51)); - sleNew->setFieldAmount(sfAmount, nonNative); - ac.view().insert(sleNew); - return true; - }); - doInvariantCheck( {{"XRP net change of -1000000 doesn't match fee 0"}, {"escrow specifies invalid amount"}}, diff --git a/src/test/protocol/STAmount_test.cpp b/src/test/protocol/STAmount_test.cpp index 712c91000e..d62241f2f4 100644 
--- a/src/test/protocol/STAmount_test.cpp +++ b/src/test/protocol/STAmount_test.cpp @@ -17,6 +17,8 @@ */ //============================================================================== +#include + #include #include #include @@ -668,6 +670,366 @@ public: } } + void + testCanAddXRP() + { + testcase("can add xrp"); + + // Adding zero + { + STAmount amt1(XRPAmount(0)); + STAmount amt2(XRPAmount(1000)); + BEAST_EXPECT(canAdd(amt1, amt2) == true); + } + + // Adding zero + { + STAmount amt1(XRPAmount(1000)); + STAmount amt2(XRPAmount(0)); + BEAST_EXPECT(canAdd(amt1, amt2) == true); + } + + // Adding two positive XRP amounts + { + STAmount amt1(XRPAmount(500)); + STAmount amt2(XRPAmount(1500)); + BEAST_EXPECT(canAdd(amt1, amt2) == true); + } + + // Adding two negative XRP amounts + { + STAmount amt1(XRPAmount(-500)); + STAmount amt2(XRPAmount(-1500)); + BEAST_EXPECT(canAdd(amt1, amt2) == true); + } + + // Adding a positive and a negative XRP amount + { + STAmount amt1(XRPAmount(1000)); + STAmount amt2(XRPAmount(-1000)); + BEAST_EXPECT(canAdd(amt1, amt2) == true); + } + + // Overflow check for max XRP amounts + { + STAmount amt1(std::numeric_limits::max()); + STAmount amt2(XRPAmount(1)); + BEAST_EXPECT(canAdd(amt1, amt2) == false); + } + + // Overflow check for min XRP amounts + { + STAmount amt1(std::numeric_limits::max()); + amt1 += XRPAmount(1); + STAmount amt2(XRPAmount(-1)); + BEAST_EXPECT(canAdd(amt1, amt2) == false); + } + } + + void + testCanAddIOU() + { + testcase("can add iou"); + + Issue const usd{Currency(0x5553440000000000), AccountID(0x4985601)}; + Issue const eur{Currency(0x4555520000000000), AccountID(0x4985601)}; + + // Adding two IOU amounts + { + STAmount amt1(usd, 500); + STAmount amt2(usd, 1500); + BEAST_EXPECT(canAdd(amt1, amt2) == true); + } + + // Adding a positive and a negative IOU amount + { + STAmount amt1(usd, 1000); + STAmount amt2(usd, -1000); + BEAST_EXPECT(canAdd(amt1, amt2) == true); + } + + // Overflow check for max IOU amounts + { + STAmount amt1(usd, std::numeric_limits::max()); + STAmount amt2(usd, 1); + BEAST_EXPECT(canAdd(amt1, amt2) == false); + } + + // Overflow check for min IOU amounts + { + STAmount amt1(usd, std::numeric_limits::min()); + STAmount amt2(usd, -1); + BEAST_EXPECT(canAdd(amt1, amt2) == false); + } + + // Adding XRP and IOU + { + STAmount amt1(XRPAmount(1)); + STAmount amt2(usd, 1); + BEAST_EXPECT(canAdd(amt1, amt2) == false); + } + + // Adding different IOU issues (non zero) + { + STAmount amt1(usd, 1000); + STAmount amt2(eur, 500); + BEAST_EXPECT(canAdd(amt1, amt2) == false); + } + + // Adding different IOU issues (zero) + { + STAmount amt1(usd, 0); + STAmount amt2(eur, 500); + BEAST_EXPECT(canAdd(amt1, amt2) == false); + } + } + + void + testCanAddMPT() + { + testcase("can add mpt"); + + MPTIssue const mpt{MPTIssue{makeMptID(1, AccountID(0x4985601))}}; + MPTIssue const mpt2{MPTIssue{makeMptID(2, AccountID(0x4985601))}}; + + // Adding zero + { + STAmount amt1(mpt, 0); + STAmount amt2(mpt, 1000); + BEAST_EXPECT(canAdd(amt1, amt2) == true); + } + + // Adding zero + { + STAmount amt1(mpt, 1000); + STAmount amt2(mpt, 0); + BEAST_EXPECT(canAdd(amt1, amt2) == true); + } + + // Adding two positive MPT amounts + { + STAmount amt1(mpt, 500); + STAmount amt2(mpt, 1500); + BEAST_EXPECT(canAdd(amt1, amt2) == true); + } + + // Adding two negative MPT amounts + { + STAmount amt1(mpt, -500); + STAmount amt2(mpt, -1500); + BEAST_EXPECT(canAdd(amt1, amt2) == true); + } + + // Adding a positive and a negative MPT amount + { + STAmount amt1(mpt, 1000); + 
STAmount amt2(mpt, -1000); + BEAST_EXPECT(canAdd(amt1, amt2) == true); + } + + // Overflow check for max MPT amounts + { + STAmount amt1( + mpt, std::numeric_limits::max()); + STAmount amt2(mpt, 1); + BEAST_EXPECT(canAdd(amt1, amt2) == false); + } + + // Overflow check for min MPT amounts + // Note: Cannot check min MPT overflow because you cannot initialize the + // STAmount with a negative MPT amount. + + // Adding MPT and XRP + { + STAmount amt1(XRPAmount(1000)); + STAmount amt2(mpt, 1000); + BEAST_EXPECT(canAdd(amt1, amt2) == false); + } + + // Adding different MPT issues (non zero) + { + STAmount amt1(mpt2, 500); + STAmount amt2(mpt, 500); + BEAST_EXPECT(canAdd(amt1, amt2) == false); + } + + // Adding different MPT issues (non zero) + { + STAmount amt1(mpt2, 0); + STAmount amt2(mpt, 500); + BEAST_EXPECT(canAdd(amt1, amt2) == false); + } + } + + void + testCanSubtractXRP() + { + testcase("can subtract xrp"); + + // Subtracting zero + { + STAmount amt1(XRPAmount(1000)); + STAmount amt2(XRPAmount(0)); + BEAST_EXPECT(canSubtract(amt1, amt2) == true); + } + + // Subtracting zero + { + STAmount amt1(XRPAmount(0)); + STAmount amt2(XRPAmount(1000)); + BEAST_EXPECT(canSubtract(amt1, amt2) == false); + } + + // Subtracting two positive XRP amounts + { + STAmount amt1(XRPAmount(1500)); + STAmount amt2(XRPAmount(500)); + BEAST_EXPECT(canSubtract(amt1, amt2) == true); + } + + // Subtracting two negative XRP amounts + { + STAmount amt1(XRPAmount(-1500)); + STAmount amt2(XRPAmount(-500)); + BEAST_EXPECT(canSubtract(amt1, amt2) == true); + } + + // Subtracting a positive and a negative XRP amount + { + STAmount amt1(XRPAmount(1000)); + STAmount amt2(XRPAmount(-1000)); + BEAST_EXPECT(canSubtract(amt1, amt2) == true); + } + + // Underflow check for min XRP amounts + { + STAmount amt1(std::numeric_limits::max()); + amt1 += XRPAmount(1); + STAmount amt2(XRPAmount(1)); + BEAST_EXPECT(canSubtract(amt1, amt2) == false); + } + + // Overflow check for max XRP amounts + { + STAmount amt1(std::numeric_limits::max()); + STAmount amt2(XRPAmount(-1)); + BEAST_EXPECT(canSubtract(amt1, amt2) == false); + } + } + + void + testCanSubtractIOU() + { + testcase("can subtract iou"); + Issue const usd{Currency(0x5553440000000000), AccountID(0x4985601)}; + Issue const eur{Currency(0x4555520000000000), AccountID(0x4985601)}; + + // Subtracting two IOU amounts + { + STAmount amt1(usd, 1500); + STAmount amt2(usd, 500); + BEAST_EXPECT(canSubtract(amt1, amt2) == true); + } + + // Subtracting XRP and IOU + { + STAmount amt1(XRPAmount(1000)); + STAmount amt2(usd, 1000); + BEAST_EXPECT(canSubtract(amt1, amt2) == false); + } + + // Subtracting different IOU issues (non zero) + { + STAmount amt1(usd, 1000); + STAmount amt2(eur, 500); + BEAST_EXPECT(canSubtract(amt1, amt2) == false); + } + + // Subtracting different IOU issues (zero) + { + STAmount amt1(usd, 0); + STAmount amt2(eur, 500); + BEAST_EXPECT(canSubtract(amt1, amt2) == false); + } + } + + void + testCanSubtractMPT() + { + testcase("can subtract mpt"); + + MPTIssue const mpt{MPTIssue{makeMptID(1, AccountID(0x4985601))}}; + MPTIssue const mpt2{MPTIssue{makeMptID(2, AccountID(0x4985601))}}; + + // Subtracting zero + { + STAmount amt1(mpt, 1000); + STAmount amt2(mpt, 0); + BEAST_EXPECT(canSubtract(amt1, amt2) == true); + } + + // Subtracting zero + { + STAmount amt1(mpt, 0); + STAmount amt2(mpt, 1000); + BEAST_EXPECT(canSubtract(amt1, amt2) == false); + } + + // Subtracting two positive MPT amounts + { + STAmount amt1(mpt, 1500); + STAmount amt2(mpt, 500); + 
BEAST_EXPECT(canSubtract(amt1, amt2) == true); + } + + // Subtracting two negative MPT amounts + { + STAmount amt1(mpt, -1500); + STAmount amt2(mpt, -500); + BEAST_EXPECT(canSubtract(amt1, amt2) == true); + } + + // Subtracting a positive and a negative MPT amount + { + STAmount amt1(mpt, 1000); + STAmount amt2(mpt, -1000); + BEAST_EXPECT(canSubtract(amt1, amt2) == true); + } + + // Underflow check for min MPT amounts + // Note: Cannot check min MPT underflow because you cannot initialize + // the STAmount with a negative MPT amount. + + // Overflow check for max positive MPT amounts (should fail) + { + STAmount amt1( + mpt, std::numeric_limits::max()); + STAmount amt2(mpt, -2); + BEAST_EXPECT(canSubtract(amt1, amt2) == false); + } + + // Subtracting MPT and XRP + { + STAmount amt1(XRPAmount(1000)); + STAmount amt2(mpt, 1000); + BEAST_EXPECT(canSubtract(amt1, amt2) == false); + } + + // Subtracting different MPT issues (non zero) + { + STAmount amt1(mpt, 1000); + STAmount amt2(mpt2, 500); + BEAST_EXPECT(canSubtract(amt1, amt2) == false); + } + + // Subtracting different MPT issues (zero) + { + STAmount amt1(mpt, 0); + STAmount amt2(mpt2, 500); + BEAST_EXPECT(canSubtract(amt1, amt2) == false); + } + } + //-------------------------------------------------------------------------- void @@ -681,6 +1043,12 @@ public: testRounding(); testConvertXRP(); testConvertIOU(); + testCanAddXRP(); + testCanAddIOU(); + testCanAddMPT(); + testCanSubtractXRP(); + testCanSubtractIOU(); + testCanSubtractMPT(); } }; diff --git a/src/test/rpc/AccountLines_test.cpp b/src/test/rpc/AccountLines_test.cpp index 42acea4111..9215f4087a 100644 --- a/src/test/rpc/AccountLines_test.cpp +++ b/src/test/rpc/AccountLines_test.cpp @@ -573,21 +573,6 @@ public: env.fund(XRP(10000), alice, becky, gw1); env.close(); - // A couple of helper lambdas - auto escrow = [&env]( - Account const& account, - Account const& to, - STAmount const& amount) { - Json::Value jv; - jv[jss::TransactionType] = jss::EscrowCreate; - jv[jss::Account] = account.human(); - jv[jss::Destination] = to.human(); - jv[jss::Amount] = amount.getJson(JsonOptions::none); - NetClock::time_point finish = env.now() + 1s; - jv[sfFinishAfter.jsonName] = finish.time_since_epoch().count(); - return jv; - }; - auto payChan = [](Account const& account, Account const& to, STAmount const& amount, @@ -623,8 +608,10 @@ public: env.close(); // Escrow, in each direction - env(escrow(alice, becky, XRP(1000))); - env(escrow(becky, alice, XRP(1000))); + env(escrow::create(alice, becky, XRP(1000)), + escrow::finish_time(env.now() + 1s)); + env(escrow::create(becky, alice, XRP(1000)), + escrow::finish_time(env.now() + 1s)); // Pay channels, in each direction env(payChan(alice, becky, XRP(1000), 100s, alice.pk())); diff --git a/src/test/rpc/AccountSet_test.cpp b/src/test/rpc/AccountSet_test.cpp index 7bca51ae96..c056279bf1 100644 --- a/src/test/rpc/AccountSet_test.cpp +++ b/src/test/rpc/AccountSet_test.cpp @@ -99,6 +99,12 @@ public: // is tested elsewhere. 
continue; } + if (flag == asfAllowTrustLineLocking) + { + // These flags are part of the AllowTokenLocking amendment + // and are tested elsewhere + continue; + } if (std::find(goodFlags.begin(), goodFlags.end(), flag) != goodFlags.end()) diff --git a/src/xrpld/app/tx/detail/Escrow.cpp b/src/xrpld/app/tx/detail/Escrow.cpp index 0b58957fcf..f7840650f7 100644 --- a/src/xrpld/app/tx/detail/Escrow.cpp +++ b/src/xrpld/app/tx/detail/Escrow.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -29,6 +30,7 @@ #include #include #include +#include #include #include @@ -79,7 +81,41 @@ namespace ripple { TxConsequences EscrowCreate::makeTxConsequences(PreflightContext const& ctx) { - return TxConsequences{ctx.tx, ctx.tx[sfAmount].xrp()}; + return TxConsequences{ + ctx.tx, isXRP(ctx.tx[sfAmount]) ? ctx.tx[sfAmount].xrp() : beast::zero}; +} + +template +static NotTEC +escrowCreatePreflightHelper(PreflightContext const& ctx); + +template <> +NotTEC +escrowCreatePreflightHelper(PreflightContext const& ctx) +{ + STAmount const amount = ctx.tx[sfAmount]; + if (amount.native() || amount <= beast::zero) + return temBAD_AMOUNT; + + if (badCurrency() == amount.getCurrency()) + return temBAD_CURRENCY; + + return tesSUCCESS; +} + +template <> +NotTEC +escrowCreatePreflightHelper(PreflightContext const& ctx) +{ + if (!ctx.rules.enabled(featureMPTokensV1)) + return temDISABLED; + + auto const amount = ctx.tx[sfAmount]; + if (amount.native() || amount.mpt() > MPTAmount{maxMPTokenAmount} || + amount <= beast::zero) + return temBAD_AMOUNT; + + return tesSUCCESS; } NotTEC @@ -91,11 +127,25 @@ EscrowCreate::preflight(PreflightContext const& ctx) if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) return ret; - if (!isXRP(ctx.tx[sfAmount])) - return temBAD_AMOUNT; + STAmount const amount{ctx.tx[sfAmount]}; + if (!isXRP(amount)) + { + if (!ctx.rules.enabled(featureTokenEscrow)) + return temBAD_AMOUNT; - if (ctx.tx[sfAmount] <= beast::zero) - return temBAD_AMOUNT; + if (auto const ret = std::visit( + [&](T const&) { + return escrowCreatePreflightHelper(ctx); + }, + amount.asset().value()); + !isTesSuccess(ret)) + return ret; + } + else + { + if (amount <= beast::zero) + return temBAD_AMOUNT; + } // We must specify at least one timeout value if (!ctx.tx[~sfCancelAfter] && !ctx.tx[~sfFinishAfter]) @@ -142,10 +192,181 @@ EscrowCreate::preflight(PreflightContext const& ctx) return preflight2(ctx); } +template +static TER +escrowCreatePreclaimHelper( + PreclaimContext const& ctx, + AccountID const& account, + AccountID const& dest, + STAmount const& amount); + +template <> +TER +escrowCreatePreclaimHelper( + PreclaimContext const& ctx, + AccountID const& account, + AccountID const& dest, + STAmount const& amount) +{ + AccountID issuer = amount.getIssuer(); + // If the issuer is the same as the account, return tecNO_PERMISSION + if (issuer == account) + return tecNO_PERMISSION; + + // If the lsfAllowTrustLineLocking is not enabled, return tecNO_PERMISSION + auto const sleIssuer = ctx.view.read(keylet::account(issuer)); + if (!sleIssuer) + return tecNO_ISSUER; + if (!sleIssuer->isFlag(lsfAllowTrustLineLocking)) + return tecNO_PERMISSION; + + // If the account does not have a trustline to the issuer, return tecNO_LINE + auto const sleRippleState = + ctx.view.read(keylet::line(account, issuer, amount.getCurrency())); + if (!sleRippleState) + return tecNO_LINE; + + STAmount const balance = (*sleRippleState)[sfBalance]; + + // If balance is positive, issuer must have higher address than 
account + if (balance > beast::zero && issuer < account) + return tecNO_PERMISSION; // LCOV_EXCL_LINE + + // If balance is negative, issuer must have lower address than account + if (balance < beast::zero && issuer > account) + return tecNO_PERMISSION; // LCOV_EXCL_LINE + + // If the issuer has requireAuth set, check if the account is authorized + if (auto const ter = requireAuth(ctx.view, amount.issue(), account); + ter != tesSUCCESS) + return ter; + + // If the issuer has requireAuth set, check if the destination is authorized + if (auto const ter = requireAuth(ctx.view, amount.issue(), dest); + ter != tesSUCCESS) + return ter; + + // If the issuer has frozen the account, return tecFROZEN + if (isFrozen(ctx.view, account, amount.issue())) + return tecFROZEN; + + // If the issuer has frozen the destination, return tecFROZEN + if (isFrozen(ctx.view, dest, amount.issue())) + return tecFROZEN; + + STAmount const spendableAmount = accountHolds( + ctx.view, + account, + amount.getCurrency(), + issuer, + fhIGNORE_FREEZE, + ctx.j); + + // If the balance is less than or equal to 0, return tecINSUFFICIENT_FUNDS + if (spendableAmount <= beast::zero) + return tecINSUFFICIENT_FUNDS; + + // If the spendable amount is less than the amount, return + // tecINSUFFICIENT_FUNDS + if (spendableAmount < amount) + return tecINSUFFICIENT_FUNDS; + + // If the amount is not addable to the balance, return tecPRECISION_LOSS + if (!canAdd(spendableAmount, amount)) + return tecPRECISION_LOSS; + + return tesSUCCESS; +} + +template <> +TER +escrowCreatePreclaimHelper( + PreclaimContext const& ctx, + AccountID const& account, + AccountID const& dest, + STAmount const& amount) +{ + AccountID issuer = amount.getIssuer(); + // If the issuer is the same as the account, return tecNO_PERMISSION + if (issuer == account) + return tecNO_PERMISSION; + + // If the mpt does not exist, return tecOBJECT_NOT_FOUND + auto const issuanceKey = + keylet::mptIssuance(amount.get().getMptID()); + auto const sleIssuance = ctx.view.read(issuanceKey); + if (!sleIssuance) + return tecOBJECT_NOT_FOUND; + + // If the lsfMPTCanEscrow is not enabled, return tecNO_PERMISSION + if (!sleIssuance->isFlag(lsfMPTCanEscrow)) + return tecNO_PERMISSION; + + // If the issuer is not the same as the issuer of the mpt, return + // tecNO_PERMISSION + if (sleIssuance->getAccountID(sfIssuer) != issuer) + return tecNO_PERMISSION; // LCOV_EXCL_LINE + + // If the account does not have the mpt, return tecOBJECT_NOT_FOUND + if (!ctx.view.exists(keylet::mptoken(issuanceKey.key, account))) + return tecOBJECT_NOT_FOUND; + + // If the issuer has requireAuth set, check if the account is + // authorized + auto const& mptIssue = amount.get(); + if (auto const ter = + requireAuth(ctx.view, mptIssue, account, MPTAuthType::WeakAuth); + ter != tesSUCCESS) + return ter; + + // If the issuer has requireAuth set, check if the destination is + // authorized + if (auto const ter = + requireAuth(ctx.view, mptIssue, dest, MPTAuthType::WeakAuth); + ter != tesSUCCESS) + return ter; + + // If the issuer has frozen the account, return tecLOCKED + if (isFrozen(ctx.view, account, mptIssue)) + return tecLOCKED; + + // If the issuer has frozen the destination, return tecLOCKED + if (isFrozen(ctx.view, dest, mptIssue)) + return tecLOCKED; + + // If the mpt cannot be transferred, return tecNO_AUTH + if (auto const ter = canTransfer(ctx.view, mptIssue, account, dest); + ter != tesSUCCESS) + return ter; + + STAmount const spendableAmount = accountHolds( + ctx.view, + account, + amount.get(), + 
fhIGNORE_FREEZE, + ahIGNORE_AUTH, + ctx.j); + + // If the balance is less than or equal to 0, return tecINSUFFICIENT_FUNDS + if (spendableAmount <= beast::zero) + return tecINSUFFICIENT_FUNDS; + + // If the spendable amount is less than the amount, return + // tecINSUFFICIENT_FUNDS + if (spendableAmount < amount) + return tecINSUFFICIENT_FUNDS; + + return tesSUCCESS; +} + TER EscrowCreate::preclaim(PreclaimContext const& ctx) { - auto const sled = ctx.view.read(keylet::account(ctx.tx[sfDestination])); + STAmount const amount{ctx.tx[sfAmount]}; + AccountID const account{ctx.tx[sfAccount]}; + AccountID const dest{ctx.tx[sfDestination]}; + + auto const sled = ctx.view.read(keylet::account(dest)); if (!sled) return tecNO_DST; @@ -156,6 +377,77 @@ EscrowCreate::preclaim(PreclaimContext const& ctx) if (isPseudoAccount(sled)) return tecNO_PERMISSION; + if (!isXRP(amount)) + { + if (!ctx.view.rules().enabled(featureTokenEscrow)) + return temDISABLED; // LCOV_EXCL_LINE + + if (auto const ret = std::visit( + [&](T const&) { + return escrowCreatePreclaimHelper( + ctx, account, dest, amount); + }, + amount.asset().value()); + !isTesSuccess(ret)) + return ret; + } + return tesSUCCESS; +} + +template +static TER +escrowLockApplyHelper( + ApplyView& view, + AccountID const& issuer, + AccountID const& sender, + STAmount const& amount, + beast::Journal journal); + +template <> +TER +escrowLockApplyHelper( + ApplyView& view, + AccountID const& issuer, + AccountID const& sender, + STAmount const& amount, + beast::Journal journal) +{ + // Defensive: Issuer cannot create an escrow + // LCOV_EXCL_START + if (issuer == sender) + return tecINTERNAL; + // LCOV_EXCL_STOP + + auto const ter = rippleCredit( + view, + sender, + issuer, + amount, + amount.holds() ? false : true, + journal); + if (ter != tesSUCCESS) + return ter; // LCOV_EXCL_LINE + return tesSUCCESS; +} + +template <> +TER +escrowLockApplyHelper( + ApplyView& view, + AccountID const& issuer, + AccountID const& sender, + STAmount const& amount, + beast::Journal journal) +{ + // Defensive: Issuer cannot create an escrow + // LCOV_EXCL_START + if (issuer == sender) + return tecINTERNAL; + // LCOV_EXCL_STOP + + auto const ter = rippleLockEscrowMPT(view, sender, amount, journal); + if (ter != tesSUCCESS) + return ter; // LCOV_EXCL_LINE return tesSUCCESS; } @@ -196,21 +488,23 @@ EscrowCreate::doApply() } } - auto const account = ctx_.tx[sfAccount]; - auto const sle = ctx_.view().peek(keylet::account(account)); + auto const sle = ctx_.view().peek(keylet::account(account_)); if (!sle) - return tefINTERNAL; + return tefINTERNAL; // LCOV_EXCL_LINE // Check reserve and funds availability + STAmount const amount{ctx_.tx[sfAmount]}; + + auto const reserve = + ctx_.view().fees().accountReserve((*sle)[sfOwnerCount] + 1); + + if (mSourceBalance < reserve) + return tecINSUFFICIENT_RESERVE; + + // Check reserve and funds availability + if (isXRP(amount)) { - auto const balance = STAmount((*sle)[sfBalance]).xrp(); - auto const reserve = - ctx_.view().fees().accountReserve((*sle)[sfOwnerCount] + 1); - - if (balance < reserve) - return tecINSUFFICIENT_RESERVE; - - if (balance < reserve + STAmount(ctx_.tx[sfAmount]).xrp()) + if (mSourceBalance < reserve + STAmount(amount).xrp()) return tecUNFUNDED; } @@ -233,10 +527,10 @@ EscrowCreate::doApply() // Create escrow in ledger. Note that we we use the value from the // sequence or ticket. For more explanation see comments in SeqProxy.h. 
- Keylet const escrowKeylet = keylet::escrow(account, ctx_.tx.getSeqValue()); + Keylet const escrowKeylet = keylet::escrow(account_, ctx_.tx.getSeqValue()); auto const slep = std::make_shared(escrowKeylet); - (*slep)[sfAmount] = ctx_.tx[sfAmount]; - (*slep)[sfAccount] = account; + (*slep)[sfAmount] = amount; + (*slep)[sfAccount] = account_; (*slep)[~sfCondition] = ctx_.tx[~sfCondition]; (*slep)[~sfSourceTag] = ctx_.tx[~sfSourceTag]; (*slep)[sfDestination] = ctx_.tx[sfDestination]; @@ -244,32 +538,69 @@ EscrowCreate::doApply() (*slep)[~sfFinishAfter] = ctx_.tx[~sfFinishAfter]; (*slep)[~sfDestinationTag] = ctx_.tx[~sfDestinationTag]; + if (ctx_.view().rules().enabled(featureTokenEscrow) && !isXRP(amount)) + { + auto const xferRate = transferRate(ctx_.view(), amount); + if (xferRate != parityRate) + (*slep)[sfTransferRate] = xferRate.value; + } + ctx_.view().insert(slep); // Add escrow to sender's owner directory { auto page = ctx_.view().dirInsert( - keylet::ownerDir(account), escrowKeylet, describeOwnerDir(account)); + keylet::ownerDir(account_), + escrowKeylet, + describeOwnerDir(account_)); if (!page) - return tecDIR_FULL; + return tecDIR_FULL; // LCOV_EXCL_LINE (*slep)[sfOwnerNode] = *page; } // If it's not a self-send, add escrow to recipient's owner directory. - if (auto const dest = ctx_.tx[sfDestination]; dest != ctx_.tx[sfAccount]) + AccountID const dest = ctx_.tx[sfDestination]; + if (dest != account_) { auto page = ctx_.view().dirInsert( keylet::ownerDir(dest), escrowKeylet, describeOwnerDir(dest)); if (!page) - return tecDIR_FULL; + return tecDIR_FULL; // LCOV_EXCL_LINE (*slep)[sfDestinationNode] = *page; } - // Deduct owner's balance, increment owner count - (*sle)[sfBalance] = (*sle)[sfBalance] - ctx_.tx[sfAmount]; + // IOU escrow objects are added to the issuer's owner directory to help + // track the total locked balance. For MPT, this isn't necessary because the + // locked balance is already stored directly in the MPTokenIssuance object. 
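+    // Illustrative sketch only (names follow the gateway_balances handler in
+    // this changeset; treat the snippet as an assumption, not part of this
+    // transactor): once the escrow is linked into the issuer's owner
+    // directory, a reader can total the issuer's locked IOU obligations by
+    // walking that directory, e.g.:
+    //
+    //   std::map<Currency, STAmount> locked;
+    //   forEachItem(view, issuer, [&](std::shared_ptr<SLE const> const& sle) {
+    //       if (sle->getType() != ltESCROW)
+    //           return;
+    //       auto const& amt = sle->getFieldAmount(sfAmount);
+    //       auto& bal = locked[amt.getCurrency()];
+    //       bal = (bal == beast::zero) ? amt : bal + amt;
+    //   });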
+ AccountID const issuer = amount.getIssuer(); + if (!isXRP(amount) && issuer != account_ && issuer != dest && + !amount.holds()) + { + auto page = ctx_.view().dirInsert( + keylet::ownerDir(issuer), escrowKeylet, describeOwnerDir(issuer)); + if (!page) + return tecDIR_FULL; // LCOV_EXCL_LINE + (*slep)[sfIssuerNode] = *page; + } + + // Deduct owner's balance + if (isXRP(amount)) + (*sle)[sfBalance] = (*sle)[sfBalance] - amount; + else + { + if (auto const ret = std::visit( + [&](T const&) { + return escrowLockApplyHelper( + ctx_.view(), issuer, account_, amount, j_); + }, + amount.asset().value()); + !isTesSuccess(ret)) + return ret; + } + + // increment owner count adjustOwnerCount(ctx_.view(), sle, 1, ctx_.journal); ctx_.view().update(sle); - return tesSUCCESS; } @@ -360,19 +691,324 @@ EscrowFinish::calculateBaseFee(ReadView const& view, STTx const& tx) return Transactor::calculateBaseFee(view, tx) + extraFee; } +template +static TER +escrowFinishPreclaimHelper( + PreclaimContext const& ctx, + AccountID const& dest, + STAmount const& amount); + +template <> +TER +escrowFinishPreclaimHelper( + PreclaimContext const& ctx, + AccountID const& dest, + STAmount const& amount) +{ + AccountID issuer = amount.getIssuer(); + // If the issuer is the same as the account, return tesSUCCESS + if (issuer == dest) + return tesSUCCESS; + + // If the issuer has requireAuth set, check if the destination is authorized + if (auto const ter = requireAuth(ctx.view, amount.issue(), dest); + ter != tesSUCCESS) + return ter; + + // If the issuer has deep frozen the destination, return tecFROZEN + if (isDeepFrozen(ctx.view, dest, amount.getCurrency(), amount.getIssuer())) + return tecFROZEN; + + return tesSUCCESS; +} + +template <> +TER +escrowFinishPreclaimHelper( + PreclaimContext const& ctx, + AccountID const& dest, + STAmount const& amount) +{ + AccountID issuer = amount.getIssuer(); + // If the issuer is the same as the dest, return tesSUCCESS + if (issuer == dest) + return tesSUCCESS; + + // If the mpt does not exist, return tecOBJECT_NOT_FOUND + auto const issuanceKey = + keylet::mptIssuance(amount.get().getMptID()); + auto const sleIssuance = ctx.view.read(issuanceKey); + if (!sleIssuance) + return tecOBJECT_NOT_FOUND; + + // If the issuer has requireAuth set, check if the destination is + // authorized + auto const& mptIssue = amount.get(); + if (auto const ter = + requireAuth(ctx.view, mptIssue, dest, MPTAuthType::WeakAuth); + ter != tesSUCCESS) + return ter; + + // If the issuer has frozen the destination, return tecLOCKED + if (isFrozen(ctx.view, dest, mptIssue)) + return tecLOCKED; + + return tesSUCCESS; +} + TER EscrowFinish::preclaim(PreclaimContext const& ctx) { - if (!ctx.view.rules().enabled(featureCredentials)) - return Transactor::preclaim(ctx); + if (ctx.view.rules().enabled(featureCredentials)) + { + if (auto const err = credentials::valid(ctx, ctx.tx[sfAccount]); + !isTesSuccess(err)) + return err; + } - if (auto const err = credentials::valid(ctx, ctx.tx[sfAccount]); - !isTesSuccess(err)) - return err; + auto const k = keylet::escrow(ctx.tx[sfOwner], ctx.tx[sfOfferSequence]); + auto const slep = ctx.view.read(k); + if (!slep) + return tecNO_TARGET; + AccountID const dest = (*slep)[sfDestination]; + STAmount const amount = (*slep)[sfAmount]; + + if (!isXRP(amount)) + { + if (!ctx.view.rules().enabled(featureTokenEscrow)) + return temDISABLED; // LCOV_EXCL_LINE + + if (auto const ret = std::visit( + [&](T const&) { + return escrowFinishPreclaimHelper(ctx, dest, amount); + }, + 
amount.asset().value()); + !isTesSuccess(ret)) + return ret; + } return tesSUCCESS; } +template +static TER +escrowUnlockApplyHelper( + ApplyView& view, + Rate lockedRate, + std::shared_ptr const& sleDest, + STAmount const& xrpBalance, + STAmount const& amount, + AccountID const& issuer, + AccountID const& sender, + AccountID const& receiver, + bool createAsset, + beast::Journal journal); + +template <> +TER +escrowUnlockApplyHelper( + ApplyView& view, + Rate lockedRate, + std::shared_ptr const& sleDest, + STAmount const& xrpBalance, + STAmount const& amount, + AccountID const& issuer, + AccountID const& sender, + AccountID const& receiver, + bool createAsset, + beast::Journal journal) +{ + Keylet const trustLineKey = keylet::line(receiver, amount.issue()); + bool const recvLow = issuer > receiver; + bool const senderIssuer = issuer == sender; + bool const receiverIssuer = issuer == receiver; + bool const issuerHigh = issuer > receiver; + + // LCOV_EXCL_START + if (senderIssuer) + return tecINTERNAL; + // LCOV_EXCL_STOP + + if (receiverIssuer) + return tesSUCCESS; + + if (!view.exists(trustLineKey) && createAsset && !receiverIssuer) + { + // Can the account cover the trust line's reserve? + if (std::uint32_t const ownerCount = {sleDest->at(sfOwnerCount)}; + xrpBalance < view.fees().accountReserve(ownerCount + 1)) + { + JLOG(journal.trace()) << "Trust line does not exist. " + "Insufficent reserve to create line."; + + return tecNO_LINE_INSUF_RESERVE; + } + + Currency const currency = amount.getCurrency(); + STAmount initialBalance(amount.issue()); + initialBalance.setIssuer(noAccount()); + + // clang-format off + if (TER const ter = trustCreate( + view, // payment sandbox + recvLow, // is dest low? + issuer, // source + receiver, // destination + trustLineKey.key, // ledger index + sleDest, // Account to add to + false, // authorize account + (sleDest->getFlags() & lsfDefaultRipple) == 0, + false, // freeze trust line + false, // deep freeze trust line + initialBalance, // zero initial balance + Issue(currency, receiver), // limit of zero + 0, // quality in + 0, // quality out + journal); // journal + !isTesSuccess(ter)) + { + return ter; // LCOV_EXCL_LINE + } + // clang-format on + + view.update(sleDest); + } + + if (!view.exists(trustLineKey) && !receiverIssuer) + return tecNO_LINE; + + auto const xferRate = transferRate(view, amount); + // update if issuer rate is less than locked rate + if (xferRate < lockedRate) + lockedRate = xferRate; + + // Transfer Rate only applies when: + // 1. Issuer is not involved in the transfer (senderIssuer or + // receiverIssuer) + // 2. The locked rate is different from the parity rate + + // NOTE: Transfer fee in escrow works a bit differently from a normal + // payment. In escrow, the fee is deducted from the locked/sending amount, + // whereas in a normal payment, the transfer fee is taken on top of the + // sending amount. 
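+    // Worked example (illustrative only): assume lockedRate = 1.25 (a 25%
+    // transfer fee) and that neither the sender nor the receiver is the
+    // issuer. For an escrowed amount of USD 100:
+    //   xferFee  = 100 - divideRound(100, 1.25) = 100 - 80 = USD 20
+    //   finalAmt = 100 - 20                                = USD 80
+    // The destination receives USD 80 out of the USD 100 that was locked,
+    // whereas a regular Payment delivering USD 100 at the same rate would
+    // debit the sender USD 125.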
+ auto finalAmt = amount; + if ((!senderIssuer && !receiverIssuer) && lockedRate != parityRate) + { + // compute transfer fee, if any + auto const xferFee = amount.value() - + divideRound(amount, lockedRate, amount.issue(), true); + // compute balance to transfer + finalAmt = amount.value() - xferFee; + } + + // validate the line limit if the account submitting txn is not the receiver + // of the funds + if (!createAsset) + { + auto const sleRippleState = view.peek(trustLineKey); + if (!sleRippleState) + return tecINTERNAL; // LCOV_EXCL_LINE + + // if the issuer is the high, then we use the low limit + // otherwise we use the high limit + STAmount const lineLimit = sleRippleState->getFieldAmount( + issuerHigh ? sfLowLimit : sfHighLimit); + + STAmount lineBalance = sleRippleState->getFieldAmount(sfBalance); + + // flip the sign of the line balance if the issuer is not high + if (!issuerHigh) + lineBalance.negate(); + + // add the final amount to the line balance + lineBalance += finalAmt; + + // if the transfer would exceed the line limit return tecLIMIT_EXCEEDED + if (lineLimit < lineBalance) + return tecLIMIT_EXCEEDED; + } + + // if destination is not the issuer then transfer funds + if (!receiverIssuer) + { + auto const ter = + rippleCredit(view, issuer, receiver, finalAmt, true, journal); + if (ter != tesSUCCESS) + return ter; // LCOV_EXCL_LINE + } + return tesSUCCESS; +} + +template <> +TER +escrowUnlockApplyHelper( + ApplyView& view, + Rate lockedRate, + std::shared_ptr const& sleDest, + STAmount const& xrpBalance, + STAmount const& amount, + AccountID const& issuer, + AccountID const& sender, + AccountID const& receiver, + bool createAsset, + beast::Journal journal) +{ + bool const senderIssuer = issuer == sender; + bool const receiverIssuer = issuer == receiver; + + auto const mptID = amount.get().getMptID(); + auto const issuanceKey = keylet::mptIssuance(mptID); + if (!view.exists(keylet::mptoken(issuanceKey.key, receiver)) && + createAsset && !receiverIssuer) + { + if (std::uint32_t const ownerCount = {sleDest->at(sfOwnerCount)}; + xrpBalance < view.fees().accountReserve(ownerCount + 1)) + { + return tecINSUFFICIENT_RESERVE; + } + + if (auto const ter = + MPTokenAuthorize::createMPToken(view, mptID, receiver, 0); + !isTesSuccess(ter)) + { + return ter; // LCOV_EXCL_LINE + } + + // update owner count. + adjustOwnerCount(view, sleDest, 1, journal); + } + + if (!view.exists(keylet::mptoken(issuanceKey.key, receiver)) && + !receiverIssuer) + return tecNO_PERMISSION; + + auto const xferRate = transferRate(view, amount); + // update if issuer rate is less than locked rate + if (xferRate < lockedRate) + lockedRate = xferRate; + + // Transfer Rate only applies when: + // 1. Issuer is not involved in the transfer (senderIssuer or + // receiverIssuer) + // 2. The locked rate is different from the parity rate + + // NOTE: Transfer fee in escrow works a bit differently from a normal + // payment. In escrow, the fee is deducted from the locked/sending amount, + // whereas in a normal payment, the transfer fee is taken on top of the + // sending amount. 
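+    // Worked example (illustrative only): assume lockedRate = 1.25 (a 25%
+    // transfer fee) and that neither the sender nor the receiver is the
+    // issuer. For an escrowed amount of 1000 MPT units:
+    //   xferFee  = 1000 - divideRound(1000, 1.25) = 1000 - 800 = 200 units
+    //   finalAmt = 1000 - 200                                  = 800 units
+    // Only finalAmt is credited to the receiver below; the fee comes out of
+    // the locked units rather than being charged on top of them.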
+ auto finalAmt = amount; + if ((!senderIssuer && !receiverIssuer) && lockedRate != parityRate) + { + // compute transfer fee, if any + auto const xferFee = amount.value() - + divideRound(amount, lockedRate, amount.asset(), true); + // compute balance to transfer + finalAmt = amount.value() - xferFee; + } + + return rippleUnlockEscrowMPT(view, sender, receiver, finalAmt, journal); +} + TER EscrowFinish::doApply() { @@ -495,8 +1131,50 @@ EscrowFinish::doApply() } } + STAmount const amount = slep->getFieldAmount(sfAmount); // Transfer amount to destination - (*sled)[sfBalance] = (*sled)[sfBalance] + (*slep)[sfAmount]; + if (isXRP(amount)) + (*sled)[sfBalance] = (*sled)[sfBalance] + amount; + else + { + if (!ctx_.view().rules().enabled(featureTokenEscrow)) + return temDISABLED; // LCOV_EXCL_LINE + + Rate lockedRate = slep->isFieldPresent(sfTransferRate) + ? ripple::Rate(slep->getFieldU32(sfTransferRate)) + : parityRate; + auto const issuer = amount.getIssuer(); + bool const createAsset = destID == account_; + if (auto const ret = std::visit( + [&](T const&) { + return escrowUnlockApplyHelper( + ctx_.view(), + lockedRate, + sled, + mPriorBalance, + amount, + issuer, + account, + destID, + createAsset, + j_); + }, + amount.asset().value()); + !isTesSuccess(ret)) + return ret; + + // Remove escrow from issuers owner directory, if present. + if (auto const optPage = (*slep)[~sfIssuerNode]; optPage) + { + if (!ctx_.view().dirRemove( + keylet::ownerDir(issuer), *optPage, k.key, true)) + { + JLOG(j_.fatal()) << "Unable to delete Escrow from recipient."; + return tefBAD_LEDGER; // LCOV_EXCL_LINE + } + } + } + ctx_.view().update(sled); // Adjust source owner count @@ -506,7 +1184,6 @@ EscrowFinish::doApply() // Remove escrow from ledger ctx_.view().erase(slep); - return tesSUCCESS; } @@ -524,6 +1201,90 @@ EscrowCancel::preflight(PreflightContext const& ctx) return preflight2(ctx); } +template +static TER +escrowCancelPreclaimHelper( + PreclaimContext const& ctx, + AccountID const& account, + STAmount const& amount); + +template <> +TER +escrowCancelPreclaimHelper( + PreclaimContext const& ctx, + AccountID const& account, + STAmount const& amount) +{ + AccountID issuer = amount.getIssuer(); + // If the issuer is the same as the account, return tecINTERNAL + if (issuer == account) + return tecINTERNAL; // LCOV_EXCL_LINE + + // If the issuer has requireAuth set, check if the account is authorized + if (auto const ter = requireAuth(ctx.view, amount.issue(), account); + ter != tesSUCCESS) + return ter; + + return tesSUCCESS; +} + +template <> +TER +escrowCancelPreclaimHelper( + PreclaimContext const& ctx, + AccountID const& account, + STAmount const& amount) +{ + AccountID issuer = amount.getIssuer(); + // If the issuer is the same as the account, return tecINTERNAL + if (issuer == account) + return tecINTERNAL; // LCOV_EXCL_LINE + + // If the mpt does not exist, return tecOBJECT_NOT_FOUND + auto const issuanceKey = + keylet::mptIssuance(amount.get().getMptID()); + auto const sleIssuance = ctx.view.read(issuanceKey); + if (!sleIssuance) + return tecOBJECT_NOT_FOUND; // LCOV_EXCL_LINE + + // If the issuer has requireAuth set, check if the account is + // authorized + auto const& mptIssue = amount.get(); + if (auto const ter = + requireAuth(ctx.view, mptIssue, account, MPTAuthType::WeakAuth); + ter != tesSUCCESS) + return ter; + + return tesSUCCESS; +} + +TER +EscrowCancel::preclaim(PreclaimContext const& ctx) +{ + auto const k = keylet::escrow(ctx.tx[sfOwner], ctx.tx[sfOfferSequence]); + auto const slep = 
ctx.view.read(k); + if (!slep) + return tecNO_TARGET; + + AccountID const account = (*slep)[sfAccount]; + STAmount const amount = (*slep)[sfAmount]; + + if (!isXRP(amount)) + { + if (!ctx.view.rules().enabled(featureTokenEscrow)) + return temDISABLED; // LCOV_EXCL_LINE + + if (auto const ret = std::visit( + [&](T const&) { + return escrowCancelPreclaimHelper(ctx, account, amount); + }, + amount.asset().value()); + !isTesSuccess(ret)) + return ret; + } + return tesSUCCESS; +} + TER EscrowCancel::doApply() { @@ -580,9 +1341,49 @@ EscrowCancel::doApply() } } - // Transfer amount back to owner, decrement owner count auto const sle = ctx_.view().peek(keylet::account(account)); - (*sle)[sfBalance] = (*sle)[sfBalance] + (*slep)[sfAmount]; + STAmount const amount = slep->getFieldAmount(sfAmount); + + // Transfer amount back to the owner + if (isXRP(amount)) + (*sle)[sfBalance] = (*sle)[sfBalance] + amount; + else + { + if (!ctx_.view().rules().enabled(featureTokenEscrow)) + return temDISABLED; // LCOV_EXCL_LINE + + auto const issuer = amount.getIssuer(); + bool const createAsset = account == account_; + if (auto const ret = std::visit( + [&](T const&) { + return escrowUnlockApplyHelper( + ctx_.view(), + parityRate, + slep, + mPriorBalance, + amount, + issuer, + account, // sender and receiver are the same + account, + createAsset, + j_); + }, + amount.asset().value()); + !isTesSuccess(ret)) + return ret; // LCOV_EXCL_LINE + + // Remove escrow from issuers owner directory, if present. + if (auto const optPage = (*slep)[~sfIssuerNode]; optPage) + { + if (!ctx_.view().dirRemove( + keylet::ownerDir(issuer), *optPage, k.key, true)) + { + JLOG(j_.fatal()) << "Unable to delete Escrow from recipient."; + return tefBAD_LEDGER; // LCOV_EXCL_LINE + } + } + } + adjustOwnerCount(ctx_.view(), sle, -1, ctx_.journal); ctx_.view().update(sle); diff --git a/src/xrpld/app/tx/detail/Escrow.h b/src/xrpld/app/tx/detail/Escrow.h index 78acdbee00..2225c94f16 100644 --- a/src/xrpld/app/tx/detail/Escrow.h +++ b/src/xrpld/app/tx/detail/Escrow.h @@ -84,6 +84,9 @@ public: static NotTEC preflight(PreflightContext const& ctx); + static TER + preclaim(PreclaimContext const& ctx); + TER doApply() override; }; diff --git a/src/xrpld/app/tx/detail/InvariantCheck.cpp b/src/xrpld/app/tx/detail/InvariantCheck.cpp index 05c2a5d620..31b8fe3cc1 100644 --- a/src/xrpld/app/tx/detail/InvariantCheck.cpp +++ b/src/xrpld/app/tx/detail/InvariantCheck.cpp @@ -109,7 +109,8 @@ XRPNotCreated::visitEntry( ((*before)[sfAmount] - (*before)[sfBalance]).xrp().drops(); break; case ltESCROW: - drops_ -= (*before)[sfAmount].xrp().drops(); + if (isXRP((*before)[sfAmount])) + drops_ -= (*before)[sfAmount].xrp().drops(); break; default: break; @@ -130,7 +131,7 @@ XRPNotCreated::visitEntry( .drops(); break; case ltESCROW: - if (!isDelete) + if (!isDelete && isXRP((*after)[sfAmount])) drops_ += (*after)[sfAmount].xrp().drops(); break; default: @@ -270,14 +271,35 @@ NoZeroEscrow::visitEntry( std::shared_ptr const& after) { auto isBad = [](STAmount const& amount) { - if (!amount.native()) - return true; + // IOU case + if (amount.holds()) + { + if (amount <= beast::zero) + return true; - if (amount.xrp() <= XRPAmount{0}) - return true; + if (badCurrency() == amount.getCurrency()) + return true; + } - if (amount.xrp() >= INITIAL_XRP) - return true; + // MPT case + if (amount.holds()) + { + if (amount <= beast::zero) + return true; + + if (amount.mpt() > MPTAmount{maxMPTokenAmount}) + return true; + } + + // XRP case + if (amount.native()) + { + if (amount.xrp() <= 
XRPAmount{0}) + return true; + + if (amount.xrp() >= INITIAL_XRP) + return true; + } return false; }; @@ -287,14 +309,40 @@ NoZeroEscrow::visitEntry( if (after && after->getType() == ltESCROW) bad_ |= isBad((*after)[sfAmount]); + + auto checkAmount = [this](std::int64_t amount) { + if (amount > maxMPTokenAmount || amount < 0) + bad_ = true; + }; + + if (after && after->getType() == ltMPTOKEN_ISSUANCE) + { + auto const outstanding = (*after)[sfOutstandingAmount]; + checkAmount(outstanding); + if (auto const locked = (*after)[~sfLockedAmount]) + { + checkAmount(*locked); + bad_ = outstanding < *locked; + } + } + + if (after && after->getType() == ltMPTOKEN) + { + auto const mptAmount = (*after)[sfMPTAmount]; + checkAmount(mptAmount); + if (auto const locked = (*after)[~sfLockedAmount]) + { + checkAmount(*locked); + } + } } bool NoZeroEscrow::finalize( - STTx const&, + STTx const& txn, TER const, XRPAmount const, - ReadView const&, + ReadView const& rv, beast::Journal const& j) { if (bad_) @@ -1458,6 +1506,9 @@ ValidMPTIssuance::finalize( return mptIssuancesCreated_ == 0 && mptIssuancesDeleted_ == 0 && mptokensCreated_ == 0 && mptokensDeleted_ == 0; } + + if (tx.getTxnType() == ttESCROW_FINISH) + return true; } if (mptIssuancesCreated_ != 0) diff --git a/src/xrpld/app/tx/detail/MPTokenAuthorize.cpp b/src/xrpld/app/tx/detail/MPTokenAuthorize.cpp index 748c05869f..77b21b65f3 100644 --- a/src/xrpld/app/tx/detail/MPTokenAuthorize.cpp +++ b/src/xrpld/app/tx/detail/MPTokenAuthorize.cpp @@ -83,6 +83,15 @@ MPTokenAuthorize::preclaim(PreclaimContext const& ctx) return tecHAS_OBLIGATIONS; } + if ((*sleMpt)[~sfLockedAmount].value_or(0) != 0) + { + auto const sleMptIssuance = ctx.view.read( + keylet::mptIssuance(ctx.tx[sfMPTokenIssuanceID])); + if (!sleMptIssuance) + return tefINTERNAL; // LCOV_EXCL_LINE + + return tecHAS_OBLIGATIONS; + } if (ctx.view.rules().enabled(featureSingleAssetVault) && sleMpt->isFlag(lsfMPTLocked)) return tecNO_PERMISSION; @@ -140,6 +149,32 @@ MPTokenAuthorize::preclaim(PreclaimContext const& ctx) return tesSUCCESS; } +TER +MPTokenAuthorize::createMPToken( + ApplyView& view, + MPTID const& mptIssuanceID, + AccountID const& account, + std::uint32_t const flags) +{ + auto const mptokenKey = keylet::mptoken(mptIssuanceID, account); + + auto const ownerNode = view.dirInsert( + keylet::ownerDir(account), mptokenKey, describeOwnerDir(account)); + + if (!ownerNode) + return tecDIR_FULL; // LCOV_EXCL_LINE + + auto mptoken = std::make_shared(mptokenKey); + (*mptoken)[sfAccount] = account; + (*mptoken)[sfMPTokenIssuanceID] = mptIssuanceID; + (*mptoken)[sfFlags] = flags; + (*mptoken)[sfOwnerNode] = *ownerNode; + + view.insert(mptoken); + + return tesSUCCESS; +} + TER MPTokenAuthorize::authorize( ApplyView& view, diff --git a/src/xrpld/app/tx/detail/MPTokenAuthorize.h b/src/xrpld/app/tx/detail/MPTokenAuthorize.h index e2b135a22a..a81dc7dea2 100644 --- a/src/xrpld/app/tx/detail/MPTokenAuthorize.h +++ b/src/xrpld/app/tx/detail/MPTokenAuthorize.h @@ -54,6 +54,13 @@ public: beast::Journal journal, MPTAuthorizeArgs const& args); + static TER + createMPToken( + ApplyView& view, + MPTID const& mptIssuanceID, + AccountID const& account, + std::uint32_t const flags); + TER doApply() override; }; diff --git a/src/xrpld/app/tx/detail/MPTokenIssuanceDestroy.cpp b/src/xrpld/app/tx/detail/MPTokenIssuanceDestroy.cpp index d06ea3473e..a2e1f33b94 100644 --- a/src/xrpld/app/tx/detail/MPTokenIssuanceDestroy.cpp +++ b/src/xrpld/app/tx/detail/MPTokenIssuanceDestroy.cpp @@ -58,6 +58,9 @@ 
MPTokenIssuanceDestroy::preclaim(PreclaimContext const& ctx) if ((*sleMPT)[sfOutstandingAmount] != 0) return tecHAS_OBLIGATIONS; + if ((*sleMPT)[~sfLockedAmount].value_or(0) != 0) + return tecHAS_OBLIGATIONS; + return tesSUCCESS; } diff --git a/src/xrpld/app/tx/detail/SetAccount.cpp b/src/xrpld/app/tx/detail/SetAccount.cpp index 0c16182ed8..6e19c4ae86 100644 --- a/src/xrpld/app/tx/detail/SetAccount.cpp +++ b/src/xrpld/app/tx/detail/SetAccount.cpp @@ -650,6 +650,15 @@ SetAccount::doApply() uFlagsOut &= ~lsfDisallowIncomingTrustline; } + // Set or clear flags for disallowing escrow + if (ctx_.view().rules().enabled(featureTokenEscrow)) + { + if (uSetFlag == asfAllowTrustLineLocking) + uFlagsOut |= lsfAllowTrustLineLocking; + else if (uClearFlag == asfAllowTrustLineLocking) + uFlagsOut &= ~lsfAllowTrustLineLocking; + } + // Set flag for clawback if (ctx_.view().rules().enabled(featureClawback) && uSetFlag == asfAllowTrustLineClawback) diff --git a/src/xrpld/ledger/View.h b/src/xrpld/ledger/View.h index 387aedecfc..8c391499b6 100644 --- a/src/xrpld/ledger/View.h +++ b/src/xrpld/ledger/View.h @@ -357,6 +357,13 @@ transferRate(ReadView const& view, AccountID const& issuer); [[nodiscard]] Rate transferRate(ReadView const& view, MPTID const& issuanceID); +/** Returns the transfer fee as Rate based on the type of token + * @param view The ledger view + * @param amount The amount to transfer + */ +[[nodiscard]] Rate +transferRate(ReadView const& view, STAmount const& amount); + /** Returns `true` if the directory is empty @param key The key of the directory */ @@ -667,6 +674,21 @@ rippleCredit( bool bCheckIssuer, beast::Journal j); +TER +rippleLockEscrowMPT( + ApplyView& view, + AccountID const& uGrantorID, + STAmount const& saAmount, + beast::Journal j); + +TER +rippleUnlockEscrowMPT( + ApplyView& view, + AccountID const& uGrantorID, + AccountID const& uGranteeID, + STAmount const& saAmount, + beast::Journal j); + /** Calls static accountSendIOU if saAmount represents Issue. * Calls static accountSendMPT if saAmount represents MPTIssue. */ diff --git a/src/xrpld/ledger/detail/View.cpp b/src/xrpld/ledger/detail/View.cpp index 3978d26e56..d3161dccae 100644 --- a/src/xrpld/ledger/detail/View.cpp +++ b/src/xrpld/ledger/detail/View.cpp @@ -781,6 +781,19 @@ transferRate(ReadView const& view, MPTID const& issuanceID) return parityRate; } +Rate +transferRate(ReadView const& view, STAmount const& amount) +{ + return std::visit( + [&](TIss const& issue) { + if constexpr (std::is_same_v) + return transferRate(view, issue.getIssuer()); + else + return transferRate(view, issue.getMptID()); + }, + amount.asset().value()); +} + bool areCompatible( ReadView const& validLedger, @@ -2723,6 +2736,249 @@ sharesToAssetsWithdraw( return assets; } +TER +rippleLockEscrowMPT( + ApplyView& view, + AccountID const& sender, + STAmount const& amount, + beast::Journal j) +{ + auto const mptIssue = amount.get(); + auto const mptID = keylet::mptIssuance(mptIssue.getMptID()); + auto sleIssuance = view.peek(mptID); + if (!sleIssuance) + { + JLOG(j.error()) << "rippleLockEscrowMPT: MPT issuance not found for " + << mptIssue.getMptID(); + return tecOBJECT_NOT_FOUND; // LCOV_EXCL_LINE + } + + if (amount.getIssuer() == sender) + { + JLOG(j.error()) + << "rippleLockEscrowMPT: sender is the issuer, cannot lock MPTs."; + return tecINTERNAL; // LCOV_EXCL_LINE + } + + // 1. Decrease the MPT Holder MPTAmount + // 2. 
Increase the MPT Holder EscrowedAmount + { + auto const mptokenID = keylet::mptoken(mptID.key, sender); + auto sle = view.peek(mptokenID); + if (!sle) + { + JLOG(j.error()) + << "rippleLockEscrowMPT: MPToken not found for " << sender; + return tecOBJECT_NOT_FOUND; // LCOV_EXCL_LINE + } + + auto const amt = sle->getFieldU64(sfMPTAmount); + auto const pay = amount.mpt().value(); + + // Underflow check for subtraction + if (!canSubtract(STAmount(mptIssue, amt), STAmount(mptIssue, pay))) + { + JLOG(j.error()) + << "rippleLockEscrowMPT: insufficient MPTAmount for " + << to_string(sender) << ": " << amt << " < " << pay; + return tecINTERNAL; // LCOV_EXCL_LINE + } + + (*sle)[sfMPTAmount] = amt - pay; + + // Overflow check for addition + uint64_t const locked = (*sle)[~sfLockedAmount].value_or(0); + + if (!canAdd(STAmount(mptIssue, locked), STAmount(mptIssue, pay))) + { + JLOG(j.error()) + << "rippleLockEscrowMPT: overflow on locked amount for " + << to_string(sender) << ": " << locked << " + " << pay; + return tecINTERNAL; // LCOV_EXCL_LINE + } + + if (sle->isFieldPresent(sfLockedAmount)) + (*sle)[sfLockedAmount] += pay; + else + sle->setFieldU64(sfLockedAmount, pay); + + view.update(sle); + } + + // 1. Increase the Issuance EscrowedAmount + // 2. DO NOT change the Issuance OutstandingAmount + { + uint64_t const issuanceEscrowed = + (*sleIssuance)[~sfLockedAmount].value_or(0); + auto const pay = amount.mpt().value(); + + // Overflow check for addition + if (!canAdd( + STAmount(mptIssue, issuanceEscrowed), STAmount(mptIssue, pay))) + { + JLOG(j.error()) << "rippleLockEscrowMPT: overflow on issuance " + "locked amount for " + << mptIssue.getMptID() << ": " << issuanceEscrowed + << " + " << pay; + return tecINTERNAL; // LCOV_EXCL_LINE + } + + if (sleIssuance->isFieldPresent(sfLockedAmount)) + (*sleIssuance)[sfLockedAmount] += pay; + else + sleIssuance->setFieldU64(sfLockedAmount, pay); + + view.update(sleIssuance); + } + return tesSUCCESS; +} + +TER +rippleUnlockEscrowMPT( + ApplyView& view, + AccountID const& sender, + AccountID const& receiver, + STAmount const& amount, + beast::Journal j) +{ + auto const issuer = amount.getIssuer(); + auto const mptIssue = amount.get(); + auto const mptID = keylet::mptIssuance(mptIssue.getMptID()); + auto sleIssuance = view.peek(mptID); + if (!sleIssuance) + { + JLOG(j.error()) << "rippleUnlockEscrowMPT: MPT issuance not found for " + << mptIssue.getMptID(); + return tecOBJECT_NOT_FOUND; // LCOV_EXCL_LINE + } + + // Decrease the Issuance EscrowedAmount + { + if (!sleIssuance->isFieldPresent(sfLockedAmount)) + { + JLOG(j.error()) + << "rippleUnlockEscrowMPT: no locked amount in issuance for " + << mptIssue.getMptID(); + return tecINTERNAL; // LCOV_EXCL_LINE + } + + auto const locked = sleIssuance->getFieldU64(sfLockedAmount); + auto const redeem = amount.mpt().value(); + + // Underflow check for subtraction + if (!canSubtract( + STAmount(mptIssue, locked), STAmount(mptIssue, redeem))) + { + JLOG(j.error()) + << "rippleUnlockEscrowMPT: insufficient locked amount for " + << mptIssue.getMptID() << ": " << locked << " < " << redeem; + return tecINTERNAL; // LCOV_EXCL_LINE + } + + auto const newLocked = locked - redeem; + if (newLocked == 0) + sleIssuance->makeFieldAbsent(sfLockedAmount); + else + sleIssuance->setFieldU64(sfLockedAmount, newLocked); + view.update(sleIssuance); + } + + if (issuer != receiver) + { + // Increase the MPT Holder MPTAmount + auto const mptokenID = keylet::mptoken(mptID.key, receiver); + auto sle = view.peek(mptokenID); + if (!sle) + { + 
JLOG(j.error()) + << "rippleUnlockEscrowMPT: MPToken not found for " << receiver; + return tecOBJECT_NOT_FOUND; // LCOV_EXCL_LINE + } + + auto current = sle->getFieldU64(sfMPTAmount); + auto delta = amount.mpt().value(); + + // Overflow check for addition + if (!canAdd(STAmount(mptIssue, current), STAmount(mptIssue, delta))) + { + JLOG(j.error()) + << "rippleUnlockEscrowMPT: overflow on MPTAmount for " + << to_string(receiver) << ": " << current << " + " << delta; + return tecINTERNAL; // LCOV_EXCL_LINE + } + + (*sle)[sfMPTAmount] += delta; + view.update(sle); + } + else + { + // Decrease the Issuance OutstandingAmount + auto const outstanding = sleIssuance->getFieldU64(sfOutstandingAmount); + auto const redeem = amount.mpt().value(); + + // Underflow check for subtraction + if (!canSubtract( + STAmount(mptIssue, outstanding), STAmount(mptIssue, redeem))) + { + JLOG(j.error()) + << "rippleUnlockEscrowMPT: insufficient outstanding amount for " + << mptIssue.getMptID() << ": " << outstanding << " < " + << redeem; + return tecINTERNAL; // LCOV_EXCL_LINE + } + + sleIssuance->setFieldU64(sfOutstandingAmount, outstanding - redeem); + view.update(sleIssuance); + } + + if (issuer == sender) + { + JLOG(j.error()) << "rippleUnlockEscrowMPT: sender is the issuer, " + "cannot unlock MPTs."; + return tecINTERNAL; // LCOV_EXCL_LINE + } + else + { + // Decrease the MPT Holder EscrowedAmount + auto const mptokenID = keylet::mptoken(mptID.key, sender); + auto sle = view.peek(mptokenID); + if (!sle) + { + JLOG(j.error()) + << "rippleUnlockEscrowMPT: MPToken not found for " << sender; + return tecOBJECT_NOT_FOUND; // LCOV_EXCL_LINE + } + + if (!sle->isFieldPresent(sfLockedAmount)) + { + JLOG(j.error()) + << "rippleUnlockEscrowMPT: no locked amount in MPToken for " + << to_string(sender); + return tecINTERNAL; // LCOV_EXCL_LINE + } + + auto const locked = sle->getFieldU64(sfLockedAmount); + auto const delta = amount.mpt().value(); + + // Underflow check for subtraction + if (!canSubtract(STAmount(mptIssue, locked), STAmount(mptIssue, delta))) + { + JLOG(j.error()) + << "rippleUnlockEscrowMPT: insufficient locked amount for " + << to_string(sender) << ": " << locked << " < " << delta; + return tecINTERNAL; // LCOV_EXCL_LINE + } + + auto const newLocked = locked - delta; + if (newLocked == 0) + sle->makeFieldAbsent(sfLockedAmount); + else + sle->setFieldU64(sfLockedAmount, newLocked); + view.update(sle); + } + return tesSUCCESS; +} + bool after(NetClock::time_point now, std::uint32_t mark) { diff --git a/src/xrpld/rpc/handlers/GatewayBalances.cpp b/src/xrpld/rpc/handlers/GatewayBalances.cpp index e8b95bd75c..ca9e370c81 100644 --- a/src/xrpld/rpc/handlers/GatewayBalances.cpp +++ b/src/xrpld/rpc/handlers/GatewayBalances.cpp @@ -142,11 +142,41 @@ doGatewayBalances(RPC::JsonContext& context) std::map> hotBalances; std::map> assets; std::map> frozenBalances; + std::map locked; // Traverse the cold wallet's trust lines { forEachItem( *ledger, accountID, [&](std::shared_ptr const& sle) { + if (sle->getType() == ltESCROW) + { + auto const& escrow = sle->getFieldAmount(sfAmount); + auto& bal = locked[escrow.getCurrency()]; + if (bal == beast::zero) + { + // This is needed to set the currency code correctly + bal = escrow; + } + else + { + try + { + bal += escrow; + } + catch (std::runtime_error const&) + { + // Presumably the exception was caused by overflow. + // On overflow return the largest valid STAmount. + // Very large sums of STAmount are approximations + // anyway. 
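+                        // STAmount(issue, cMaxValue, cMaxOffset) is the
+                        // largest representable IOU amount
+                        // (9999999999999999 * 10^80, roughly 10^96), so the
+                        // reported total saturates instead of throwing again.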
+ bal = STAmount( + bal.issue(), + STAmount::cMaxValue, + STAmount::cMaxOffset); + } + } + } + auto rs = PathFindTrustLine::makeItem(accountID, sle); if (!rs) @@ -246,6 +276,17 @@ doGatewayBalances(RPC::JsonContext& context) populateResult(frozenBalances, jss::frozen_balances); populateResult(assets, jss::assets); + // Add total escrow to the result + if (!locked.empty()) + { + Json::Value j; + for (auto const& [k, v] : locked) + { + j[to_string(k)] = v.getText(); + } + result[jss::locked] = std::move(j); + } + return result; } From 0310c5cbe0b78435056920b6c4b0533642fe18e6 Mon Sep 17 00:00:00 2001 From: Ayaz Salikhov Date: Tue, 3 Jun 2025 18:33:32 +0100 Subject: [PATCH 046/244] fix: Specify transitive_headers when building with Conan 2 (#5462) To be able to consume `rippled` in Conan 2, the recipe should specify transitive_headers for external libraries that are present in the exported header files. This change remains compatibility with Conan 1, where this flag was not present. --- conanfile.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/conanfile.py b/conanfile.py index a42c116ca2..da8a09611d 100644 --- a/conanfile.py +++ b/conanfile.py @@ -1,4 +1,4 @@ -from conan import ConanFile +from conan import ConanFile, __version__ as conan_version from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout import re @@ -24,13 +24,11 @@ class Xrpl(ConanFile): } requires = [ - 'date/3.0.3', 'grpc/1.50.1', 'libarchive/3.7.6', 'nudb/2.0.8', 'openssl/1.1.1v', 'soci/4.0.3', - 'xxhash/0.8.2', 'zlib/1.3.1', ] @@ -99,7 +97,10 @@ class Xrpl(ConanFile): self.options['boost'].visibility = 'global' def requirements(self): - self.requires('boost/1.83.0', force=True) + # Conan 2 requires transitive headers to be specified + transitive_headers_opt = {'transitive_headers': True} if conan_version.split('.')[0] == '2' else {} + self.requires('boost/1.83.0', force=True, **transitive_headers_opt) + self.requires('date/3.0.3', **transitive_headers_opt) self.requires('lz4/1.10.0', force=True) self.requires('protobuf/3.21.9', force=True) self.requires('sqlite3/3.47.0', force=True) @@ -107,6 +108,7 @@ class Xrpl(ConanFile): self.requires('jemalloc/5.3.0') if self.options.rocksdb: self.requires('rocksdb/9.7.3') + self.requires('xxhash/0.8.2', **transitive_headers_opt) exports_sources = ( 'CMakeLists.txt', From 506ae12a8c82677a78a2c612989d71e8529eaeac Mon Sep 17 00:00:00 2001 From: Mark Travis Date: Tue, 3 Jun 2025 14:33:09 -0700 Subject: [PATCH 047/244] Increase network i/o capacity (#5464) The change increases the default network I/O worker thread pool size from 2 to 6. This will improve stability, as worker thread saturation correlates to desyncs, particularly on high-traffic peers, such as hubs. --- src/xrpld/app/main/Application.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/xrpld/app/main/Application.cpp b/src/xrpld/app/main/Application.cpp index 5d495aaf06..7771086239 100644 --- a/src/xrpld/app/main/Application.cpp +++ b/src/xrpld/app/main/Application.cpp @@ -256,8 +256,8 @@ public: if ((cores == 1) || ((config.NODE_SIZE == 0) && (cores == 2))) return 1; - // Otherwise, prefer two threads. - return 2; + // Otherwise, prefer six threads. 
+ return 6; #endif } From a5e953b1910be0cdf69dafbc7eed8171fe3907d8 Mon Sep 17 00:00:00 2001 From: yinyiqian1 Date: Tue, 3 Jun 2025 18:20:29 -0400 Subject: [PATCH 048/244] fix: Add tecNO_DELEGATE_PERMISSION and fix flags (#5465) * Adds `tecNO_DELEGATE_PERMISSION` for unauthorized transactions sent by a delegated account. * Returns `tecNO_TARGET` instead of `terNO_ACCOUNT` for the `DelegateSet` transaction if the delegated account does not exist. * Fixes `tfFullyCanonicalSig` and `tfInnerBatchTxn` blocking transactions issue by adding `tfUniversal` in the permission related masks in `txFlags.h` --- include/xrpl/protocol/TER.h | 1 + include/xrpl/protocol/TxFlags.h | 11 +- src/libxrpl/protocol/TER.cpp | 1 + src/test/app/Delegate_test.cpp | 338 ++++++++++++++---- src/xrpld/app/misc/DelegateUtils.h | 3 +- src/xrpld/app/misc/detail/DelegateUtils.cpp | 4 +- src/xrpld/app/tx/detail/DelegateSet.cpp | 2 +- .../app/tx/detail/MPTokenIssuanceSet.cpp | 8 +- src/xrpld/app/tx/detail/Payment.cpp | 4 +- src/xrpld/app/tx/detail/SetAccount.cpp | 16 +- src/xrpld/app/tx/detail/SetTrust.cpp | 16 +- src/xrpld/app/tx/detail/Transactor.cpp | 2 +- 12 files changed, 294 insertions(+), 112 deletions(-) diff --git a/include/xrpl/protocol/TER.h b/include/xrpl/protocol/TER.h index f71153cddb..9ace6b80f8 100644 --- a/include/xrpl/protocol/TER.h +++ b/include/xrpl/protocol/TER.h @@ -361,6 +361,7 @@ enum TECcodes : TERUnderlyingType { tecLIMIT_EXCEEDED = 195, tecPSEUDO_ACCOUNT = 196, tecPRECISION_LOSS = 197, + tecNO_DELEGATE_PERMISSION = 198, }; //------------------------------------------------------------------------------ diff --git a/include/xrpl/protocol/TxFlags.h b/include/xrpl/protocol/TxFlags.h index 2ce7a6b6a8..2831933afb 100644 --- a/include/xrpl/protocol/TxFlags.h +++ b/include/xrpl/protocol/TxFlags.h @@ -122,13 +122,7 @@ constexpr std::uint32_t tfClearDeepFreeze = 0x00800000; constexpr std::uint32_t tfTrustSetMask = ~(tfUniversal | tfSetfAuth | tfSetNoRipple | tfClearNoRipple | tfSetFreeze | tfClearFreeze | tfSetDeepFreeze | tfClearDeepFreeze); - -// valid flags for granular permission -constexpr std::uint32_t tfTrustSetGranularMask = tfSetfAuth | tfSetFreeze | tfClearFreeze; - -// bits representing supportedGranularMask are set to 0 and the bits -// representing other flags are set to 1 in tfPermissionMask. 
-constexpr std::uint32_t tfTrustSetPermissionMask = (~tfTrustSetMask) & (~tfTrustSetGranularMask); +constexpr std::uint32_t tfTrustSetPermissionMask = ~(tfUniversal | tfSetfAuth | tfSetFreeze | tfClearFreeze); // EnableAmendment flags: constexpr std::uint32_t tfGotMajority = 0x00010000; @@ -165,8 +159,7 @@ constexpr std::uint32_t const tfMPTokenAuthorizeMask = ~(tfUniversal | tfMPTUna constexpr std::uint32_t const tfMPTLock = 0x00000001; constexpr std::uint32_t const tfMPTUnlock = 0x00000002; constexpr std::uint32_t const tfMPTokenIssuanceSetMask = ~(tfUniversal | tfMPTLock | tfMPTUnlock); -constexpr std::uint32_t const tfMPTokenIssuanceSetGranularMask = tfMPTLock | tfMPTUnlock; -constexpr std::uint32_t const tfMPTokenIssuanceSetPermissionMask = (~tfMPTokenIssuanceSetMask) & (~tfMPTokenIssuanceSetGranularMask); +constexpr std::uint32_t const tfMPTokenIssuanceSetPermissionMask = ~(tfUniversal | tfMPTLock | tfMPTUnlock); // MPTokenIssuanceDestroy flags: constexpr std::uint32_t const tfMPTokenIssuanceDestroyMask = ~tfUniversal; diff --git a/src/libxrpl/protocol/TER.cpp b/src/libxrpl/protocol/TER.cpp index 18bf0e2936..a396949afe 100644 --- a/src/libxrpl/protocol/TER.cpp +++ b/src/libxrpl/protocol/TER.cpp @@ -127,6 +127,7 @@ transResults() MAKE_ERROR(tecLIMIT_EXCEEDED, "Limit exceeded."), MAKE_ERROR(tecPSEUDO_ACCOUNT, "This operation is not allowed against a pseudo-account."), MAKE_ERROR(tecPRECISION_LOSS, "The amounts used by the transaction cannot interact."), + MAKE_ERROR(tecNO_DELEGATE_PERMISSION, "Delegated account lacks permission to perform this transaction."), MAKE_ERROR(tefALREADY, "The exact transaction was already in this ledger."), MAKE_ERROR(tefBAD_ADD_AUTH, "Not authorized to add account."), diff --git a/src/test/app/Delegate_test.cpp b/src/test/app/Delegate_test.cpp index ca173a6993..dc3264d777 100644 --- a/src/test/app/Delegate_test.cpp +++ b/src/test/app/Delegate_test.cpp @@ -209,10 +209,10 @@ class Delegate_test : public beast::unit_test::suite } // when authorizing account which does not exist, should return - // terNO_ACCOUNT + // tecNO_TARGET { env(delegate::set(gw, Account("unknown"), {"Payment"}), - ter(terNO_ACCOUNT)); + ter(tecNO_TARGET)); } // non-delegatable transaction @@ -310,8 +310,9 @@ class Delegate_test : public beast::unit_test::suite { // Fee should be checked before permission check, - // otherwise tecNO_PERMISSION returned when permission check fails - // could cause context reset to pay fee because it is tec error + // otherwise tecNO_DELEGATE_PERMISSION returned when permission + // check fails could cause context reset to pay fee because it is + // tec error auto aliceBalance = env.balance(alice); auto bobBalance = env.balance(bob); auto carolBalance = env.balance(carol); @@ -526,12 +527,12 @@ class Delegate_test : public beast::unit_test::suite // bob does not have permission to create check env(check::create(alice, bob, XRP(10)), delegate::as(bob), - ter(tecNO_PERMISSION)); + ter(tecNO_DELEGATE_PERMISSION)); // carol does not have permission to create check env(check::create(alice, bob, XRP(10)), delegate::as(carol), - ter(tecNO_PERMISSION)); + ter(tecNO_DELEGATE_PERMISSION)); } void @@ -566,7 +567,7 @@ class Delegate_test : public beast::unit_test::suite // delegate ledger object is not created yet env(pay(gw, alice, USD(50)), delegate::as(bob), - ter(tecNO_PERMISSION)); + ter(tecNO_DELEGATE_PERMISSION)); env.require(balance(bob, bobBalance - drops(baseFee))); bobBalance = env.balance(bob, XRP); @@ -579,7 +580,7 @@ class Delegate_test : public 
beast::unit_test::suite // bob sends a payment transaction on behalf of gw env(pay(gw, alice, USD(50)), delegate::as(bob), - ter(tecNO_PERMISSION)); + ter(tecNO_DELEGATE_PERMISSION)); env.close(); env.require(balance(bob, bobBalance - drops(baseFee))); bobBalance = env.balance(bob, XRP); @@ -596,7 +597,7 @@ class Delegate_test : public beast::unit_test::suite // can not send XRP env(pay(gw, alice, XRP(50)), delegate::as(bob), - ter(tecNO_PERMISSION)); + ter(tecNO_DELEGATE_PERMISSION)); env.close(); env.require(balance(bob, bobBalance - drops(baseFee))); bobBalance = env.balance(bob, XRP); @@ -684,7 +685,7 @@ class Delegate_test : public beast::unit_test::suite // permission env(pay(gw, alice, USD(50)), delegate::as(bob), - ter(tecNO_PERMISSION)); + ter(tecNO_DELEGATE_PERMISSION)); env.close(); env.require(balance(bob, bobBalance - drops(baseFee))); bobBalance = env.balance(bob, XRP); @@ -729,7 +730,7 @@ class Delegate_test : public beast::unit_test::suite // has unfreeze permission env(trust(alice, gw["USD"](50)), delegate::as(bob), - ter(tecNO_PERMISSION)); + ter(tecNO_DELEGATE_PERMISSION)); env.close(); // alice creates trustline by herself @@ -743,38 +744,38 @@ class Delegate_test : public beast::unit_test::suite // unsupported flags env(trust(alice, gw["USD"](50), tfSetNoRipple), delegate::as(bob), - ter(tecNO_PERMISSION)); + ter(tecNO_DELEGATE_PERMISSION)); env(trust(alice, gw["USD"](50), tfClearNoRipple), delegate::as(bob), - ter(tecNO_PERMISSION)); + ter(tecNO_DELEGATE_PERMISSION)); env(trust(gw, gw["USD"](0), alice, tfSetDeepFreeze), delegate::as(bob), - ter(tecNO_PERMISSION)); + ter(tecNO_DELEGATE_PERMISSION)); env(trust(gw, gw["USD"](0), alice, tfClearDeepFreeze), delegate::as(bob), - ter(tecNO_PERMISSION)); + ter(tecNO_DELEGATE_PERMISSION)); env.close(); // supported flags with wrong permission env(trust(gw, gw["USD"](0), alice, tfSetfAuth), delegate::as(bob), - ter(tecNO_PERMISSION)); + ter(tecNO_DELEGATE_PERMISSION)); env(trust(gw, gw["USD"](0), alice, tfSetFreeze), delegate::as(bob), - ter(tecNO_PERMISSION)); + ter(tecNO_DELEGATE_PERMISSION)); env.close(); env(delegate::set(gw, bob, {"TrustlineAuthorize"})); env.close(); env(trust(gw, gw["USD"](0), alice, tfClearFreeze), delegate::as(bob), - ter(tecNO_PERMISSION)); + ter(tecNO_DELEGATE_PERMISSION)); env.close(); // although trustline authorize is granted, bob can not change the // limit number env(trust(gw, gw["USD"](50), alice, tfSetfAuth), delegate::as(bob), - ter(tecNO_PERMISSION)); + ter(tecNO_DELEGATE_PERMISSION)); env.close(); // supported flags with correct permission @@ -795,30 +796,30 @@ class Delegate_test : public beast::unit_test::suite // permission env(trust(gw, gw["USD"](0), alice, tfSetFreeze), delegate::as(bob), - ter(tecNO_PERMISSION)); + ter(tecNO_DELEGATE_PERMISSION)); // cannot update LimitAmount with granular permission, both high and // low account env(trust(alice, gw["USD"](100)), delegate::as(bob), - ter(tecNO_PERMISSION)); + ter(tecNO_DELEGATE_PERMISSION)); env(trust(gw, alice["USD"](100)), delegate::as(bob), - ter(tecNO_PERMISSION)); + ter(tecNO_DELEGATE_PERMISSION)); // can not set QualityIn or QualityOut auto tx = trust(alice, gw["USD"](50)); tx["QualityIn"] = "1000"; - env(tx, delegate::as(bob), ter(tecNO_PERMISSION)); + env(tx, delegate::as(bob), ter(tecNO_DELEGATE_PERMISSION)); auto tx2 = trust(alice, gw["USD"](50)); tx2["QualityOut"] = "1000"; - env(tx2, delegate::as(bob), ter(tecNO_PERMISSION)); + env(tx2, delegate::as(bob), ter(tecNO_DELEGATE_PERMISSION)); auto tx3 = trust(gw, 
alice["USD"](50)); tx3["QualityIn"] = "1000"; - env(tx3, delegate::as(bob), ter(tecNO_PERMISSION)); + env(tx3, delegate::as(bob), ter(tecNO_DELEGATE_PERMISSION)); auto tx4 = trust(gw, alice["USD"](50)); tx4["QualityOut"] = "1000"; - env(tx4, delegate::as(bob), ter(tecNO_PERMISSION)); + env(tx4, delegate::as(bob), ter(tecNO_DELEGATE_PERMISSION)); // granting TrustSet can make it work env(delegate::set(gw, bob, {"TrustSet"})); @@ -828,7 +829,7 @@ class Delegate_test : public beast::unit_test::suite env(tx5, delegate::as(bob)); auto tx6 = trust(alice, gw["USD"](50)); tx6["QualityOut"] = "1000"; - env(tx6, delegate::as(bob), ter(tecNO_PERMISSION)); + env(tx6, delegate::as(bob), ter(tecNO_DELEGATE_PERMISSION)); env(delegate::set(alice, bob, {"TrustSet"})); env.close(); env(tx6, delegate::as(bob)); @@ -847,14 +848,14 @@ class Delegate_test : public beast::unit_test::suite // bob does not have permission env(trust(alice, gw["USD"](50)), delegate::as(bob), - ter(tecNO_PERMISSION)); + ter(tecNO_DELEGATE_PERMISSION)); env(delegate::set( alice, bob, {"TrustlineUnfreeze", "NFTokenCreateOffer"})); env.close(); // bob still does not have permission env(trust(alice, gw["USD"](50)), delegate::as(bob), - ter(tecNO_PERMISSION)); + ter(tecNO_DELEGATE_PERMISSION)); // add TrustSet permission and some unrelated permission env(delegate::set( @@ -893,6 +894,56 @@ class Delegate_test : public beast::unit_test::suite env(trust(alice, gw["USD"](50), tfClearNoRipple), delegate::as(bob)); } + + // tfFullyCanonicalSig won't block delegated transaction + { + Env env(*this); + Account gw{"gw"}; + Account alice{"alice"}; + Account bob{"bob"}; + env.fund(XRP(10000), gw, alice, bob); + env(fset(gw, asfRequireAuth)); + env.close(); + env(trust(alice, gw["USD"](50))); + env.close(); + + env(delegate::set(gw, bob, {"TrustlineAuthorize"})); + env.close(); + env(trust( + gw, gw["USD"](0), alice, tfSetfAuth | tfFullyCanonicalSig), + delegate::as(bob)); + } + + // tfInnerBatchTxn won't block delegated transaction + { + Env env(*this); + Account gw{"gw"}; + Account alice{"alice"}; + Account bob{"bob"}; + env.fund(XRP(10000), gw, alice, bob); + env(fset(gw, asfRequireAuth)); + env.close(); + env(trust(alice, gw["USD"](50))); + env.close(); + + env(delegate::set( + gw, bob, {"TrustlineAuthorize", "TrustlineFreeze"})); + env.close(); + + auto const seq = env.seq(gw); + auto const batchFee = batch::calcBatchFee(env, 0, 2); + auto jv1 = trust(gw, gw["USD"](0), alice, tfSetfAuth); + jv1[sfDelegate] = bob.human(); + auto jv2 = trust(gw, gw["USD"](0), alice, tfSetFreeze); + jv2[sfDelegate] = bob.human(); + + // batch::inner will set tfInnerBatchTxn, this should not + // block delegated transaction + env(batch::outer(gw, seq, batchFee, tfAllOrNothing), + batch::inner(jv1, seq + 1), + batch::inner(jv2, seq + 2)); + env.close(); + } } void @@ -920,16 +971,15 @@ class Delegate_test : public beast::unit_test::suite // on behalf of alice std::string const domain = "example.com"; auto jt = noop(alice); - jt[sfDomain.fieldName] = strHex(domain); - jt[sfDelegate.fieldName] = bob.human(); - jt[sfFlags.fieldName] = tfFullyCanonicalSig; + jt[sfDomain] = strHex(domain); + jt[sfDelegate] = bob.human(); // add granular permission related to AccountSet but is not the // correct permission for domain set env(delegate::set( alice, bob, {"TrustlineUnfreeze", "AccountEmailHashSet"})); env.close(); - env(jt, ter(tecNO_PERMISSION)); + env(jt, ter(tecNO_DELEGATE_PERMISSION)); // alice give granular permission of AccountDomainSet to bob env(delegate::set(alice, 
bob, {"AccountDomainSet"})); @@ -940,25 +990,24 @@ class Delegate_test : public beast::unit_test::suite BEAST_EXPECT((*env.le(alice))[sfDomain] == makeSlice(domain)); // bob can reset domain - jt[sfDomain.fieldName] = ""; + jt[sfDomain] = ""; env(jt); BEAST_EXPECT(!env.le(alice)->isFieldPresent(sfDomain)); - // if flag is not equal to tfFullyCanonicalSig, which means bob - // is trying to set the flag at the same time, it will fail + // bob tries to set unauthorized flag, it will fail std::string const failDomain = "fail_domain_update"; - jt[sfFlags.fieldName] = tfRequireAuth; - jt[sfDomain.fieldName] = strHex(failDomain); - env(jt, ter(tecNO_PERMISSION)); + jt[sfFlags] = tfRequireAuth; + jt[sfDomain] = strHex(failDomain); + env(jt, ter(tecNO_DELEGATE_PERMISSION)); // reset flag number - jt[sfFlags.fieldName] = tfFullyCanonicalSig; + jt[sfFlags] = 0; // bob tries to update domain and set email hash, // but he does not have permission to set email hash - jt[sfDomain.fieldName] = strHex(domain); + jt[sfDomain] = strHex(domain); std::string const mh("5F31A79367DC3137FADA860C05742EE6"); - jt[sfEmailHash.fieldName] = mh; - env(jt, ter(tecNO_PERMISSION)); + jt[sfEmailHash] = mh; + env(jt, ter(tecNO_DELEGATE_PERMISSION)); // alice give granular permission of AccountEmailHashSet to bob env(delegate::set( @@ -970,8 +1019,8 @@ class Delegate_test : public beast::unit_test::suite // bob does not have permission to set message key for alice auto const rkp = randomKeyPair(KeyType::ed25519); - jt[sfMessageKey.fieldName] = strHex(rkp.first.slice()); - env(jt, ter(tecNO_PERMISSION)); + jt[sfMessageKey] = strHex(rkp.first.slice()); + env(jt, ter(tecNO_DELEGATE_PERMISSION)); // alice give granular permission of AccountMessageKeySet to bob env(delegate::set( @@ -987,12 +1036,14 @@ class Delegate_test : public beast::unit_test::suite BEAST_EXPECT( strHex((*env.le(alice))[sfMessageKey]) == strHex(rkp.first.slice())); - jt[sfMessageKey.fieldName] = ""; + jt[sfMessageKey] = ""; env(jt); BEAST_EXPECT(!env.le(alice)->isFieldPresent(sfMessageKey)); // bob does not have permission to set transfer rate for alice - env(rate(alice, 2.0), delegate::as(bob), ter(tecNO_PERMISSION)); + env(rate(alice, 2.0), + delegate::as(bob), + ter(tecNO_DELEGATE_PERMISSION)); // alice give granular permission of AccountTransferRateSet to bob env(delegate::set( @@ -1004,14 +1055,13 @@ class Delegate_test : public beast::unit_test::suite "AccountTransferRateSet"})); env.close(); auto jtRate = rate(alice, 2.0); - jtRate[sfDelegate.fieldName] = bob.human(); - jtRate[sfFlags.fieldName] = tfFullyCanonicalSig; + jtRate[sfDelegate] = bob.human(); env(jtRate, delegate::as(bob)); BEAST_EXPECT((*env.le(alice))[sfTransferRate] == 2000000000); // bob does not have permission to set ticksize for alice - jt[sfTickSize.fieldName] = 8; - env(jt, ter(tecNO_PERMISSION)); + jt[sfTickSize] = 8; + env(jt, ter(tecNO_DELEGATE_PERMISSION)); // alice give granular permission of AccountTickSizeSet to bob env(delegate::set( @@ -1029,7 +1079,7 @@ class Delegate_test : public beast::unit_test::suite // can not set asfRequireAuth flag for alice env(fset(alice, asfRequireAuth), delegate::as(bob), - ter(tecNO_PERMISSION)); + ter(tecNO_DELEGATE_PERMISSION)); // reset Delegate will delete the Delegate // object @@ -1038,15 +1088,15 @@ class Delegate_test : public beast::unit_test::suite // alice env(fset(alice, asfRequireAuth), delegate::as(bob), - ter(tecNO_PERMISSION)); + ter(tecNO_DELEGATE_PERMISSION)); // alice can set for herself env(fset(alice, asfRequireAuth)); 
env.require(flags(alice, asfRequireAuth)); env.close(); // can not update tick size because bob no longer has permission - jt[sfTickSize.fieldName] = 7; - env(jt, ter(tecNO_PERMISSION)); + jt[sfTickSize] = 7; + env(jt, ter(tecNO_DELEGATE_PERMISSION)); env(delegate::set( alice, @@ -1060,12 +1110,11 @@ class Delegate_test : public beast::unit_test::suite std::string const locator = "9633EC8AF54F16B5286DB1D7B519EF49EEFC050C0C8AC4384F1D88ACD1BFDF" "05"; - auto jt2 = noop(alice); - jt2[sfDomain.fieldName] = strHex(domain); - jt2[sfDelegate.fieldName] = bob.human(); - jt2[sfWalletLocator.fieldName] = locator; - jt2[sfFlags.fieldName] = tfFullyCanonicalSig; - env(jt2, ter(tecNO_PERMISSION)); + auto jv2 = noop(alice); + jv2[sfDomain] = strHex(domain); + jv2[sfDelegate] = bob.human(); + jv2[sfWalletLocator] = locator; + env(jv2, ter(tecNO_DELEGATE_PERMISSION)); } // can not set AccountSet flags on behalf of other account @@ -1080,7 +1129,7 @@ class Delegate_test : public beast::unit_test::suite // bob can not set flag on behalf of alice env(fset(alice, flag), delegate::as(bob), - ter(tecNO_PERMISSION)); + ter(tecNO_DELEGATE_PERMISSION)); // alice set by herself env(fset(alice, flag)); env.close(); @@ -1088,7 +1137,7 @@ class Delegate_test : public beast::unit_test::suite // bob can not clear on behalf of alice env(fclear(alice, flag), delegate::as(bob), - ter(tecNO_PERMISSION)); + ter(tecNO_DELEGATE_PERMISSION)); }; // testSetClearFlag(asfNoFreeze); @@ -1117,19 +1166,19 @@ class Delegate_test : public beast::unit_test::suite // bob can not set asfAccountTxnID on behalf of alice env(fset(alice, asfAccountTxnID), delegate::as(bob), - ter(tecNO_PERMISSION)); + ter(tecNO_DELEGATE_PERMISSION)); env(fset(alice, asfAccountTxnID)); env.close(); BEAST_EXPECT(env.le(alice)->isFieldPresent(sfAccountTxnID)); env(fclear(alice, asfAccountTxnID), delegate::as(bob), - ter(tecNO_PERMISSION)); + ter(tecNO_DELEGATE_PERMISSION)); // bob can not set asfAuthorizedNFTokenMinter on behalf of alice Json::Value jt = fset(alice, asfAuthorizedNFTokenMinter); - jt[sfDelegate.fieldName] = bob.human(); - jt[sfNFTokenMinter.fieldName] = bob.human(); - env(jt, ter(tecNO_PERMISSION)); + jt[sfDelegate] = bob.human(); + jt[sfNFTokenMinter] = bob.human(); + env(jt, ter(tecNO_DELEGATE_PERMISSION)); // bob gives alice some permissions env(delegate::set( @@ -1145,14 +1194,14 @@ class Delegate_test : public beast::unit_test::suite // behalf of bob. 
env(fset(alice, asfNoFreeze), delegate::as(bob), - ter(tecNO_PERMISSION)); + ter(tecNO_DELEGATE_PERMISSION)); env(fset(bob, asfNoFreeze)); env.close(); env.require(flags(bob, asfNoFreeze)); // alice can not clear on behalf of bob env(fclear(alice, asfNoFreeze), delegate::as(bob), - ter(tecNO_PERMISSION)); + ter(tecNO_DELEGATE_PERMISSION)); // bob can not set asfDisableMaster on behalf of alice Account const bobKey{"bobKey", KeyType::secp256k1}; @@ -1161,7 +1210,76 @@ class Delegate_test : public beast::unit_test::suite env(fset(alice, asfDisableMaster), delegate::as(bob), sig(bob), - ter(tecNO_PERMISSION)); + ter(tecNO_DELEGATE_PERMISSION)); + } + + // tfFullyCanonicalSig won't block delegated transaction + { + Env env(*this); + Account alice{"alice"}; + Account bob{"bob"}; + env.fund(XRP(10000), alice, bob); + env.close(); + + env(delegate::set( + alice, bob, {"AccountDomainSet", "AccountEmailHashSet"})); + env.close(); + + std::string const domain = "example.com"; + auto jt = noop(alice); + jt[sfDomain] = strHex(domain); + jt[sfDelegate] = bob.human(); + jt[sfFlags] = tfFullyCanonicalSig; + + env(jt); + BEAST_EXPECT((*env.le(alice))[sfDomain] == makeSlice(domain)); + } + + // tfInnerBatchTxn won't block delegated transaction + { + Env env(*this); + Account alice{"alice"}; + Account bob{"bob"}; + env.fund(XRP(10000), alice, bob); + env.close(); + + env(delegate::set( + alice, bob, {"AccountDomainSet", "AccountEmailHashSet"})); + env.close(); + + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 0, 3); + + auto jv1 = noop(alice); + std::string const domain1 = "example1.com"; + jv1[sfDomain] = strHex(domain1); + jv1[sfDelegate] = bob.human(); + jv1[sfSequence] = seq + 1; + + auto jv2 = noop(alice); + std::string const domain2 = "example2.com"; + jv2[sfDomain] = strHex(domain2); + jv2[sfDelegate] = bob.human(); + jv2[sfSequence] = seq + 2; + + // bob set domain back and add email hash for alice + auto jv3 = noop(alice); + std::string const mh("5F31A79367DC3137FADA860C05742EE6"); + jv3[sfDomain] = strHex(domain1); + jv3[sfEmailHash] = mh; + jv3[sfDelegate] = bob.human(); + jv3[sfSequence] = seq + 3; + + // batch::inner will set tfInnerBatchTxn, this should not + // block delegated transaction + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(jv1, seq + 1), + batch::inner(jv2, seq + 2), + batch::inner(jv3, seq + 3)); + env.close(); + + BEAST_EXPECT((*env.le(alice))[sfDomain] == makeSlice(domain1)); + BEAST_EXPECT(to_string((*env.le(alice))[sfEmailHash]) == mh); } } @@ -1189,7 +1307,7 @@ class Delegate_test : public beast::unit_test::suite {.account = alice, .flags = tfMPTLock, .delegate = bob, - .err = tecNO_PERMISSION}); + .err = tecNO_DELEGATE_PERMISSION}); // alice gives granular permission to bob of MPTokenIssuanceUnlock env(delegate::set(alice, bob, {"MPTokenIssuanceUnlock"})); @@ -1199,7 +1317,7 @@ class Delegate_test : public beast::unit_test::suite {.account = alice, .flags = tfMPTLock, .delegate = bob, - .err = tecNO_PERMISSION}); + .err = tecNO_DELEGATE_PERMISSION}); // bob now has lock permission, but does not have unlock permission env(delegate::set(alice, bob, {"MPTokenIssuanceLock"})); env.close(); @@ -1208,7 +1326,7 @@ class Delegate_test : public beast::unit_test::suite {.account = alice, .flags = tfMPTUnlock, .delegate = bob, - .err = tecNO_PERMISSION}); + .err = tecNO_DELEGATE_PERMISSION}); // now bob can lock and unlock env(delegate::set( @@ -1241,7 +1359,7 @@ class Delegate_test : public beast::unit_test::suite 
{.account = alice, .flags = tfMPTUnlock, .delegate = bob, - .err = tecNO_PERMISSION}); + .err = tecNO_DELEGATE_PERMISSION}); // alice gives bob some unrelated permission with // MPTokenIssuanceLock @@ -1255,7 +1373,7 @@ class Delegate_test : public beast::unit_test::suite {.account = alice, .flags = tfMPTUnlock, .delegate = bob, - .err = tecNO_PERMISSION}); + .err = tecNO_DELEGATE_PERMISSION}); // alice add MPTokenIssuanceSet to permissions env(delegate::set( @@ -1271,6 +1389,74 @@ class Delegate_test : public beast::unit_test::suite mpt.set({.account = alice, .flags = tfMPTUnlock, .delegate = bob}); mpt.set({.account = alice, .flags = tfMPTLock, .delegate = bob}); } + + // tfFullyCanonicalSig won't block delegated transaction + { + Env env(*this); + Account alice{"alice"}; + Account bob{"bob"}; + env.fund(XRP(100000), alice, bob); + env.close(); + + MPTTester mpt(env, alice, {.fund = false}); + env.close(); + mpt.create({.flags = tfMPTCanLock}); + env.close(); + + // alice gives granular permission to bob of MPTokenIssuanceLock + env(delegate::set(alice, bob, {"MPTokenIssuanceLock"})); + env.close(); + mpt.set( + {.account = alice, + .flags = tfMPTLock | tfFullyCanonicalSig, + .delegate = bob}); + } + + // tfInnerBatchTxn won't block delegated transaction + { + Env env(*this); + Account alice{"alice"}; + Account bob{"bob"}; + env.fund(XRP(100000), alice, bob); + env.close(); + + auto const mptID = makeMptID(env.seq(alice), alice); + MPTTester mpt(env, alice, {.fund = false}); + env.close(); + mpt.create({.flags = tfMPTCanLock}); + env.close(); + + // alice gives granular permission to bob of MPTokenIssuanceLock + env(delegate::set( + alice, bob, {"MPTokenIssuanceLock", "MPTokenIssuanceUnlock"})); + env.close(); + + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 0, 2); + + Json::Value jv1; + jv1[sfTransactionType] = jss::MPTokenIssuanceSet; + jv1[sfAccount] = alice.human(); + jv1[sfDelegate] = bob.human(); + jv1[sfSequence] = seq + 1; + jv1[sfMPTokenIssuanceID] = to_string(mptID); + jv1[sfFlags] = tfMPTLock; + + Json::Value jv2; + jv2[sfTransactionType] = jss::MPTokenIssuanceSet; + jv2[sfAccount] = alice.human(); + jv2[sfDelegate] = bob.human(); + jv2[sfSequence] = seq + 2; + jv2[sfMPTokenIssuanceID] = to_string(mptID); + jv2[sfFlags] = tfMPTUnlock; + + // batch::inner will set tfInnerBatchTxn, this should not + // block delegated transaction + env(batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(jv1, seq + 1), + batch::inner(jv2, seq + 2)); + env.close(); + } } void diff --git a/src/xrpld/app/misc/DelegateUtils.h b/src/xrpld/app/misc/DelegateUtils.h index cad3bed376..8d657e6a09 100644 --- a/src/xrpld/app/misc/DelegateUtils.h +++ b/src/xrpld/app/misc/DelegateUtils.h @@ -31,7 +31,8 @@ namespace ripple { * Check if the delegate account has permission to execute the transaction. * @param delegate The delegate account. * @param tx The transaction that the delegate account intends to execute. - * @return tesSUCCESS if the transaction is allowed, tecNO_PERMISSION if not. + * @return tesSUCCESS if the transaction is allowed, tecNO_DELEGATE_PERMISSION + * if not. 
*/ TER checkTxPermission(std::shared_ptr const& delegate, STTx const& tx); diff --git a/src/xrpld/app/misc/detail/DelegateUtils.cpp b/src/xrpld/app/misc/detail/DelegateUtils.cpp index 7b7021fe9e..229af555ff 100644 --- a/src/xrpld/app/misc/detail/DelegateUtils.cpp +++ b/src/xrpld/app/misc/detail/DelegateUtils.cpp @@ -26,7 +26,7 @@ TER checkTxPermission(std::shared_ptr const& delegate, STTx const& tx) { if (!delegate) - return tecNO_PERMISSION; // LCOV_EXCL_LINE + return tecNO_DELEGATE_PERMISSION; // LCOV_EXCL_LINE auto const permissionArray = delegate->getFieldArray(sfPermissions); auto const txPermission = tx.getTxnType() + 1; @@ -38,7 +38,7 @@ checkTxPermission(std::shared_ptr const& delegate, STTx const& tx) return tesSUCCESS; } - return tecNO_PERMISSION; + return tecNO_DELEGATE_PERMISSION; } void diff --git a/src/xrpld/app/tx/detail/DelegateSet.cpp b/src/xrpld/app/tx/detail/DelegateSet.cpp index d93ed6fa96..34e1c3afd3 100644 --- a/src/xrpld/app/tx/detail/DelegateSet.cpp +++ b/src/xrpld/app/tx/detail/DelegateSet.cpp @@ -63,7 +63,7 @@ DelegateSet::preclaim(PreclaimContext const& ctx) return terNO_ACCOUNT; // LCOV_EXCL_LINE if (!ctx.view.exists(keylet::account(ctx.tx[sfAuthorize]))) - return terNO_ACCOUNT; + return tecNO_TARGET; auto const& permissions = ctx.tx.getFieldArray(sfPermissions); for (auto const& permission : permissions) diff --git a/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp b/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp index 85a1f6cf1a..06ea089526 100644 --- a/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp +++ b/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp @@ -62,7 +62,7 @@ MPTokenIssuanceSet::checkPermission(ReadView const& view, STTx const& tx) auto const sle = view.read(delegateKey); if (!sle) - return tecNO_PERMISSION; + return tecNO_DELEGATE_PERMISSION; if (checkTxPermission(sle, tx) == tesSUCCESS) return tesSUCCESS; @@ -72,18 +72,18 @@ MPTokenIssuanceSet::checkPermission(ReadView const& view, STTx const& tx) // this is added in case more flags will be added for MPTokenIssuanceSet // in the future. Currently unreachable. 
if (txFlags & tfMPTokenIssuanceSetPermissionMask) - return tecNO_PERMISSION; // LCOV_EXCL_LINE + return tecNO_DELEGATE_PERMISSION; // LCOV_EXCL_LINE std::unordered_set granularPermissions; loadGranularPermission(sle, ttMPTOKEN_ISSUANCE_SET, granularPermissions); if (txFlags & tfMPTLock && !granularPermissions.contains(MPTokenIssuanceLock)) - return tecNO_PERMISSION; + return tecNO_DELEGATE_PERMISSION; if (txFlags & tfMPTUnlock && !granularPermissions.contains(MPTokenIssuanceUnlock)) - return tecNO_PERMISSION; + return tecNO_DELEGATE_PERMISSION; return tesSUCCESS; } diff --git a/src/xrpld/app/tx/detail/Payment.cpp b/src/xrpld/app/tx/detail/Payment.cpp index b597af570a..f36e1bfe3d 100644 --- a/src/xrpld/app/tx/detail/Payment.cpp +++ b/src/xrpld/app/tx/detail/Payment.cpp @@ -255,7 +255,7 @@ Payment::checkPermission(ReadView const& view, STTx const& tx) auto const sle = view.read(delegateKey); if (!sle) - return tecNO_PERMISSION; + return tecNO_DELEGATE_PERMISSION; if (checkTxPermission(sle, tx) == tesSUCCESS) return tesSUCCESS; @@ -274,7 +274,7 @@ Payment::checkPermission(ReadView const& view, STTx const& tx) amountIssue.account == tx[sfDestination]) return tesSUCCESS; - return tecNO_PERMISSION; + return tecNO_DELEGATE_PERMISSION; } TER diff --git a/src/xrpld/app/tx/detail/SetAccount.cpp b/src/xrpld/app/tx/detail/SetAccount.cpp index 6e19c4ae86..ec618981c1 100644 --- a/src/xrpld/app/tx/detail/SetAccount.cpp +++ b/src/xrpld/app/tx/detail/SetAccount.cpp @@ -202,7 +202,7 @@ SetAccount::checkPermission(ReadView const& view, STTx const& tx) auto const sle = view.read(delegateKey); if (!sle) - return tecNO_PERMISSION; + return tecNO_DELEGATE_PERMISSION; std::unordered_set granularPermissions; loadGranularPermission(sle, ttACCOUNT_SET, granularPermissions); @@ -215,31 +215,31 @@ SetAccount::checkPermission(ReadView const& view, STTx const& tx) // update the flag on behalf of another account, it is not // authorized. 
if (uSetFlag != 0 || uClearFlag != 0 || uTxFlags & tfUniversalMask) - return tecNO_PERMISSION; + return tecNO_DELEGATE_PERMISSION; if (tx.isFieldPresent(sfEmailHash) && !granularPermissions.contains(AccountEmailHashSet)) - return tecNO_PERMISSION; + return tecNO_DELEGATE_PERMISSION; if (tx.isFieldPresent(sfWalletLocator) || tx.isFieldPresent(sfNFTokenMinter)) - return tecNO_PERMISSION; + return tecNO_DELEGATE_PERMISSION; if (tx.isFieldPresent(sfMessageKey) && !granularPermissions.contains(AccountMessageKeySet)) - return tecNO_PERMISSION; + return tecNO_DELEGATE_PERMISSION; if (tx.isFieldPresent(sfDomain) && !granularPermissions.contains(AccountDomainSet)) - return tecNO_PERMISSION; + return tecNO_DELEGATE_PERMISSION; if (tx.isFieldPresent(sfTransferRate) && !granularPermissions.contains(AccountTransferRateSet)) - return tecNO_PERMISSION; + return tecNO_DELEGATE_PERMISSION; if (tx.isFieldPresent(sfTickSize) && !granularPermissions.contains(AccountTickSizeSet)) - return tecNO_PERMISSION; + return tecNO_DELEGATE_PERMISSION; return tesSUCCESS; } diff --git a/src/xrpld/app/tx/detail/SetTrust.cpp b/src/xrpld/app/tx/detail/SetTrust.cpp index 5e83c201fa..d3b39aaf11 100644 --- a/src/xrpld/app/tx/detail/SetTrust.cpp +++ b/src/xrpld/app/tx/detail/SetTrust.cpp @@ -141,7 +141,7 @@ SetTrust::checkPermission(ReadView const& view, STTx const& tx) auto const sle = view.read(delegateKey); if (!sle) - return tecNO_PERMISSION; + return tecNO_DELEGATE_PERMISSION; if (checkTxPermission(sle, tx) == tesSUCCESS) return tesSUCCESS; @@ -152,10 +152,10 @@ SetTrust::checkPermission(ReadView const& view, STTx const& tx) // TrustlineUnfreeze granular permission. Setting other flags returns // error. if (txFlags & tfTrustSetPermissionMask) - return tecNO_PERMISSION; + return tecNO_DELEGATE_PERMISSION; if (tx.isFieldPresent(sfQualityIn) || tx.isFieldPresent(sfQualityOut)) - return tecNO_PERMISSION; + return tecNO_DELEGATE_PERMISSION; auto const saLimitAmount = tx.getFieldAmount(sfLimitAmount); auto const sleRippleState = view.read(keylet::line( @@ -164,19 +164,19 @@ SetTrust::checkPermission(ReadView const& view, STTx const& tx) // if the trustline does not exist, granular permissions are // not allowed to create trustline if (!sleRippleState) - return tecNO_PERMISSION; + return tecNO_DELEGATE_PERMISSION; std::unordered_set granularPermissions; loadGranularPermission(sle, ttTRUST_SET, granularPermissions); if (txFlags & tfSetfAuth && !granularPermissions.contains(TrustlineAuthorize)) - return tecNO_PERMISSION; + return tecNO_DELEGATE_PERMISSION; if (txFlags & tfSetFreeze && !granularPermissions.contains(TrustlineFreeze)) - return tecNO_PERMISSION; + return tecNO_DELEGATE_PERMISSION; if (txFlags & tfClearFreeze && !granularPermissions.contains(TrustlineUnfreeze)) - return tecNO_PERMISSION; + return tecNO_DELEGATE_PERMISSION; // updating LimitAmount is not allowed only with granular permissions, // unless there's a new granular permission for this in the future. 
@@ -188,7 +188,7 @@ SetTrust::checkPermission(ReadView const& view, STTx const& tx) saLimitAllow.setIssuer(tx[sfAccount]); if (curLimit != saLimitAllow) - return tecNO_PERMISSION; + return tecNO_DELEGATE_PERMISSION; return tesSUCCESS; } diff --git a/src/xrpld/app/tx/detail/Transactor.cpp b/src/xrpld/app/tx/detail/Transactor.cpp index cc82f7c3ca..5f4cba5cf8 100644 --- a/src/xrpld/app/tx/detail/Transactor.cpp +++ b/src/xrpld/app/tx/detail/Transactor.cpp @@ -215,7 +215,7 @@ Transactor::checkPermission(ReadView const& view, STTx const& tx) auto const sle = view.read(delegateKey); if (!sle) - return tecNO_PERMISSION; + return tecNO_DELEGATE_PERMISSION; return checkTxPermission(sle, tx); } From 11edaa441db07d527fe16c300b822239de7d7012 Mon Sep 17 00:00:00 2001 From: Michael Legleux Date: Wed, 4 Jun 2025 10:55:23 -0700 Subject: [PATCH 049/244] Set version to 2.5.0-rc1 (#5472) --- src/libxrpl/protocol/BuildInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libxrpl/protocol/BuildInfo.cpp b/src/libxrpl/protocol/BuildInfo.cpp index 1f061cebdc..b9b583046e 100644 --- a/src/libxrpl/protocol/BuildInfo.cpp +++ b/src/libxrpl/protocol/BuildInfo.cpp @@ -36,7 +36,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "2.5.0-b1" +char const* const versionString = "2.5.0-rc1" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From 58c2c82a30322ca3b0fd390620ab41926308da83 Mon Sep 17 00:00:00 2001 From: Denis Angell Date: Thu, 5 Jun 2025 14:54:45 +0200 Subject: [PATCH 050/244] fix: Amendment-guard `TokenEscrow` preclaim and expand tests (#5473) This change amendment-guards the preclaim for `TokenEscrow`, as well as expands tests to increase code coverage. 
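In rough terms, the token-specific preclaim checks for `EscrowFinish` and `EscrowCancel` are now wrapped in an amendment check, so they only run when `featureTokenEscrow` is enabled on the ledger. A minimal sketch of the resulting shape is below (simplified; the `src/xrpld/app/tx/detail/Escrow.cpp` hunks in this patch are authoritative, and the dispatch to the asset-specific helper is elided):

    TER
    EscrowFinish::preclaim(PreclaimContext const& ctx)
    {
        // ... unrelated preclaim checks elided ...

        // Sketch only: token-specific checks are skipped entirely unless
        // the TokenEscrow amendment is active.
        if (ctx.view.rules().enabled(featureTokenEscrow))
        {
            auto const k =
                keylet::escrow(ctx.tx[sfOwner], ctx.tx[sfOfferSequence]);
            auto const slep = ctx.view.read(k);
            if (!slep)
                return tecNO_TARGET;

            STAmount const amount = (*slep)[sfAmount];
            if (!isXRP(amount))
            {
                // IOU/MPT escrows dispatch to the asset-specific preclaim
                // helper here (a std::visit over the asset type in the
                // real code), propagating any non-tesSUCCESS result.
            }
        }
        return tesSUCCESS;
    }

When the amendment is not enabled, a missing escrow is still reported as tecNO_TARGET, but from doApply rather than preclaim, which the expanded tests exercise for both amendment settings.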
--- src/libxrpl/protocol/STAmount.cpp | 7 +- src/test/app/EscrowToken_test.cpp | 227 +++++++++++++++--- src/test/app/Escrow_test.cpp | 6 + src/test/ledger/Invariants_test.cpp | 147 ++++++++++++ src/xrpld/app/tx/detail/Escrow.cpp | 87 ++++--- src/xrpld/app/tx/detail/InvariantCheck.cpp | 41 ++-- .../app/tx/detail/MPTokenIssuanceDestroy.cpp | 2 +- src/xrpld/ledger/detail/View.cpp | 95 ++++---- 8 files changed, 467 insertions(+), 145 deletions(-) diff --git a/src/libxrpl/protocol/STAmount.cpp b/src/libxrpl/protocol/STAmount.cpp index 845ad6481a..0c72244885 100644 --- a/src/libxrpl/protocol/STAmount.cpp +++ b/src/libxrpl/protocol/STAmount.cpp @@ -581,7 +581,10 @@ canAdd(STAmount const& a, STAmount const& b) return true; } + // LCOV_EXCL_START + UNREACHABLE("STAmount::canAdd : unexpected STAmount type"); return false; + // LCOV_EXCL_STOP } /** @@ -653,8 +656,10 @@ canSubtract(STAmount const& a, STAmount const& b) return false; return true; } - + // LCOV_EXCL_START + UNREACHABLE("STAmount::canSubtract : unexpected STAmount type"); return false; + // LCOV_EXCL_STOP } void diff --git a/src/test/app/EscrowToken_test.cpp b/src/test/app/EscrowToken_test.cpp index da9610f0c3..6ba8c48c93 100644 --- a/src/test/app/EscrowToken_test.cpp +++ b/src/test/app/EscrowToken_test.cpp @@ -21,9 +21,11 @@ #include #include +#include #include #include +#include #include #include @@ -56,27 +58,39 @@ struct EscrowToken_test : public beast::unit_test::suite return 0; } - void - issuerIOUEscrowed( + jtx::PrettyAmount + issuerBalance( jtx::Env& env, jtx::Account const& account, - Currency const& currency, - int const& outstanding, - int const& locked) + Issue const& issue) { Json::Value params; params[jss::account] = account.human(); auto jrr = env.rpc("json", "gateway_balances", to_string(params)); auto const result = jrr[jss::result]; - auto const actualOutstanding = - result[jss::obligations][to_string(currency)]; - BEAST_EXPECT(actualOutstanding == to_string(outstanding)); - if (locked != 0) - { - auto const actualEscrowed = - result[jss::locked][to_string(currency)]; - BEAST_EXPECT(actualEscrowed == to_string(locked)); - } + auto const obligations = + result[jss::obligations][to_string(issue.currency)]; + if (obligations.isNull()) + return {STAmount(issue, 0), account.name()}; + STAmount const amount = amountFromString(issue, obligations.asString()); + return {amount, account.name()}; + } + + jtx::PrettyAmount + issuerEscrowed( + jtx::Env& env, + jtx::Account const& account, + Issue const& issue) + { + Json::Value params; + params[jss::account] = account.human(); + auto jrr = env.rpc("json", "gateway_balances", to_string(params)); + auto const result = jrr[jss::result]; + auto const locked = result[jss::locked][to_string(issue.currency)]; + if (locked.isNull()) + return {STAmount(issue, 0), account.name()}; + STAmount const amount = amountFromString(issue, locked.asString()); + return {amount, account.name()}; } void @@ -136,6 +150,37 @@ struct EscrowToken_test : public beast::unit_test::suite env(escrow::cancel(bob, alice, seq2), finishResult); env.close(); } + + for (bool const withTokenEscrow : {false, true}) + { + auto const amend = + withTokenEscrow ? 
features : features - featureTokenEscrow; + Env env{*this, amend}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + env.fund(XRP(5000), alice, bob, gw); + env(fset(gw, asfAllowTrustLineLocking)); + env.close(); + env.trust(USD(10'000), alice, bob); + env.close(); + env(pay(gw, alice, USD(5000))); + env(pay(gw, bob, USD(5000))); + env.close(); + + auto const seq1 = env.seq(alice); + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150), + ter(tecNO_TARGET)); + env.close(); + + env(escrow::cancel(bob, alice, seq1), ter(tecNO_TARGET)); + env.close(); + } } void @@ -865,34 +910,76 @@ struct EscrowToken_test : public beast::unit_test::suite env.close(); env.trust(USD(10'000), alice, bob); env.close(); - env(pay(gw, alice, USD(5000))); - env(pay(gw, bob, USD(5000))); + env(pay(gw, alice, USD(5'000))); + env(pay(gw, bob, USD(5'000))); env.close(); + auto const outstandingUSD = USD(10'000); + + // Create & Finish Escrow auto const seq1 = env.seq(alice); - env(escrow::create(alice, bob, USD(1'000)), - escrow::condition(escrow::cb1), - escrow::finish_time(env.now() + 1s), - fee(baseFee * 150), - ter(tesSUCCESS)); - env.close(); - env(escrow::finish(bob, alice, seq1), - escrow::condition(escrow::cb1), - escrow::fulfillment(escrow::fb1), - fee(baseFee * 150), - ter(tesSUCCESS)); - env.close(); + { + auto const preAliceUSD = env.balance(alice, USD); + auto const preBobUSD = env.balance(bob, USD); + env(escrow::create(alice, bob, USD(1'000)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + BEAST_EXPECT(env.balance(alice, USD) == preAliceUSD - USD(1'000)); + BEAST_EXPECT(env.balance(bob, USD) == preBobUSD); + BEAST_EXPECT( + issuerBalance(env, gw, USD) == outstandingUSD - USD(1'000)); + BEAST_EXPECT(issuerEscrowed(env, gw, USD) == USD(1'000)); + } + { + auto const preAliceUSD = env.balance(alice, USD); + auto const preBobUSD = env.balance(bob, USD); + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + BEAST_EXPECT(env.balance(alice, USD) == preAliceUSD); + BEAST_EXPECT(env.balance(bob, USD) == preBobUSD + USD(1'000)); + BEAST_EXPECT(issuerBalance(env, gw, USD) == outstandingUSD); + BEAST_EXPECT(issuerEscrowed(env, gw, USD) == USD(0)); + } + + // Create & Cancel Escrow auto const seq2 = env.seq(alice); - env(escrow::create(alice, bob, USD(1'000)), - escrow::condition(escrow::cb2), - escrow::finish_time(env.now() + 1s), - escrow::cancel_time(env.now() + 2s), - fee(baseFee * 150), - ter(tesSUCCESS)); - env.close(); - env(escrow::cancel(bob, alice, seq2), ter(tesSUCCESS)); - env.close(); + { + auto const preAliceUSD = env.balance(alice, USD); + auto const preBobUSD = env.balance(bob, USD); + env(escrow::create(alice, bob, USD(1'000)), + escrow::condition(escrow::cb2), + escrow::finish_time(env.now() + 1s), + escrow::cancel_time(env.now() + 2s), + fee(baseFee * 150), + ter(tesSUCCESS)); + env.close(); + + BEAST_EXPECT(env.balance(alice, USD) == preAliceUSD - USD(1'000)); + BEAST_EXPECT(env.balance(bob, USD) == preBobUSD); + BEAST_EXPECT( + issuerBalance(env, gw, USD) == outstandingUSD - USD(1'000)); + BEAST_EXPECT(issuerEscrowed(env, gw, USD) == USD(1'000)); + } + { + auto const preAliceUSD = 
env.balance(alice, USD); + auto const preBobUSD = env.balance(bob, USD); + env(escrow::cancel(bob, alice, seq2), ter(tesSUCCESS)); + env.close(); + + BEAST_EXPECT(env.balance(alice, USD) == preAliceUSD + USD(1'000)); + BEAST_EXPECT(env.balance(bob, USD) == preBobUSD); + BEAST_EXPECT(issuerBalance(env, gw, USD) == outstandingUSD); + BEAST_EXPECT(issuerEscrowed(env, gw, USD) == USD(0)); + } } void @@ -2430,7 +2517,6 @@ struct EscrowToken_test : public beast::unit_test::suite mptGw.authorize({.account = alice}); mptGw.authorize({.account = bob}); auto const MPT = mptGw["MPT"]; - env(pay(gw, alice, MPT(10))); env(pay(gw, bob, MPT(10))); env.close(); @@ -2521,6 +2607,39 @@ struct EscrowToken_test : public beast::unit_test::suite env.close(); } + // tecOBJECT_NOT_FOUND: MPT issuance does not exist + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + env.fund(XRP(10'000), alice, bob); + env.close(); + + auto const seq1 = env.seq(alice); + env.app().openLedger().modify( + [&](OpenView& view, beast::Journal j) { + Sandbox sb(&view, tapNONE); + auto sleNew = + std::make_shared(keylet::escrow(alice, seq1)); + MPTIssue const mpt{ + MPTIssue{makeMptID(1, AccountID(0x4985601))}}; + STAmount amt(mpt, 10); + sleNew->setAccountID(sfDestination, bob); + sleNew->setFieldAmount(sfAmount, amt); + sb.insert(sleNew); + sb.apply(view); + return true; + }); + + env(escrow::finish(bob, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150), + ter(tecOBJECT_NOT_FOUND)); + env.close(); + } + // tecLOCKED: issuer has locked the dest { Env env{*this, features}; @@ -2726,6 +2845,36 @@ struct EscrowToken_test : public beast::unit_test::suite env(escrow::cancel(bob, alice, seq1), ter(tecNO_AUTH)); env.close(); } + + // tecOBJECT_NOT_FOUND: MPT issuance does not exist + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + env.fund(XRP(10'000), alice, bob); + + auto const seq1 = env.seq(alice); + env.app().openLedger().modify( + [&](OpenView& view, beast::Journal j) { + Sandbox sb(&view, tapNONE); + auto sleNew = + std::make_shared(keylet::escrow(alice, seq1)); + MPTIssue const mpt{ + MPTIssue{makeMptID(1, AccountID(0x4985601))}}; + STAmount amt(mpt, 10); + sleNew->setAccountID(sfDestination, bob); + sleNew->setFieldAmount(sfAmount, amt); + sb.insert(sleNew); + sb.apply(view); + return true; + }); + + env(escrow::cancel(bob, alice, seq1), + fee(baseFee), + ter(tecOBJECT_NOT_FOUND)); + env.close(); + } } void @@ -3603,12 +3752,14 @@ struct EscrowToken_test : public beast::unit_test::suite fee(baseFee * 150)); env.close(); + env(pay(alice, gw, MPT(10'000)), ter(tecPATH_PARTIAL)); env(pay(alice, gw, MPT(9'990))); env(pay(bob, gw, MPT(10'000))); BEAST_EXPECT(env.balance(alice, MPT) == MPT(0)); BEAST_EXPECT(mptEscrowed(env, alice, MPT) == 10); BEAST_EXPECT(env.balance(bob, MPT) == MPT(0)); BEAST_EXPECT(mptEscrowed(env, bob, MPT) == 0); + BEAST_EXPECT(env.balance(gw, MPT) == MPT(10)); mptGw.authorize({.account = bob, .flags = tfMPTUnauthorize}); mptGw.destroy( {.id = mptGw.issuanceID(), diff --git a/src/test/app/Escrow_test.cpp b/src/test/app/Escrow_test.cpp index aa86ad338e..21ef70a86e 100644 --- a/src/test/app/Escrow_test.cpp +++ b/src/test/app/Escrow_test.cpp @@ -369,6 +369,12 @@ struct Escrow_test : public beast::unit_test::suite env.fund(XRP(5000), "alice", "bob", 
"gw"); env.close(); + // temINVALID_FLAG + env(escrow::create("alice", "bob", XRP(1000)), + escrow::finish_time(env.now() + 5s), + txflags(tfPassive), + ter(temINVALID_FLAG)); + // Finish time is in the past env(escrow::create("alice", "bob", XRP(1000)), escrow::finish_time(env.now() - 5s), diff --git a/src/test/ledger/Invariants_test.cpp b/src/test/ledger/Invariants_test.cpp index ebd2235cc9..6178b413d5 100644 --- a/src/test/ledger/Invariants_test.cpp +++ b/src/test/ledger/Invariants_test.cpp @@ -762,6 +762,153 @@ class Invariants_test : public beast::unit_test::suite ac.view().insert(sleNew); return true; }); + + // IOU < 0 + doInvariantCheck( + {{"escrow specifies invalid amount"}}, + [](Account const& A1, Account const&, ApplyContext& ac) { + // escrow with too-little iou + auto const sle = ac.view().peek(keylet::account(A1.id())); + if (!sle) + return false; + auto sleNew = std::make_shared( + keylet::escrow(A1, (*sle)[sfSequence] + 2)); + + Issue const usd{ + Currency(0x5553440000000000), AccountID(0x4985601)}; + STAmount amt(usd, -1); + sleNew->setFieldAmount(sfAmount, amt); + ac.view().insert(sleNew); + return true; + }); + + // IOU bad currency + doInvariantCheck( + {{"escrow specifies invalid amount"}}, + [](Account const& A1, Account const&, ApplyContext& ac) { + // escrow with bad iou currency + auto const sle = ac.view().peek(keylet::account(A1.id())); + if (!sle) + return false; + auto sleNew = std::make_shared( + keylet::escrow(A1, (*sle)[sfSequence] + 2)); + + Issue const bad{badCurrency(), AccountID(0x4985601)}; + STAmount amt(bad, 1); + sleNew->setFieldAmount(sfAmount, amt); + ac.view().insert(sleNew); + return true; + }); + + // MPT < 0 + doInvariantCheck( + {{"escrow specifies invalid amount"}}, + [](Account const& A1, Account const&, ApplyContext& ac) { + // escrow with too-little mpt + auto const sle = ac.view().peek(keylet::account(A1.id())); + if (!sle) + return false; + auto sleNew = std::make_shared( + keylet::escrow(A1, (*sle)[sfSequence] + 2)); + + MPTIssue const mpt{ + MPTIssue{makeMptID(1, AccountID(0x4985601))}}; + STAmount amt(mpt, -1); + sleNew->setFieldAmount(sfAmount, amt); + ac.view().insert(sleNew); + return true; + }); + + // MPT OutstandingAmount < 0 + doInvariantCheck( + {{"escrow specifies invalid amount"}}, + [](Account const& A1, Account const&, ApplyContext& ac) { + // mpissuance outstanding is negative + auto const sle = ac.view().peek(keylet::account(A1.id())); + if (!sle) + return false; + + MPTIssue const mpt{ + MPTIssue{makeMptID(1, AccountID(0x4985601))}}; + auto sleNew = + std::make_shared(keylet::mptIssuance(mpt.getMptID())); + sleNew->setFieldU64(sfOutstandingAmount, -1); + ac.view().insert(sleNew); + return true; + }); + + // MPT LockedAmount < 0 + doInvariantCheck( + {{"escrow specifies invalid amount"}}, + [](Account const& A1, Account const&, ApplyContext& ac) { + // mpissuance locked is less than locked + auto const sle = ac.view().peek(keylet::account(A1.id())); + if (!sle) + return false; + + MPTIssue const mpt{ + MPTIssue{makeMptID(1, AccountID(0x4985601))}}; + auto sleNew = + std::make_shared(keylet::mptIssuance(mpt.getMptID())); + sleNew->setFieldU64(sfLockedAmount, -1); + ac.view().insert(sleNew); + return true; + }); + + // MPT OutstandingAmount < LockedAmount + doInvariantCheck( + {{"escrow specifies invalid amount"}}, + [](Account const& A1, Account const&, ApplyContext& ac) { + // mpissuance outstanding is less than locked + auto const sle = ac.view().peek(keylet::account(A1.id())); + if (!sle) + return false; + + MPTIssue 
const mpt{ + MPTIssue{makeMptID(1, AccountID(0x4985601))}}; + auto sleNew = + std::make_shared(keylet::mptIssuance(mpt.getMptID())); + sleNew->setFieldU64(sfOutstandingAmount, 1); + sleNew->setFieldU64(sfLockedAmount, 10); + ac.view().insert(sleNew); + return true; + }); + + // MPT MPTAmount < 0 + doInvariantCheck( + {{"escrow specifies invalid amount"}}, + [](Account const& A1, Account const&, ApplyContext& ac) { + // mptoken amount is negative + auto const sle = ac.view().peek(keylet::account(A1.id())); + if (!sle) + return false; + + MPTIssue const mpt{ + MPTIssue{makeMptID(1, AccountID(0x4985601))}}; + auto sleNew = + std::make_shared(keylet::mptoken(mpt.getMptID(), A1)); + sleNew->setFieldU64(sfMPTAmount, -1); + ac.view().insert(sleNew); + return true; + }); + + // MPT LockedAmount < 0 + doInvariantCheck( + {{"escrow specifies invalid amount"}}, + [](Account const& A1, Account const&, ApplyContext& ac) { + // mptoken locked amount is negative + auto const sle = ac.view().peek(keylet::account(A1.id())); + if (!sle) + return false; + + MPTIssue const mpt{ + MPTIssue{makeMptID(1, AccountID(0x4985601))}}; + auto sleNew = + std::make_shared(keylet::mptoken(mpt.getMptID(), A1)); + sleNew->setFieldU64(sfLockedAmount, -1); + ac.view().insert(sleNew); + return true; + }); } void diff --git a/src/xrpld/app/tx/detail/Escrow.cpp b/src/xrpld/app/tx/detail/Escrow.cpp index f7840650f7..75080da9a5 100644 --- a/src/xrpld/app/tx/detail/Escrow.cpp +++ b/src/xrpld/app/tx/detail/Escrow.cpp @@ -595,7 +595,7 @@ EscrowCreate::doApply() }, amount.asset().value()); !isTesSuccess(ret)) - return ret; + return ret; // LCOV_EXCL_LINE } // increment owner count @@ -766,26 +766,26 @@ EscrowFinish::preclaim(PreclaimContext const& ctx) return err; } - auto const k = keylet::escrow(ctx.tx[sfOwner], ctx.tx[sfOfferSequence]); - auto const slep = ctx.view.read(k); - if (!slep) - return tecNO_TARGET; - - AccountID const dest = (*slep)[sfDestination]; - STAmount const amount = (*slep)[sfAmount]; - - if (!isXRP(amount)) + if (ctx.view.rules().enabled(featureTokenEscrow)) { - if (!ctx.view.rules().enabled(featureTokenEscrow)) - return temDISABLED; // LCOV_EXCL_LINE + auto const k = keylet::escrow(ctx.tx[sfOwner], ctx.tx[sfOfferSequence]); + auto const slep = ctx.view.read(k); + if (!slep) + return tecNO_TARGET; - if (auto const ret = std::visit( - [&](T const&) { - return escrowFinishPreclaimHelper(ctx, dest, amount); - }, - amount.asset().value()); - !isTesSuccess(ret)) - return ret; + AccountID const dest = (*slep)[sfDestination]; + STAmount const amount = (*slep)[sfAmount]; + + if (!isXRP(amount)) + { + if (auto const ret = std::visit( + [&](T const&) { + return escrowFinishPreclaimHelper(ctx, dest, amount); + }, + amount.asset().value()); + !isTesSuccess(ret)) + return ret; + } } return tesSUCCESS; } @@ -1015,7 +1015,12 @@ EscrowFinish::doApply() auto const k = keylet::escrow(ctx_.tx[sfOwner], ctx_.tx[sfOfferSequence]); auto const slep = ctx_.view().peek(k); if (!slep) + { + if (ctx_.view().rules().enabled(featureTokenEscrow)) + return tecINTERNAL; // LCOV_EXCL_LINE + return tecNO_TARGET; + } // If a cancel time is present, a finish operation should only succeed prior // to that time. 
fix1571 corrects a logic error in the check that would make @@ -1245,7 +1250,7 @@ escrowCancelPreclaimHelper( keylet::mptIssuance(amount.get().getMptID()); auto const sleIssuance = ctx.view.read(issuanceKey); if (!sleIssuance) - return tecOBJECT_NOT_FOUND; // LCOV_EXCL_LINE + return tecOBJECT_NOT_FOUND; // If the issuer has requireAuth set, check if the account is // authorized @@ -1261,26 +1266,27 @@ escrowCancelPreclaimHelper( TER EscrowCancel::preclaim(PreclaimContext const& ctx) { - auto const k = keylet::escrow(ctx.tx[sfOwner], ctx.tx[sfOfferSequence]); - auto const slep = ctx.view.read(k); - if (!slep) - return tecNO_TARGET; - - AccountID const account = (*slep)[sfAccount]; - STAmount const amount = (*slep)[sfAmount]; - - if (!isXRP(amount)) + if (ctx.view.rules().enabled(featureTokenEscrow)) { - if (!ctx.view.rules().enabled(featureTokenEscrow)) - return temDISABLED; // LCOV_EXCL_LINE + auto const k = keylet::escrow(ctx.tx[sfOwner], ctx.tx[sfOfferSequence]); + auto const slep = ctx.view.read(k); + if (!slep) + return tecNO_TARGET; - if (auto const ret = std::visit( - [&](T const&) { - return escrowCancelPreclaimHelper(ctx, account, amount); - }, - amount.asset().value()); - !isTesSuccess(ret)) - return ret; + AccountID const account = (*slep)[sfAccount]; + STAmount const amount = (*slep)[sfAmount]; + + if (!isXRP(amount)) + { + if (auto const ret = std::visit( + [&](T const&) { + return escrowCancelPreclaimHelper( + ctx, account, amount); + }, + amount.asset().value()); + !isTesSuccess(ret)) + return ret; + } } return tesSUCCESS; } @@ -1291,7 +1297,12 @@ EscrowCancel::doApply() auto const k = keylet::escrow(ctx_.tx[sfOwner], ctx_.tx[sfOfferSequence]); auto const slep = ctx_.view().peek(k); if (!slep) + { + if (ctx_.view().rules().enabled(featureTokenEscrow)) + return tecINTERNAL; // LCOV_EXCL_LINE + return tecNO_TARGET; + } if (ctx_.view().rules().enabled(fix1571)) { diff --git a/src/xrpld/app/tx/detail/InvariantCheck.cpp b/src/xrpld/app/tx/detail/InvariantCheck.cpp index 31b8fe3cc1..d93378d3cd 100644 --- a/src/xrpld/app/tx/detail/InvariantCheck.cpp +++ b/src/xrpld/app/tx/detail/InvariantCheck.cpp @@ -271,26 +271,6 @@ NoZeroEscrow::visitEntry( std::shared_ptr const& after) { auto isBad = [](STAmount const& amount) { - // IOU case - if (amount.holds()) - { - if (amount <= beast::zero) - return true; - - if (badCurrency() == amount.getCurrency()) - return true; - } - - // MPT case - if (amount.holds()) - { - if (amount <= beast::zero) - return true; - - if (amount.mpt() > MPTAmount{maxMPTokenAmount}) - return true; - } - // XRP case if (amount.native()) { @@ -300,7 +280,28 @@ NoZeroEscrow::visitEntry( if (amount.xrp() >= INITIAL_XRP) return true; } + else + { + // IOU case + if (amount.holds()) + { + if (amount <= beast::zero) + return true; + if (badCurrency() == amount.getCurrency()) + return true; + } + + // MPT case + if (amount.holds()) + { + if (amount <= beast::zero) + return true; + + if (amount.mpt() > MPTAmount{maxMPTokenAmount}) + return true; // LCOV_EXCL_LINE + } + } return false; }; diff --git a/src/xrpld/app/tx/detail/MPTokenIssuanceDestroy.cpp b/src/xrpld/app/tx/detail/MPTokenIssuanceDestroy.cpp index a2e1f33b94..e2b87dbd79 100644 --- a/src/xrpld/app/tx/detail/MPTokenIssuanceDestroy.cpp +++ b/src/xrpld/app/tx/detail/MPTokenIssuanceDestroy.cpp @@ -59,7 +59,7 @@ MPTokenIssuanceDestroy::preclaim(PreclaimContext const& ctx) return tecHAS_OBLIGATIONS; if ((*sleMPT)[~sfLockedAmount].value_or(0) != 0) - return tecHAS_OBLIGATIONS; + return tecHAS_OBLIGATIONS; // LCOV_EXCL_LINE 
return tesSUCCESS; } diff --git a/src/xrpld/ledger/detail/View.cpp b/src/xrpld/ledger/detail/View.cpp index d3161dccae..cb95819014 100644 --- a/src/xrpld/ledger/detail/View.cpp +++ b/src/xrpld/ledger/detail/View.cpp @@ -2747,18 +2747,18 @@ rippleLockEscrowMPT( auto const mptID = keylet::mptIssuance(mptIssue.getMptID()); auto sleIssuance = view.peek(mptID); if (!sleIssuance) - { + { // LCOV_EXCL_START JLOG(j.error()) << "rippleLockEscrowMPT: MPT issuance not found for " << mptIssue.getMptID(); - return tecOBJECT_NOT_FOUND; // LCOV_EXCL_LINE - } + return tecOBJECT_NOT_FOUND; + } // LCOV_EXCL_STOP if (amount.getIssuer() == sender) - { + { // LCOV_EXCL_START JLOG(j.error()) << "rippleLockEscrowMPT: sender is the issuer, cannot lock MPTs."; - return tecINTERNAL; // LCOV_EXCL_LINE - } + return tecINTERNAL; + } // LCOV_EXCL_STOP // 1. Decrease the MPT Holder MPTAmount // 2. Increase the MPT Holder EscrowedAmount @@ -2766,23 +2766,23 @@ rippleLockEscrowMPT( auto const mptokenID = keylet::mptoken(mptID.key, sender); auto sle = view.peek(mptokenID); if (!sle) - { + { // LCOV_EXCL_START JLOG(j.error()) << "rippleLockEscrowMPT: MPToken not found for " << sender; - return tecOBJECT_NOT_FOUND; // LCOV_EXCL_LINE - } + return tecOBJECT_NOT_FOUND; + } // LCOV_EXCL_STOP auto const amt = sle->getFieldU64(sfMPTAmount); auto const pay = amount.mpt().value(); // Underflow check for subtraction if (!canSubtract(STAmount(mptIssue, amt), STAmount(mptIssue, pay))) - { + { // LCOV_EXCL_START JLOG(j.error()) << "rippleLockEscrowMPT: insufficient MPTAmount for " << to_string(sender) << ": " << amt << " < " << pay; - return tecINTERNAL; // LCOV_EXCL_LINE - } + return tecINTERNAL; + } // LCOV_EXCL_STOP (*sle)[sfMPTAmount] = amt - pay; @@ -2790,12 +2790,12 @@ rippleLockEscrowMPT( uint64_t const locked = (*sle)[~sfLockedAmount].value_or(0); if (!canAdd(STAmount(mptIssue, locked), STAmount(mptIssue, pay))) - { + { // LCOV_EXCL_START JLOG(j.error()) << "rippleLockEscrowMPT: overflow on locked amount for " << to_string(sender) << ": " << locked << " + " << pay; - return tecINTERNAL; // LCOV_EXCL_LINE - } + return tecINTERNAL; + } // LCOV_EXCL_STOP if (sle->isFieldPresent(sfLockedAmount)) (*sle)[sfLockedAmount] += pay; @@ -2815,13 +2815,13 @@ rippleLockEscrowMPT( // Overflow check for addition if (!canAdd( STAmount(mptIssue, issuanceEscrowed), STAmount(mptIssue, pay))) - { + { // LCOV_EXCL_START JLOG(j.error()) << "rippleLockEscrowMPT: overflow on issuance " "locked amount for " << mptIssue.getMptID() << ": " << issuanceEscrowed << " + " << pay; - return tecINTERNAL; // LCOV_EXCL_LINE - } + return tecINTERNAL; + } // LCOV_EXCL_STOP if (sleIssuance->isFieldPresent(sfLockedAmount)) (*sleIssuance)[sfLockedAmount] += pay; @@ -2846,21 +2846,21 @@ rippleUnlockEscrowMPT( auto const mptID = keylet::mptIssuance(mptIssue.getMptID()); auto sleIssuance = view.peek(mptID); if (!sleIssuance) - { + { // LCOV_EXCL_START JLOG(j.error()) << "rippleUnlockEscrowMPT: MPT issuance not found for " << mptIssue.getMptID(); - return tecOBJECT_NOT_FOUND; // LCOV_EXCL_LINE - } + return tecOBJECT_NOT_FOUND; + } // LCOV_EXCL_STOP // Decrease the Issuance EscrowedAmount { if (!sleIssuance->isFieldPresent(sfLockedAmount)) - { + { // LCOV_EXCL_START JLOG(j.error()) << "rippleUnlockEscrowMPT: no locked amount in issuance for " << mptIssue.getMptID(); - return tecINTERNAL; // LCOV_EXCL_LINE - } + return tecINTERNAL; + } // LCOV_EXCL_STOP auto const locked = sleIssuance->getFieldU64(sfLockedAmount); auto const redeem = amount.mpt().value(); @@ -2868,12 +2868,12 
@@ rippleUnlockEscrowMPT( // Underflow check for subtraction if (!canSubtract( STAmount(mptIssue, locked), STAmount(mptIssue, redeem))) - { + { // LCOV_EXCL_START JLOG(j.error()) << "rippleUnlockEscrowMPT: insufficient locked amount for " << mptIssue.getMptID() << ": " << locked << " < " << redeem; - return tecINTERNAL; // LCOV_EXCL_LINE - } + return tecINTERNAL; + } // LCOV_EXCL_STOP auto const newLocked = locked - redeem; if (newLocked == 0) @@ -2889,23 +2889,23 @@ rippleUnlockEscrowMPT( auto const mptokenID = keylet::mptoken(mptID.key, receiver); auto sle = view.peek(mptokenID); if (!sle) - { + { // LCOV_EXCL_START JLOG(j.error()) << "rippleUnlockEscrowMPT: MPToken not found for " << receiver; return tecOBJECT_NOT_FOUND; // LCOV_EXCL_LINE - } + } // LCOV_EXCL_STOP auto current = sle->getFieldU64(sfMPTAmount); auto delta = amount.mpt().value(); // Overflow check for addition if (!canAdd(STAmount(mptIssue, current), STAmount(mptIssue, delta))) - { + { // LCOV_EXCL_START JLOG(j.error()) << "rippleUnlockEscrowMPT: overflow on MPTAmount for " << to_string(receiver) << ": " << current << " + " << delta; - return tecINTERNAL; // LCOV_EXCL_LINE - } + return tecINTERNAL; + } // LCOV_EXCL_STOP (*sle)[sfMPTAmount] += delta; view.update(sle); @@ -2919,55 +2919,56 @@ rippleUnlockEscrowMPT( // Underflow check for subtraction if (!canSubtract( STAmount(mptIssue, outstanding), STAmount(mptIssue, redeem))) - { + { // LCOV_EXCL_START JLOG(j.error()) << "rippleUnlockEscrowMPT: insufficient outstanding amount for " << mptIssue.getMptID() << ": " << outstanding << " < " << redeem; - return tecINTERNAL; // LCOV_EXCL_LINE - } + return tecINTERNAL; + } // LCOV_EXCL_STOP sleIssuance->setFieldU64(sfOutstandingAmount, outstanding - redeem); view.update(sleIssuance); } if (issuer == sender) - { + { // LCOV_EXCL_START JLOG(j.error()) << "rippleUnlockEscrowMPT: sender is the issuer, " "cannot unlock MPTs."; - return tecINTERNAL; // LCOV_EXCL_LINE - } + return tecINTERNAL; + } // LCOV_EXCL_STOP else { // Decrease the MPT Holder EscrowedAmount auto const mptokenID = keylet::mptoken(mptID.key, sender); auto sle = view.peek(mptokenID); if (!sle) - { + { // LCOV_EXCL_START JLOG(j.error()) << "rippleUnlockEscrowMPT: MPToken not found for " << sender; - return tecOBJECT_NOT_FOUND; // LCOV_EXCL_LINE - } + return tecOBJECT_NOT_FOUND; + } // LCOV_EXCL_STOP if (!sle->isFieldPresent(sfLockedAmount)) - { + { // LCOV_EXCL_START JLOG(j.error()) << "rippleUnlockEscrowMPT: no locked amount in MPToken for " << to_string(sender); - return tecINTERNAL; // LCOV_EXCL_LINE - } + return tecINTERNAL; + } // LCOV_EXCL_STOP auto const locked = sle->getFieldU64(sfLockedAmount); auto const delta = amount.mpt().value(); // Underflow check for subtraction + // LCOV_EXCL_START if (!canSubtract(STAmount(mptIssue, locked), STAmount(mptIssue, delta))) - { + { // LCOV_EXCL_START JLOG(j.error()) << "rippleUnlockEscrowMPT: insufficient locked amount for " << to_string(sender) << ": " << locked << " < " << delta; - return tecINTERNAL; // LCOV_EXCL_LINE - } + return tecINTERNAL; + } // LCOV_EXCL_STOP auto const newLocked = locked - delta; if (newLocked == 0) From 8bf4a5cbff4392fcca9990d931ba528085b9a22a Mon Sep 17 00:00:00 2001 From: Vlad <129996061+vvysokikh1@users.noreply.github.com> Date: Thu, 5 Jun 2025 14:37:30 +0100 Subject: [PATCH 051/244] chore: Remove external project build cores division (#5475) The CMake statements that make it seem as if the number of cores used to build external project dependencies is halved don't actually do anything. 
This change removes these statements. --- cmake/RippledSanity.cmake | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/cmake/RippledSanity.cmake b/cmake/RippledSanity.cmake index 3dd5fb782f..28ce854135 100644 --- a/cmake/RippledSanity.cmake +++ b/cmake/RippledSanity.cmake @@ -2,16 +2,6 @@ convenience variables and sanity checks #]===================================================================] -include(ProcessorCount) - -if (NOT ep_procs) - ProcessorCount(ep_procs) - if (ep_procs GREATER 1) - # never use more than half of cores for EP builds - math (EXPR ep_procs "${ep_procs} / 2") - message (STATUS "Using ${ep_procs} cores for ExternalProject builds.") - endif () -endif () get_property(is_multiconfig GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) set (CMAKE_CONFIGURATION_TYPES "Debug;Release" CACHE STRING "" FORCE) From d494bf45b2ba646a7283746f94306b08eaf3f08c Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Fri, 6 Jun 2025 12:01:02 -0400 Subject: [PATCH 052/244] refactor: Collapse some split log messages into one (#5347) Multi-line log messages are hard to work with. Writing these handful of related messages as one message should make the log a tiny bit easier to manage. --- src/xrpld/app/ledger/detail/InboundLedger.cpp | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/xrpld/app/ledger/detail/InboundLedger.cpp b/src/xrpld/app/ledger/detail/InboundLedger.cpp index c1eed3a9f3..eafa939506 100644 --- a/src/xrpld/app/ledger/detail/InboundLedger.cpp +++ b/src/xrpld/app/ledger/detail/InboundLedger.cpp @@ -502,15 +502,17 @@ InboundLedger::trigger(std::shared_ptr const& peer, TriggerReason reason) if (auto stream = journal_.debug()) { - stream << "Trigger acquiring ledger " << hash_; + std::stringstream ss; + ss << "Trigger acquiring ledger " << hash_; if (peer) - stream << " from " << peer; + ss << " from " << peer; if (complete_ || failed_) - stream << "complete=" << complete_ << " failed=" << failed_; + ss << " complete=" << complete_ << " failed=" << failed_; else - stream << "header=" << mHaveHeader << " tx=" << mHaveTransactions - << " as=" << mHaveState; + ss << " header=" << mHaveHeader << " tx=" << mHaveTransactions + << " as=" << mHaveState; + stream << ss.str(); } if (!mHaveHeader) From 35a40a8e6236110bfbaa96d9a891728b4bae8a4e Mon Sep 17 00:00:00 2001 From: Mayukha Vadari Date: Tue, 10 Jun 2025 14:47:27 +0800 Subject: [PATCH 053/244] fix: Improve multi-sign usage of `simulate` (#5479) This change allows users to submit simulate requests from a multi-sign account without needing to specify the accounts that are doing the multi-signing, and fixes an error with simulate that allowed double-"signed" (both single-sign and multi-sign public keys are provided) transactions. --- src/test/rpc/Simulate_test.cpp | 92 ++++++++++++++++++++++++-- src/xrpld/app/tx/detail/Transactor.cpp | 38 +++++++---- 2 files changed, 112 insertions(+), 18 deletions(-) diff --git a/src/test/rpc/Simulate_test.cpp b/src/test/rpc/Simulate_test.cpp index a4360ccc8b..5b3c0d2372 100644 --- a/src/test/rpc/Simulate_test.cpp +++ b/src/test/rpc/Simulate_test.cpp @@ -720,7 +720,11 @@ class Simulate_test : public beast::unit_test::suite Json::Value const& tx) { auto result = resp[jss::result]; checkBasicReturnValidity( - result, tx, env.seq(alice), env.current()->fees().base * 2); + result, + tx, + env.seq(alice), + tx.isMember(jss::Signers) ? 
env.current()->fees().base * 2 + : env.current()->fees().base); BEAST_EXPECT(result[jss::engine_result] == "tesSUCCESS"); BEAST_EXPECT(result[jss::engine_result_code] == 0); @@ -762,6 +766,10 @@ class Simulate_test : public beast::unit_test::suite tx[jss::Account] = alice.human(); tx[jss::TransactionType] = jss::AccountSet; tx[sfDomain] = newDomain; + + // test with autofill + testTx(env, tx, validateOutput, false); + tx[sfSigners] = Json::arrayValue; { Json::Value signer; @@ -771,7 +779,7 @@ class Simulate_test : public beast::unit_test::suite tx[sfSigners].append(signerOuter); } - // test with autofill + // test with just signer accounts testTx(env, tx, validateOutput, false); tx[sfSigningPubKey] = ""; @@ -780,8 +788,7 @@ class Simulate_test : public beast::unit_test::suite // transaction requires a non-base fee tx[sfFee] = (env.current()->fees().base * 2).jsonClipped().asString(); - tx[sfSigners][0u][sfSigner][jss::SigningPubKey] = - strHex(becky.pk().slice()); + tx[sfSigners][0u][sfSigner][jss::SigningPubKey] = ""; tx[sfSigners][0u][sfSigner][jss::TxnSignature] = ""; // test without autofill @@ -830,11 +837,12 @@ class Simulate_test : public beast::unit_test::suite tx[jss::Account] = env.master.human(); tx[jss::TransactionType] = jss::AccountSet; tx[sfDomain] = newDomain; + // master key is disabled, so this is invalid + tx[jss::SigningPubKey] = strHex(env.master.pk().slice()); // test with autofill testTx(env, tx, testSimulation); - tx[sfSigningPubKey] = ""; tx[sfTxnSignature] = ""; tx[sfSequence] = env.seq(env.master); tx[sfFee] = env.current()->fees().base.jsonClipped().asString(); @@ -844,6 +852,79 @@ class Simulate_test : public beast::unit_test::suite } } + void + testInvalidSingleAndMultiSigningTransaction() + { + testcase( + "Transaction with both single-signing SigningPubKey and " + "multi-signing Signers"); + + using namespace jtx; + Env env(*this); + static auto const newDomain = "123ABC"; + Account const alice("alice"); + Account const becky("becky"); + Account const carol("carol"); + env.fund(XRP(10000), alice); + env.close(); + + // set up valid multisign + env(signers(alice, 1, {{becky, 1}, {carol, 1}})); + env.close(); + + { + std::function const& + testSimulation = [&](Json::Value const& resp, + Json::Value const& tx) { + auto result = resp[jss::result]; + checkBasicReturnValidity( + result, + tx, + env.seq(env.master), + env.current()->fees().base * 2); + + BEAST_EXPECT(result[jss::engine_result] == "temINVALID"); + BEAST_EXPECT(result[jss::engine_result_code] == -277); + BEAST_EXPECT( + result[jss::engine_result_message] == + "The transaction is ill-formed."); + + BEAST_EXPECT( + !result.isMember(jss::meta) && + !result.isMember(jss::meta_blob)); + }; + + Json::Value tx; + + tx[jss::Account] = env.master.human(); + tx[jss::TransactionType] = jss::AccountSet; + tx[sfDomain] = newDomain; + // master key is disabled, so this is invalid + tx[jss::SigningPubKey] = strHex(env.master.pk().slice()); + tx[sfSigners] = Json::arrayValue; + { + Json::Value signer; + signer[jss::Account] = becky.human(); + Json::Value signerOuter; + signerOuter[sfSigner] = signer; + tx[sfSigners].append(signerOuter); + } + + // test with autofill + testTx(env, tx, testSimulation, false); + + tx[sfTxnSignature] = ""; + tx[sfSequence] = env.seq(env.master); + tx[sfFee] = env.current()->fees().base.jsonClipped().asString(); + tx[sfSigners][0u][sfSigner][jss::SigningPubKey] = + strHex(becky.pk().slice()); + tx[sfSigners][0u][sfSigner][jss::TxnSignature] = ""; + + // test without autofill + 
testTx(env, tx, testSimulation); + } + } + void testMultisignedBadPubKey() { @@ -1117,6 +1198,7 @@ public: testTransactionTecFailure(); testSuccessfulTransactionMultisigned(); testTransactionSigningFailure(); + testInvalidSingleAndMultiSigningTransaction(); testMultisignedBadPubKey(); testDeleteExpiredCredentials(); testSuccessfulTransactionNetworkID(); diff --git a/src/xrpld/app/tx/detail/Transactor.cpp b/src/xrpld/app/tx/detail/Transactor.cpp index 5f4cba5cf8..0db0484842 100644 --- a/src/xrpld/app/tx/detail/Transactor.cpp +++ b/src/xrpld/app/tx/detail/Transactor.cpp @@ -184,6 +184,12 @@ preflight2(PreflightContext const& ctx) return temINVALID; // LCOV_EXCL_LINE } } + + if (!ctx.tx.getSigningPubKey().empty()) + { + // trying to single-sign _and_ multi-sign a transaction + return temINVALID; + } return tesSUCCESS; } @@ -297,9 +303,9 @@ Transactor::checkFee(PreclaimContext const& ctx, XRPAmount baseFee) if (balance < feePaid) { - JLOG(ctx.j.trace()) << "Insufficient balance:" - << " balance=" << to_string(balance) - << " paid=" << to_string(feePaid); + JLOG(ctx.j.trace()) + << "Insufficient balance:" << " balance=" << to_string(balance) + << " paid=" << to_string(feePaid); if ((balance > beast::zero) && !ctx.view.open()) { @@ -571,13 +577,13 @@ Transactor::apply() NotTEC Transactor::checkSign(PreclaimContext const& ctx) { + auto const pkSigner = ctx.tx.getSigningPubKey(); // Ignore signature check on batch inner transactions if (ctx.tx.isFlag(tfInnerBatchTxn) && ctx.view.rules().enabled(featureBatch)) { // Defensive Check: These values are also checked in Batch::preflight - if (ctx.tx.isFieldPresent(sfTxnSignature) || - !ctx.tx.getSigningPubKey().empty() || + if (ctx.tx.isFieldPresent(sfTxnSignature) || !pkSigner.empty() || ctx.tx.isFieldPresent(sfSigners)) { return temINVALID_FLAG; // LCOV_EXCL_LINE @@ -585,25 +591,30 @@ Transactor::checkSign(PreclaimContext const& ctx) return tesSUCCESS; } + if ((ctx.flags & tapDRY_RUN) && pkSigner.empty() && + !ctx.tx.isFieldPresent(sfSigners)) + { + // simulate: skip signature validation when neither SigningPubKey nor + // Signers are provided + return tesSUCCESS; + } + auto const idAccount = ctx.tx[~sfDelegate].value_or(ctx.tx[sfAccount]); // If the pk is empty and not simulate or simulate and signers, // then we must be multi-signing. - if ((ctx.flags & tapDRY_RUN && ctx.tx.isFieldPresent(sfSigners)) || - (!(ctx.flags & tapDRY_RUN) && ctx.tx.getSigningPubKey().empty())) + if (ctx.tx.isFieldPresent(sfSigners)) { STArray const& txSigners(ctx.tx.getFieldArray(sfSigners)); return checkMultiSign(ctx.view, idAccount, txSigners, ctx.flags, ctx.j); } // Check Single Sign - auto const pkSigner = ctx.tx.getSigningPubKey(); - // This ternary is only needed to handle `simulate` XRPL_ASSERT( - (ctx.flags & tapDRY_RUN) || !pkSigner.empty(), + !pkSigner.empty(), "ripple::Transactor::checkSingleSign : non-empty signer or simulation"); - if (!(ctx.flags & tapDRY_RUN) && !publicKeyType(makeSlice(pkSigner))) + if (!publicKeyType(makeSlice(pkSigner))) { JLOG(ctx.j.trace()) << "checkSingleSign: signing public key type is unknown"; @@ -798,14 +809,15 @@ Transactor::checkMultiSign( // public key. 
auto const spk = txSigner.getFieldVL(sfSigningPubKey); - if (!(flags & tapDRY_RUN) && !publicKeyType(makeSlice(spk))) + // spk being non-empty in non-simulate is checked in + // STTx::checkMultiSign + if (!spk.empty() && !publicKeyType(makeSlice(spk))) { JLOG(j.trace()) << "checkMultiSign: signing public key type is unknown"; return tefBAD_SIGNATURE; } - // This ternary is only needed to handle `simulate` XRPL_ASSERT( (flags & tapDRY_RUN) || !spk.empty(), "ripple::Transactor::checkMultiSign : non-empty signer or " From ea17abb92a261da2cbf2bbcdbd6231a29f1fa9db Mon Sep 17 00:00:00 2001 From: yinyiqian1 Date: Wed, 11 Jun 2025 01:21:24 -0400 Subject: [PATCH 054/244] fix: Ensure delegate tests do not silently fail with batch (#5476) The tests that ensure `tfInnerBatchTxn` won't block delegated transactions silently fail in `Delegate_test.cpp`. This change removes these cases from that file and adds them to `Batch_test.cpp` instead where they do not silently fail, because there the batch delegate results are explicitly checked. Moving them to that file further avoids refactoring many helper functions. --- src/test/app/Batch_test.cpp | 141 +++++++++++++++++++++++++++++++++ src/test/app/Delegate_test.cpp | 124 ----------------------------- 2 files changed, 141 insertions(+), 124 deletions(-) diff --git a/src/test/app/Batch_test.cpp b/src/test/app/Batch_test.cpp index 6874a42c9e..0866bca2ef 100644 --- a/src/test/app/Batch_test.cpp +++ b/src/test/app/Batch_test.cpp @@ -3765,6 +3765,8 @@ class Batch_test : public beast::unit_test::suite } // delegated non atomic inner (AccountSet) + // this also makes sure tfInnerBatchTxn won't block delegated AccountSet + // with granular permission { test::jtx::Env env{*this, envconfig()}; @@ -3810,6 +3812,145 @@ class Batch_test : public beast::unit_test::suite BEAST_EXPECT(env.balance(alice) == preAlice - XRP(2) - batchFee); BEAST_EXPECT(env.balance(bob) == preBob + XRP(2)); } + + // delegated non atomic inner (MPTokenIssuanceSet) + // this also makes sure tfInnerBatchTxn won't block delegated + // MPTokenIssuanceSet with granular permission + { + test::jtx::Env env{*this, envconfig()}; + Account alice{"alice"}; + Account bob{"bob"}; + env.fund(XRP(100000), alice, bob); + env.close(); + + auto const mptID = makeMptID(env.seq(alice), alice); + MPTTester mpt(env, alice, {.fund = false}); + env.close(); + mpt.create({.flags = tfMPTCanLock}); + env.close(); + + // alice gives granular permission to bob of MPTokenIssuanceLock + env(delegate::set( + alice, bob, {"MPTokenIssuanceLock", "MPTokenIssuanceUnlock"})); + env.close(); + + auto const seq = env.seq(alice); + auto const batchFee = batch::calcBatchFee(env, 0, 2); + + Json::Value jv1; + jv1[sfTransactionType] = jss::MPTokenIssuanceSet; + jv1[sfAccount] = alice.human(); + jv1[sfDelegate] = bob.human(); + jv1[sfSequence] = seq + 1; + jv1[sfMPTokenIssuanceID] = to_string(mptID); + jv1[sfFlags] = tfMPTLock; + + Json::Value jv2; + jv2[sfTransactionType] = jss::MPTokenIssuanceSet; + jv2[sfAccount] = alice.human(); + jv2[sfDelegate] = bob.human(); + jv2[sfSequence] = seq + 2; + jv2[sfMPTokenIssuanceID] = to_string(mptID); + jv2[sfFlags] = tfMPTUnlock; + + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(jv1, seq + 1), + batch::inner(jv2, seq + 2)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "MPTokenIssuanceSet", "tesSUCCESS", txIDs[0], batchID}, + {2, "MPTokenIssuanceSet", 
"tesSUCCESS", txIDs[1], batchID}, + }; + validateClosedLedger(env, testCases); + } + + // delegated non atomic inner (TrustSet) + // this also makes sure tfInnerBatchTxn won't block delegated TrustSet + // with granular permission + { + test::jtx::Env env{*this, envconfig()}; + Account gw{"gw"}; + Account alice{"alice"}; + Account bob{"bob"}; + env.fund(XRP(10000), gw, alice, bob); + env(fset(gw, asfRequireAuth)); + env.close(); + env(trust(alice, gw["USD"](50))); + env.close(); + + env(delegate::set( + gw, bob, {"TrustlineAuthorize", "TrustlineFreeze"})); + env.close(); + + auto const seq = env.seq(gw); + auto const batchFee = batch::calcBatchFee(env, 0, 2); + + auto jv1 = trust(gw, gw["USD"](0), alice, tfSetfAuth); + jv1[sfDelegate] = bob.human(); + auto jv2 = trust(gw, gw["USD"](0), alice, tfSetFreeze); + jv2[sfDelegate] = bob.human(); + + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(gw, seq, batchFee, tfAllOrNothing), + batch::inner(jv1, seq + 1), + batch::inner(jv2, seq + 2)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "TrustSet", "tesSUCCESS", txIDs[0], batchID}, + {2, "TrustSet", "tesSUCCESS", txIDs[1], batchID}, + }; + validateClosedLedger(env, testCases); + } + + // inner transaction not authorized by the delegating account. + { + test::jtx::Env env{*this, envconfig()}; + Account gw{"gw"}; + Account alice{"alice"}; + Account bob{"bob"}; + env.fund(XRP(10000), gw, alice, bob); + env(fset(gw, asfRequireAuth)); + env.close(); + env(trust(alice, gw["USD"](50))); + env.close(); + + env(delegate::set( + gw, bob, {"TrustlineAuthorize", "TrustlineFreeze"})); + env.close(); + + auto const seq = env.seq(gw); + auto const batchFee = batch::calcBatchFee(env, 0, 2); + + auto jv1 = trust(gw, gw["USD"](0), alice, tfSetFreeze); + jv1[sfDelegate] = bob.human(); + auto jv2 = trust(gw, gw["USD"](0), alice, tfClearFreeze); + jv2[sfDelegate] = bob.human(); + + auto const [txIDs, batchID] = submitBatch( + env, + tesSUCCESS, + batch::outer(gw, seq, batchFee, tfIndependent), + batch::inner(jv1, seq + 1), + // tecNO_DELEGATE_PERMISSION: not authorized to clear freeze + batch::inner(jv2, seq + 2)); + env.close(); + + std::vector testCases = { + {0, "Batch", "tesSUCCESS", batchID, std::nullopt}, + {1, "TrustSet", "tesSUCCESS", txIDs[0], batchID}, + {2, "TrustSet", "tecNO_DELEGATE_PERMISSION", txIDs[1], batchID}, + }; + validateClosedLedger(env, testCases); + } } void diff --git a/src/test/app/Delegate_test.cpp b/src/test/app/Delegate_test.cpp index dc3264d777..ca13e4f4cd 100644 --- a/src/test/app/Delegate_test.cpp +++ b/src/test/app/Delegate_test.cpp @@ -913,37 +913,6 @@ class Delegate_test : public beast::unit_test::suite gw, gw["USD"](0), alice, tfSetfAuth | tfFullyCanonicalSig), delegate::as(bob)); } - - // tfInnerBatchTxn won't block delegated transaction - { - Env env(*this); - Account gw{"gw"}; - Account alice{"alice"}; - Account bob{"bob"}; - env.fund(XRP(10000), gw, alice, bob); - env(fset(gw, asfRequireAuth)); - env.close(); - env(trust(alice, gw["USD"](50))); - env.close(); - - env(delegate::set( - gw, bob, {"TrustlineAuthorize", "TrustlineFreeze"})); - env.close(); - - auto const seq = env.seq(gw); - auto const batchFee = batch::calcBatchFee(env, 0, 2); - auto jv1 = trust(gw, gw["USD"](0), alice, tfSetfAuth); - jv1[sfDelegate] = bob.human(); - auto jv2 = trust(gw, gw["USD"](0), alice, tfSetFreeze); - jv2[sfDelegate] = bob.human(); - - // batch::inner will set tfInnerBatchTxn, this should not - // block 
delegated transaction - env(batch::outer(gw, seq, batchFee, tfAllOrNothing), - batch::inner(jv1, seq + 1), - batch::inner(jv2, seq + 2)); - env.close(); - } } void @@ -1234,53 +1203,6 @@ class Delegate_test : public beast::unit_test::suite env(jt); BEAST_EXPECT((*env.le(alice))[sfDomain] == makeSlice(domain)); } - - // tfInnerBatchTxn won't block delegated transaction - { - Env env(*this); - Account alice{"alice"}; - Account bob{"bob"}; - env.fund(XRP(10000), alice, bob); - env.close(); - - env(delegate::set( - alice, bob, {"AccountDomainSet", "AccountEmailHashSet"})); - env.close(); - - auto const seq = env.seq(alice); - auto const batchFee = batch::calcBatchFee(env, 0, 3); - - auto jv1 = noop(alice); - std::string const domain1 = "example1.com"; - jv1[sfDomain] = strHex(domain1); - jv1[sfDelegate] = bob.human(); - jv1[sfSequence] = seq + 1; - - auto jv2 = noop(alice); - std::string const domain2 = "example2.com"; - jv2[sfDomain] = strHex(domain2); - jv2[sfDelegate] = bob.human(); - jv2[sfSequence] = seq + 2; - - // bob set domain back and add email hash for alice - auto jv3 = noop(alice); - std::string const mh("5F31A79367DC3137FADA860C05742EE6"); - jv3[sfDomain] = strHex(domain1); - jv3[sfEmailHash] = mh; - jv3[sfDelegate] = bob.human(); - jv3[sfSequence] = seq + 3; - - // batch::inner will set tfInnerBatchTxn, this should not - // block delegated transaction - env(batch::outer(alice, seq, batchFee, tfAllOrNothing), - batch::inner(jv1, seq + 1), - batch::inner(jv2, seq + 2), - batch::inner(jv3, seq + 3)); - env.close(); - - BEAST_EXPECT((*env.le(alice))[sfDomain] == makeSlice(domain1)); - BEAST_EXPECT(to_string((*env.le(alice))[sfEmailHash]) == mh); - } } void @@ -1411,52 +1333,6 @@ class Delegate_test : public beast::unit_test::suite .flags = tfMPTLock | tfFullyCanonicalSig, .delegate = bob}); } - - // tfInnerBatchTxn won't block delegated transaction - { - Env env(*this); - Account alice{"alice"}; - Account bob{"bob"}; - env.fund(XRP(100000), alice, bob); - env.close(); - - auto const mptID = makeMptID(env.seq(alice), alice); - MPTTester mpt(env, alice, {.fund = false}); - env.close(); - mpt.create({.flags = tfMPTCanLock}); - env.close(); - - // alice gives granular permission to bob of MPTokenIssuanceLock - env(delegate::set( - alice, bob, {"MPTokenIssuanceLock", "MPTokenIssuanceUnlock"})); - env.close(); - - auto const seq = env.seq(alice); - auto const batchFee = batch::calcBatchFee(env, 0, 2); - - Json::Value jv1; - jv1[sfTransactionType] = jss::MPTokenIssuanceSet; - jv1[sfAccount] = alice.human(); - jv1[sfDelegate] = bob.human(); - jv1[sfSequence] = seq + 1; - jv1[sfMPTokenIssuanceID] = to_string(mptID); - jv1[sfFlags] = tfMPTLock; - - Json::Value jv2; - jv2[sfTransactionType] = jss::MPTokenIssuanceSet; - jv2[sfAccount] = alice.human(); - jv2[sfDelegate] = bob.human(); - jv2[sfSequence] = seq + 2; - jv2[sfMPTokenIssuanceID] = to_string(mptID); - jv2[sfFlags] = tfMPTUnlock; - - // batch::inner will set tfInnerBatchTxn, this should not - // block delegated transaction - env(batch::outer(alice, seq, batchFee, tfAllOrNothing), - batch::inner(jv1, seq + 1), - batch::inner(jv2, seq + 2)); - env.close(); - } } void From edb4f0342c65bd739fee60b74566f3e771134c6c Mon Sep 17 00:00:00 2001 From: Michael Legleux Date: Wed, 11 Jun 2025 17:10:45 -0700 Subject: [PATCH 055/244] Set version to 2.5.0-rc2 --- src/libxrpl/protocol/BuildInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libxrpl/protocol/BuildInfo.cpp b/src/libxrpl/protocol/BuildInfo.cpp index 
b9b583046e..24485d1c16 100644 --- a/src/libxrpl/protocol/BuildInfo.cpp +++ b/src/libxrpl/protocol/BuildInfo.cpp @@ -36,7 +36,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "2.5.0-rc1" +char const* const versionString = "2.5.0-rc2" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From 8f2f5310e216d0ca6a068132730338f79686a709 Mon Sep 17 00:00:00 2001 From: Denis Angell Date: Thu, 19 Jun 2025 04:46:45 +0700 Subject: [PATCH 056/244] Fix: Improve error handling in Batch RPC response (#5503) --- src/libxrpl/protocol/STTx.cpp | 6 + src/test/app/Batch_test.cpp | 178 +++++++++++++++++++++++++++++- src/xrpld/app/misc/NetworkOPs.cpp | 2 +- src/xrpld/app/tx/detail/Batch.cpp | 58 ++++++---- 4 files changed, 222 insertions(+), 22 deletions(-) diff --git a/src/libxrpl/protocol/STTx.cpp b/src/libxrpl/protocol/STTx.cpp index ee26dd69de..615012dba4 100644 --- a/src/libxrpl/protocol/STTx.cpp +++ b/src/libxrpl/protocol/STTx.cpp @@ -760,6 +760,12 @@ isRawTransactionOkay(STObject const& st, std::string& reason) { TxType const tt = safe_cast(raw.getFieldU16(sfTransactionType)); + if (tt == ttBATCH) + { + reason = "Raw Transactions may not contain batch transactions."; + return false; + } + raw.applyTemplate(getTxFormat(tt)->getSOTemplate()); } catch (std::exception const& e) diff --git a/src/test/app/Batch_test.cpp b/src/test/app/Batch_test.cpp index 0866bca2ef..6ce95c56d0 100644 --- a/src/test/app/Batch_test.cpp +++ b/src/test/app/Batch_test.cpp @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -317,7 +318,8 @@ class Batch_test : public beast::unit_test::suite env.close(); } - // temINVALID: Batch: batch cannot have inner batch txn. + // DEFENSIVE: temINVALID: Batch: batch cannot have inner batch txn. + // ACTUAL: telENV_RPC_FAILED: isRawTransactionOkay() { auto const seq = env.seq(alice); auto const batchFee = batch::calcBatchFee(env, 0, 2); @@ -325,7 +327,7 @@ class Batch_test : public beast::unit_test::suite batch::inner( batch::outer(alice, seq, batchFee, tfAllOrNothing), seq), batch::inner(pay(alice, bob, XRP(1)), seq + 2), - ter(temINVALID)); + ter(telENV_RPC_FAILED)); env.close(); } @@ -3953,6 +3955,176 @@ class Batch_test : public beast::unit_test::suite } } + void + testValidateRPCResponse(FeatureBitset features) + { + // Verifying that the RPC response from submit includes + // the account_sequence_available, account_sequence_next, + // open_ledger_cost and validated_ledger_index fields. 
+ testcase("Validate RPC response"); + + using namespace jtx; + Env env(*this); + Account const alice("alice"); + Account const bob("bob"); + env.fund(XRP(10000), alice, bob); + env.close(); + + // tes + { + auto const baseFee = env.current()->fees().base; + auto const aliceSeq = env.seq(alice); + auto jtx = env.jt(pay(alice, bob, XRP(1))); + + Serializer s; + jtx.stx->add(s); + auto const jr = env.rpc("submit", strHex(s.slice()))[jss::result]; + env.close(); + + BEAST_EXPECT(jr.isMember(jss::account_sequence_available)); + BEAST_EXPECT( + jr[jss::account_sequence_available].asUInt() == aliceSeq + 1); + BEAST_EXPECT(jr.isMember(jss::account_sequence_next)); + BEAST_EXPECT( + jr[jss::account_sequence_next].asUInt() == aliceSeq + 1); + BEAST_EXPECT(jr.isMember(jss::open_ledger_cost)); + BEAST_EXPECT(jr[jss::open_ledger_cost] == to_string(baseFee)); + BEAST_EXPECT(jr.isMember(jss::validated_ledger_index)); + } + + // tec failure + { + auto const baseFee = env.current()->fees().base; + auto const aliceSeq = env.seq(alice); + env(fset(bob, asfRequireDest)); + auto jtx = env.jt(pay(alice, bob, XRP(1)), seq(aliceSeq)); + + Serializer s; + jtx.stx->add(s); + auto const jr = env.rpc("submit", strHex(s.slice()))[jss::result]; + env.close(); + + BEAST_EXPECT(jr.isMember(jss::account_sequence_available)); + BEAST_EXPECT( + jr[jss::account_sequence_available].asUInt() == aliceSeq + 1); + BEAST_EXPECT(jr.isMember(jss::account_sequence_next)); + BEAST_EXPECT( + jr[jss::account_sequence_next].asUInt() == aliceSeq + 1); + BEAST_EXPECT(jr.isMember(jss::open_ledger_cost)); + BEAST_EXPECT(jr[jss::open_ledger_cost] == to_string(baseFee)); + BEAST_EXPECT(jr.isMember(jss::validated_ledger_index)); + } + + // tem failure + { + auto const baseFee = env.current()->fees().base; + auto const aliceSeq = env.seq(alice); + auto jtx = env.jt(pay(alice, bob, XRP(1)), seq(aliceSeq + 1)); + + Serializer s; + jtx.stx->add(s); + auto const jr = env.rpc("submit", strHex(s.slice()))[jss::result]; + env.close(); + + BEAST_EXPECT(jr.isMember(jss::account_sequence_available)); + BEAST_EXPECT( + jr[jss::account_sequence_available].asUInt() == aliceSeq); + BEAST_EXPECT(jr.isMember(jss::account_sequence_next)); + BEAST_EXPECT(jr[jss::account_sequence_next].asUInt() == aliceSeq); + BEAST_EXPECT(jr.isMember(jss::open_ledger_cost)); + BEAST_EXPECT(jr[jss::open_ledger_cost] == to_string(baseFee)); + BEAST_EXPECT(jr.isMember(jss::validated_ledger_index)); + } + } + + void + testBatchCalculateBaseFee(FeatureBitset features) + { + using namespace jtx; + Env env(*this); + Account const alice("alice"); + Account const bob("bob"); + Account const carol("carol"); + env.fund(XRP(10000), alice, bob, carol); + env.close(); + + auto getBaseFee = [&](JTx const& jtx) -> XRPAmount { + Serializer s; + jtx.stx->add(s); + return Batch::calculateBaseFee(*env.current(), *jtx.stx); + }; + + // bad: Inner Batch transaction found + { + auto const seq = env.seq(alice); + XRPAmount const batchFee = batch::calcBatchFee(env, 0, 2); + auto jtx = env.jt( + batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner( + batch::outer(alice, seq, batchFee, tfAllOrNothing), seq), + batch::inner(pay(alice, bob, XRP(1)), seq + 2)); + XRPAmount const txBaseFee = getBaseFee(jtx); + BEAST_EXPECT(txBaseFee == XRPAmount(INITIAL_XRP)); + } + + // bad: Raw Transactions array exceeds max entries. 
+ { + auto const seq = env.seq(alice); + XRPAmount const batchFee = batch::calcBatchFee(env, 0, 2); + + auto jtx = env.jt( + batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1)), seq + 1), + batch::inner(pay(alice, bob, XRP(1)), seq + 2), + batch::inner(pay(alice, bob, XRP(1)), seq + 3), + batch::inner(pay(alice, bob, XRP(1)), seq + 4), + batch::inner(pay(alice, bob, XRP(1)), seq + 5), + batch::inner(pay(alice, bob, XRP(1)), seq + 6), + batch::inner(pay(alice, bob, XRP(1)), seq + 7), + batch::inner(pay(alice, bob, XRP(1)), seq + 8), + batch::inner(pay(alice, bob, XRP(1)), seq + 9)); + + XRPAmount const txBaseFee = getBaseFee(jtx); + BEAST_EXPECT(txBaseFee == XRPAmount(INITIAL_XRP)); + } + + // bad: Signers array exceeds max entries. + { + auto const seq = env.seq(alice); + XRPAmount const batchFee = batch::calcBatchFee(env, 0, 2); + + auto jtx = env.jt( + batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(10)), seq + 1), + batch::inner(pay(alice, bob, XRP(5)), seq + 2), + batch::sig( + bob, + carol, + alice, + bob, + carol, + alice, + bob, + carol, + alice, + alice)); + XRPAmount const txBaseFee = getBaseFee(jtx); + BEAST_EXPECT(txBaseFee == XRPAmount(INITIAL_XRP)); + } + + // good: + { + auto const seq = env.seq(alice); + XRPAmount const batchFee = batch::calcBatchFee(env, 0, 2); + auto jtx = env.jt( + batch::outer(alice, seq, batchFee, tfAllOrNothing), + batch::inner(pay(alice, bob, XRP(1)), seq + 1), + batch::inner(pay(bob, alice, XRP(2)), seq + 2)); + XRPAmount const txBaseFee = getBaseFee(jtx); + BEAST_EXPECT(txBaseFee == batchFee); + } + } + void testWithFeats(FeatureBitset features) { @@ -3983,6 +4155,8 @@ class Batch_test : public beast::unit_test::suite testBatchTxQueue(features); testBatchNetworkOps(features); testBatchDelegate(features); + testValidateRPCResponse(features); + testBatchCalculateBaseFee(features); } public: diff --git a/src/xrpld/app/misc/NetworkOPs.cpp b/src/xrpld/app/misc/NetworkOPs.cpp index c8197b2219..1b1bea3ad9 100644 --- a/src/xrpld/app/misc/NetworkOPs.cpp +++ b/src/xrpld/app/misc/NetworkOPs.cpp @@ -1701,7 +1701,7 @@ NetworkOPsImp::apply(std::unique_lock& batchLock) } } - if (!isTemMalformed(e.result) && validatedLedgerIndex) + if (validatedLedgerIndex) { auto [fee, accountSeq, availableSeq] = app_.getTxQ().getTxRequiredFeeAndSeq( diff --git a/src/xrpld/app/tx/detail/Batch.cpp b/src/xrpld/app/tx/detail/Batch.cpp index dcac889a5a..40991ea99a 100644 --- a/src/xrpld/app/tx/detail/Batch.cpp +++ b/src/xrpld/app/tx/detail/Batch.cpp @@ -61,7 +61,10 @@ Batch::calculateBaseFee(ReadView const& view, STTx const& tx) // LCOV_EXCL_START if (baseFee > maxAmount - view.fees().base) - throw std::overflow_error("XRPAmount overflow"); + { + JLOG(debugLog().error()) << "BatchTrace: Base fee overflow detected."; + return XRPAmount{INITIAL_XRP}; + } // LCOV_EXCL_STOP XRPAmount const batchBase = view.fees().base + baseFee; @@ -72,32 +75,36 @@ Batch::calculateBaseFee(ReadView const& view, STTx const& tx) { auto const& txns = tx.getFieldArray(sfRawTransactions); - XRPL_ASSERT( - txns.size() <= maxBatchTxCount, - "Raw Transactions array exceeds max entries."); - // LCOV_EXCL_START if (txns.size() > maxBatchTxCount) - throw std::length_error( - "Raw Transactions array exceeds max entries"); + { + JLOG(debugLog().error()) + << "BatchTrace: Raw Transactions array exceeds max entries."; + return XRPAmount{INITIAL_XRP}; + } // LCOV_EXCL_STOP for (STObject txn : txns) { STTx const stx = STTx{std::move(txn)}; - 
XRPL_ASSERT( - stx.getTxnType() != ttBATCH, "Inner Batch transaction found."); - // LCOV_EXCL_START if (stx.getTxnType() == ttBATCH) - throw std::invalid_argument("Inner Batch transaction found"); + { + JLOG(debugLog().error()) + << "BatchTrace: Inner Batch transaction found."; + return XRPAmount{INITIAL_XRP}; + } // LCOV_EXCL_STOP auto const fee = ripple::calculateBaseFee(view, stx); // LCOV_EXCL_START if (txnFees > maxAmount - fee) - throw std::overflow_error("XRPAmount overflow"); + { + JLOG(debugLog().error()) + << "BatchTrace: XRPAmount overflow in txnFees calculation."; + return XRPAmount{INITIAL_XRP}; + } // LCOV_EXCL_STOP txnFees += fee; } @@ -108,13 +115,14 @@ Batch::calculateBaseFee(ReadView const& view, STTx const& tx) if (tx.isFieldPresent(sfBatchSigners)) { auto const& signers = tx.getFieldArray(sfBatchSigners); - XRPL_ASSERT( - signers.size() <= maxBatchTxCount, - "Batch Signers array exceeds max entries."); // LCOV_EXCL_START if (signers.size() > maxBatchTxCount) - throw std::length_error("Batch Signers array exceeds max entries"); + { + JLOG(debugLog().error()) + << "BatchTrace: Batch Signers array exceeds max entries."; + return XRPAmount{INITIAL_XRP}; + } // LCOV_EXCL_STOP for (STObject const& signer : signers) @@ -128,16 +136,28 @@ Batch::calculateBaseFee(ReadView const& view, STTx const& tx) // LCOV_EXCL_START if (signerCount > 0 && view.fees().base > maxAmount / signerCount) - throw std::overflow_error("XRPAmount overflow"); + { + JLOG(debugLog().error()) + << "BatchTrace: XRPAmount overflow in signerCount calculation."; + return XRPAmount{INITIAL_XRP}; + } // LCOV_EXCL_STOP XRPAmount signerFees = signerCount * view.fees().base; // LCOV_EXCL_START if (signerFees > maxAmount - txnFees) - throw std::overflow_error("XRPAmount overflow"); + { + JLOG(debugLog().error()) + << "BatchTrace: XRPAmount overflow in signerFees calculation."; + return XRPAmount{INITIAL_XRP}; + } if (txnFees + signerFees > maxAmount - batchBase) - throw std::overflow_error("XRPAmount overflow"); + { + JLOG(debugLog().error()) + << "BatchTrace: XRPAmount overflow in total fee calculation."; + return XRPAmount{INITIAL_XRP}; + } // LCOV_EXCL_STOP // 10 drops per batch signature + sum of inner tx fees + batchBase From 8b3dcd41f7ad64e166e2f2eb3f9c4641319ed848 Mon Sep 17 00:00:00 2001 From: Valentin Balaschenko <13349202+vlntb@users.noreply.github.com> Date: Fri, 20 Jun 2025 16:44:42 +0100 Subject: [PATCH 057/244] refactor: Change getNodeFat Missing Node State Tree error into warning (#5455) --- src/xrpld/overlay/detail/PeerImp.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/xrpld/overlay/detail/PeerImp.cpp b/src/xrpld/overlay/detail/PeerImp.cpp index 68894fb234..1238833d0d 100644 --- a/src/xrpld/overlay/detail/PeerImp.cpp +++ b/src/xrpld/overlay/detail/PeerImp.cpp @@ -3440,7 +3440,7 @@ PeerImp::processLedgerRequest(std::shared_ptr const& m) if (!m->has_ledgerhash()) info += ", no hash specified"; - JLOG(p_journal_.error()) + JLOG(p_journal_.warn()) << "processLedgerRequest: getNodeFat with nodeId " << *shaMapNodeId << " and ledger info type " << info << " throws exception: " << e.what(); From fc0984d2863c3fdde124cc5be3575a6a8b0cdb44 Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Fri, 20 Jun 2025 12:24:34 -0400 Subject: [PATCH 058/244] Require a message on "Application::signalStop" (#5255) This change adds a message parameter to Application::signalStop for extra context. 
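For context, a minimal standalone sketch of the call-site pattern this change enforces. The `Application` class below is a stand-in used purely for illustration (it is not the real `ripple::Application`); the point is that the defaulted empty argument is gone, so every shutdown request must carry a human-readable reason, mirroring the updated call sites such as "RPC", "~AppBundle", and "Signal: <signum>".

```cpp
// Standalone illustration only: a stand-in Application class showing the
// new required-argument form of signalStop(). Not rippled code.
#include <iostream>
#include <string>

class Application
{
public:
    // Before this change the parameter was defaulted to "", allowing
    // contextless stop requests; now callers must state a reason that
    // ends up in the logs.
    void
    signalStop(std::string msg)
    {
        std::cout << "Received signal to stop: " << msg << '\n';
    }
};

int
main()
{
    Application app;
    app.signalStop("RPC");  // e.g. the reason passed by the Stop RPC handler
    return 0;
}
```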
--- src/test/jtx/impl/Env.cpp | 2 +- src/xrpld/app/main/Application.cpp | 8 ++++---- src/xrpld/app/main/Application.h | 2 +- src/xrpld/rpc/handlers/Stop.cpp | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/test/jtx/impl/Env.cpp b/src/test/jtx/impl/Env.cpp index 58d26da26e..7c17687eee 100644 --- a/src/test/jtx/impl/Env.cpp +++ b/src/test/jtx/impl/Env.cpp @@ -96,7 +96,7 @@ Env::AppBundle::~AppBundle() if (app) { app->getJobQueue().rendezvous(); - app->signalStop(); + app->signalStop("~AppBundle"); } if (thread.joinable()) thread.join(); diff --git a/src/xrpld/app/main/Application.cpp b/src/xrpld/app/main/Application.cpp index 7771086239..ea0b794116 100644 --- a/src/xrpld/app/main/Application.cpp +++ b/src/xrpld/app/main/Application.cpp @@ -285,7 +285,7 @@ public: config_->CONFIG_DIR), *this, logs_->journal("PerfLog"), - [this] { signalStop(); })) + [this] { signalStop("PerfLog"); })) , m_txMaster(*this) @@ -505,7 +505,7 @@ public: void run() override; void - signalStop(std::string msg = "") override; + signalStop(std::string msg) override; bool checkSigs() const override; void @@ -977,7 +977,7 @@ public: if (!config_->standalone() && !getRelationalDatabase().transactionDbHasSpace(*config_)) { - signalStop(); + signalStop("Out of transaction DB space"); } // VFALCO NOTE Does the order of calls matter? @@ -1193,7 +1193,7 @@ ApplicationImp::setup(boost::program_options::variables_map const& cmdline) JLOG(m_journal.info()) << "Received signal " << signum; if (signum == SIGTERM || signum == SIGINT) - signalStop(); + signalStop("Signal: " + to_string(signum)); }); auto debug_log = config_->getDebugLogFile(); diff --git a/src/xrpld/app/main/Application.h b/src/xrpld/app/main/Application.h index f3cff35d4b..36477cb75c 100644 --- a/src/xrpld/app/main/Application.h +++ b/src/xrpld/app/main/Application.h @@ -141,7 +141,7 @@ public: virtual void run() = 0; virtual void - signalStop(std::string msg = "") = 0; + signalStop(std::string msg) = 0; virtual bool checkSigs() const = 0; virtual void diff --git a/src/xrpld/rpc/handlers/Stop.cpp b/src/xrpld/rpc/handlers/Stop.cpp index 03e73fb6b7..95da27dc62 100644 --- a/src/xrpld/rpc/handlers/Stop.cpp +++ b/src/xrpld/rpc/handlers/Stop.cpp @@ -31,7 +31,7 @@ struct JsonContext; Json::Value doStop(RPC::JsonContext& context) { - context.app.signalStop(); + context.app.signalStop("RPC"); return RPC::makeObjectValue(systemName() + " server stopping"); } From e2fa5c1b7cfff1c6c1617ef3bc3e95cb65e0352d Mon Sep 17 00:00:00 2001 From: Alex Kremer Date: Fri, 20 Jun 2025 18:02:16 +0100 Subject: [PATCH 059/244] chore: Change libXRPL check conan remote to dev (#5482) This change aligns the Conan remote used by the libXRPL Clio compatibility check workflow with the recent changes applied to Clio. 
--- .github/workflows/libxrpl.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/libxrpl.yml b/.github/workflows/libxrpl.yml index 36ccad5c96..79cd872210 100644 --- a/.github/workflows/libxrpl.yml +++ b/.github/workflows/libxrpl.yml @@ -1,6 +1,6 @@ name: Check libXRPL compatibility with Clio env: - CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod + CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/dev CONAN_LOGIN_USERNAME_RIPPLE: ${{ secrets.CONAN_USERNAME }} CONAN_PASSWORD_RIPPLE: ${{ secrets.CONAN_TOKEN }} on: From 1e01cd34f7a216092ed779f291b43324c167167a Mon Sep 17 00:00:00 2001 From: Michael Legleux Date: Tue, 17 Jun 2025 10:38:07 -0700 Subject: [PATCH 060/244] Set version to 2.5.0 --- src/libxrpl/protocol/BuildInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libxrpl/protocol/BuildInfo.cpp b/src/libxrpl/protocol/BuildInfo.cpp index 24485d1c16..4cb6fbfd36 100644 --- a/src/libxrpl/protocol/BuildInfo.cpp +++ b/src/libxrpl/protocol/BuildInfo.cpp @@ -36,7 +36,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "2.5.0-rc2" +char const* const versionString = "2.5.0" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From c55ea56c5ef405bbadaa123ae02c7f69e9a8cdbd Mon Sep 17 00:00:00 2001 From: tequ Date: Tue, 24 Jun 2025 22:02:22 +0900 Subject: [PATCH 061/244] Add nftoken_id, nftoken_ids, offer_id to meta for transaction stream (#5230) --- .../xrpl/protocol/NFTSyntheticSerializer.h | 3 + .../protocol/NFTSyntheticSerializer.cpp | 2 + src/test/rpc/Subscribe_test.cpp | 221 ++++++++++++++++++ src/xrpld/app/misc/NetworkOPs.cpp | 2 + src/xrpld/rpc/handlers/AccountTx.cpp | 2 +- src/xrpld/rpc/handlers/Tx.cpp | 2 +- 6 files changed, 230 insertions(+), 2 deletions(-) diff --git a/include/xrpl/protocol/NFTSyntheticSerializer.h b/include/xrpl/protocol/NFTSyntheticSerializer.h index e57b3ff71c..cb33744485 100644 --- a/include/xrpl/protocol/NFTSyntheticSerializer.h +++ b/include/xrpl/protocol/NFTSyntheticSerializer.h @@ -28,6 +28,8 @@ namespace ripple { +namespace RPC { + /** Adds common synthetic fields to transaction-related JSON responses @@ -40,6 +42,7 @@ insertNFTSyntheticInJson( TxMeta const&); /** @} */ +} // namespace RPC } // namespace ripple #endif diff --git a/src/libxrpl/protocol/NFTSyntheticSerializer.cpp b/src/libxrpl/protocol/NFTSyntheticSerializer.cpp index 0c0a657512..64fa9319de 100644 --- a/src/libxrpl/protocol/NFTSyntheticSerializer.cpp +++ b/src/libxrpl/protocol/NFTSyntheticSerializer.cpp @@ -28,6 +28,7 @@ #include namespace ripple { +namespace RPC { void insertNFTSyntheticInJson( @@ -39,4 +40,5 @@ insertNFTSyntheticInJson( insertNFTokenOfferID(response[jss::meta], transaction, transactionMeta); } +} // namespace RPC } // namespace ripple diff --git a/src/test/rpc/Subscribe_test.cpp b/src/test/rpc/Subscribe_test.cpp index 32296c5d0a..e414b60f93 100644 --- a/src/test/rpc/Subscribe_test.cpp +++ b/src/test/rpc/Subscribe_test.cpp @@ -1354,6 +1354,225 @@ public: })); } + void + testNFToken(FeatureBitset features) + { + // `nftoken_id` is added for `transaction` stream in the `subscribe` + // response for NFTokenMint and NFTokenAcceptOffer. 
+ // + // `nftoken_ids` is added for `transaction` stream in the `subscribe` + // response for NFTokenCancelOffer + // + // `offer_id` is added for `transaction` stream in the `subscribe` + // response for NFTokenCreateOffer + // + // The values of these fields are dependent on the NFTokenID/OfferID + // changed in its corresponding transaction. We want to validate each + // response to make sure the synethic fields hold the right values. + + testcase("Test synthetic fields from Subscribe response"); + + using namespace test::jtx; + using namespace std::chrono_literals; + + Account const alice{"alice"}; + Account const bob{"bob"}; + Account const broker{"broker"}; + + Env env{*this, features}; + env.fund(XRP(10000), alice, bob, broker); + env.close(); + + auto wsc = test::makeWSClient(env.app().config()); + Json::Value stream; + stream[jss::streams] = Json::arrayValue; + stream[jss::streams].append("transactions"); + auto jv = wsc->invoke("subscribe", stream); + + // Verify `nftoken_id` value equals to the NFTokenID that was + // changed in the most recent NFTokenMint or NFTokenAcceptOffer + // transaction + auto verifyNFTokenID = [&](uint256 const& actualNftID) { + BEAST_EXPECT(wsc->findMsg(5s, [&](auto const& jv) { + uint256 nftID; + BEAST_EXPECT( + nftID.parseHex(jv[jss::meta][jss::nftoken_id].asString())); + return nftID == actualNftID; + })); + }; + + // Verify `nftoken_ids` value equals to the NFTokenIDs that were + // changed in the most recent NFTokenCancelOffer transaction + auto verifyNFTokenIDsInCancelOffer = + [&](std::vector actualNftIDs) { + BEAST_EXPECT(wsc->findMsg(5s, [&](auto const& jv) { + std::vector metaIDs; + std::transform( + jv[jss::meta][jss::nftoken_ids].begin(), + jv[jss::meta][jss::nftoken_ids].end(), + std::back_inserter(metaIDs), + [this](Json::Value id) { + uint256 nftID; + BEAST_EXPECT(nftID.parseHex(id.asString())); + return nftID; + }); + // Sort both array to prepare for comparison + std::sort(metaIDs.begin(), metaIDs.end()); + std::sort(actualNftIDs.begin(), actualNftIDs.end()); + + // Make sure the expect number of NFTs is correct + BEAST_EXPECT(metaIDs.size() == actualNftIDs.size()); + + // Check the value of NFT ID in the meta with the + // actual values + for (size_t i = 0; i < metaIDs.size(); ++i) + BEAST_EXPECT(metaIDs[i] == actualNftIDs[i]); + return true; + })); + }; + + // Verify `offer_id` value equals to the offerID that was + // changed in the most recent NFTokenCreateOffer tx + auto verifyNFTokenOfferID = [&](uint256 const& offerID) { + BEAST_EXPECT(wsc->findMsg(5s, [&](auto const& jv) { + uint256 metaOfferID; + BEAST_EXPECT(metaOfferID.parseHex( + jv[jss::meta][jss::offer_id].asString())); + return metaOfferID == offerID; + })); + }; + + // Check new fields in tx meta when for all NFTtransactions + { + // Alice mints 2 NFTs + // Verify the NFTokenIDs are correct in the NFTokenMint tx meta + uint256 const nftId1{ + token::getNextID(env, alice, 0u, tfTransferable)}; + env(token::mint(alice, 0u), txflags(tfTransferable)); + env.close(); + verifyNFTokenID(nftId1); + + uint256 const nftId2{ + token::getNextID(env, alice, 0u, tfTransferable)}; + env(token::mint(alice, 0u), txflags(tfTransferable)); + env.close(); + verifyNFTokenID(nftId2); + + // Alice creates one sell offer for each NFT + // Verify the offer indexes are correct in the NFTokenCreateOffer tx + // meta + uint256 const aliceOfferIndex1 = + keylet::nftoffer(alice, env.seq(alice)).key; + env(token::createOffer(alice, nftId1, drops(1)), + txflags(tfSellNFToken)); + env.close(); + 
verifyNFTokenOfferID(aliceOfferIndex1); + + uint256 const aliceOfferIndex2 = + keylet::nftoffer(alice, env.seq(alice)).key; + env(token::createOffer(alice, nftId2, drops(1)), + txflags(tfSellNFToken)); + env.close(); + verifyNFTokenOfferID(aliceOfferIndex2); + + // Alice cancels two offers she created + // Verify the NFTokenIDs are correct in the NFTokenCancelOffer tx + // meta + env(token::cancelOffer( + alice, {aliceOfferIndex1, aliceOfferIndex2})); + env.close(); + verifyNFTokenIDsInCancelOffer({nftId1, nftId2}); + + // Bobs creates a buy offer for nftId1 + // Verify the offer id is correct in the NFTokenCreateOffer tx meta + auto const bobBuyOfferIndex = + keylet::nftoffer(bob, env.seq(bob)).key; + env(token::createOffer(bob, nftId1, drops(1)), token::owner(alice)); + env.close(); + verifyNFTokenOfferID(bobBuyOfferIndex); + + // Alice accepts bob's buy offer + // Verify the NFTokenID is correct in the NFTokenAcceptOffer tx meta + env(token::acceptBuyOffer(alice, bobBuyOfferIndex)); + env.close(); + verifyNFTokenID(nftId1); + } + + // Check `nftoken_ids` in brokered mode + { + // Alice mints a NFT + uint256 const nftId{ + token::getNextID(env, alice, 0u, tfTransferable)}; + env(token::mint(alice, 0u), txflags(tfTransferable)); + env.close(); + verifyNFTokenID(nftId); + + // Alice creates sell offer and set broker as destination + uint256 const offerAliceToBroker = + keylet::nftoffer(alice, env.seq(alice)).key; + env(token::createOffer(alice, nftId, drops(1)), + token::destination(broker), + txflags(tfSellNFToken)); + env.close(); + verifyNFTokenOfferID(offerAliceToBroker); + + // Bob creates buy offer + uint256 const offerBobToBroker = + keylet::nftoffer(bob, env.seq(bob)).key; + env(token::createOffer(bob, nftId, drops(1)), token::owner(alice)); + env.close(); + verifyNFTokenOfferID(offerBobToBroker); + + // Check NFTokenID meta for NFTokenAcceptOffer in brokered mode + env(token::brokerOffers( + broker, offerBobToBroker, offerAliceToBroker)); + env.close(); + verifyNFTokenID(nftId); + } + + // Check if there are no duplicate nft id in Cancel transactions where + // multiple offers are cancelled for the same NFT + { + // Alice mints a NFT + uint256 const nftId{ + token::getNextID(env, alice, 0u, tfTransferable)}; + env(token::mint(alice, 0u), txflags(tfTransferable)); + env.close(); + verifyNFTokenID(nftId); + + // Alice creates 2 sell offers for the same NFT + uint256 const aliceOfferIndex1 = + keylet::nftoffer(alice, env.seq(alice)).key; + env(token::createOffer(alice, nftId, drops(1)), + txflags(tfSellNFToken)); + env.close(); + verifyNFTokenOfferID(aliceOfferIndex1); + + uint256 const aliceOfferIndex2 = + keylet::nftoffer(alice, env.seq(alice)).key; + env(token::createOffer(alice, nftId, drops(1)), + txflags(tfSellNFToken)); + env.close(); + verifyNFTokenOfferID(aliceOfferIndex2); + + // Make sure the metadata only has 1 nft id, since both offers are + // for the same nft + env(token::cancelOffer( + alice, {aliceOfferIndex1, aliceOfferIndex2})); + env.close(); + verifyNFTokenIDsInCancelOffer({nftId}); + } + + if (features[featureNFTokenMintOffer]) + { + uint256 const aliceMintWithOfferIndex1 = + keylet::nftoffer(alice, env.seq(alice)).key; + env(token::mint(alice), token::amount(XRP(0))); + env.close(); + verifyNFTokenOfferID(aliceMintWithOfferIndex1); + } + } + void run() override { @@ -1373,6 +1592,8 @@ public: testSubByUrl(); testHistoryTxStream(); testSubBookChanges(); + testNFToken(all); + testNFToken(all - featureNFTokenMintOffer); } }; diff --git 
a/src/xrpld/app/misc/NetworkOPs.cpp b/src/xrpld/app/misc/NetworkOPs.cpp index 1b1bea3ad9..a7ddbe912c 100644 --- a/src/xrpld/app/misc/NetworkOPs.cpp +++ b/src/xrpld/app/misc/NetworkOPs.cpp @@ -63,6 +63,7 @@ #include #include #include +#include #include #include #include @@ -3258,6 +3259,7 @@ NetworkOPsImp::transJson( jvObj[jss::meta] = meta->get().getJson(JsonOptions::none); RPC::insertDeliveredAmount( jvObj[jss::meta], *ledger, transaction, meta->get()); + RPC::insertNFTSyntheticInJson(jvObj, transaction, meta->get()); RPC::insertMPTokenIssuanceID( jvObj[jss::meta], transaction, meta->get()); } diff --git a/src/xrpld/rpc/handlers/AccountTx.cpp b/src/xrpld/rpc/handlers/AccountTx.cpp index 26c8065edf..d5df40303b 100644 --- a/src/xrpld/rpc/handlers/AccountTx.cpp +++ b/src/xrpld/rpc/handlers/AccountTx.cpp @@ -348,7 +348,7 @@ populateJsonResponse( txnMeta->getJson(JsonOptions::include_date); insertDeliveredAmount( jvObj[jss::meta], context, txn, *txnMeta); - insertNFTSyntheticInJson(jvObj, sttx, *txnMeta); + RPC::insertNFTSyntheticInJson(jvObj, sttx, *txnMeta); RPC::insertMPTokenIssuanceID( jvObj[jss::meta], sttx, *txnMeta); } diff --git a/src/xrpld/rpc/handlers/Tx.cpp b/src/xrpld/rpc/handlers/Tx.cpp index 3db71d9002..d43a699ab3 100644 --- a/src/xrpld/rpc/handlers/Tx.cpp +++ b/src/xrpld/rpc/handlers/Tx.cpp @@ -270,7 +270,7 @@ populateJsonResponse( response[jss::meta] = meta->getJson(JsonOptions::none); insertDeliveredAmount( response[jss::meta], context, result.txn, *meta); - insertNFTSyntheticInJson(response, sttx, *meta); + RPC::insertNFTSyntheticInJson(response, sttx, *meta); RPC::insertMPTokenIssuanceID(response[jss::meta], sttx, *meta); } } From 42fd74b77bc924c2488548fee5aa88d533234059 Mon Sep 17 00:00:00 2001 From: Bart Date: Tue, 24 Jun 2025 13:10:00 -0400 Subject: [PATCH 062/244] Removes release notes from codebase (#5508) --- RELEASENOTES.md | 4817 ----------------------------------------------- 1 file changed, 4817 deletions(-) delete mode 100644 RELEASENOTES.md diff --git a/RELEASENOTES.md b/RELEASENOTES.md deleted file mode 100644 index 6bc7beccc7..0000000000 --- a/RELEASENOTES.md +++ /dev/null @@ -1,4817 +0,0 @@ -# Release Notes - -![XRP](docs/images/xrp-text-mark-black-small@2x.png) - -This document contains the release notes for `rippled`, the reference server implementation of the XRP Ledger protocol. To learn more about how to build, run or update a `rippled` server, visit https://xrpl.org/install-rippled.html - -Have new ideas? Need help with setting up your node? [Please open an issue here](https://github.com/xrplf/rippled/issues/new/choose). -## Full Changelog - -### Amendments - -The following amendments are open for voting with this release: - -- **DynamicNFT (XLS-46)** - Adds the ability to mint mutable `NFToken` objects whose URI can be changed. ([#5048](https://github.com/XRPLF/rippled/pull/5048)) -- **PermissionedDomains (XLS-80)** - Adds Permissioned Domains, which act as part of broader systems on the XRP Ledger to restrict access to satisfy compliance rules. ([#5161](https://github.com/XRPLF/rippled/pull/5161)) -- **DeepFreeze (XLS-77)** - Adds the ability to deep freeze trust lines, enabling token issuers to block the transfer of assets for holders who have been deep frozen. ([#5187](https://github.com/XRPLF/rippled/pull/5187)) -- **fixFrozenLPTokenTransfer** - Prohibits the transfer of LP tokens when the associated liquidity pool contains at least one frozen asset. 
([#5227](https://github.com/XRPLF/rippled/pull/5227)) -- **fixInvalidTxFlags** - Adds transaction flag checking for `CredentialCreate`, `CredentialAccept`, and `CredentialDelete` transactions. ([#5250](https://github.com/XRPLF/rippled/pull/5250)) - - -### New Features - -- Added a new `simulate` API method to execute dry runs of transactions and see the simulated metadata. ([#5069](https://github.com/XRPLF/rippled/pull/5069), [#5265](https://github.com/XRPLF/rippled/pull/5265)) -- Added the ability to specify MPTs when defining assets in transactions. ([#5200](https://github.com/XRPLF/rippled/pull/5200)) -- Added a `state` alias for `ripple_state` in the `ledger_entry` API method. Also refactored `LedgerEntry.cpp` to make it easier to read. ([#5199](https://github.com/XRPLF/rippled/pull/5199)) -- Improved UNL security by enabling validators to set a minimum number of UNL publishers to agree on validators. ([#5112](https://github.com/XRPLF/rippled/pull/5112)) -- Updated the XRPL Foundation UNL keys. ([#5289](https://github.com/XRPLF/rippled/pull/5289)) -- Added a new XRPL Foundation subdomain to enable a staged migration without modifying the key for the current UNL list. ([#5326](https://github.com/XRPLF/rippled/pull/5326)) -- Added support to filter ledger entry types by their canonical names in the `ledger`, `ledger_data`, and `account_objects` API methods. ([#5271](https://github.com/XRPLF/rippled/pull/5271)) -- Added detailed logging for each validation and proposal received from the network. ([#5291](https://github.com/XRPLF/rippled/pull/5291)) -- Improved git commit hash lookups when checking the version of a `rippled` debug build. Also added git commit hash info when using the `server_info` API method on an admin connection. ([#5225](https://github.com/XRPLF/rippled/pull/5225)) - - -### Bug fixes - -- Fixed an issue with overlapping data types in the `Expected` class. ([#5218](https://github.com/XRPLF/rippled/pull/5218)) -- Fixed an issue that prevented `rippled` from building on Windows with VS2022. ([#5197](https://github.com/XRPLF/rippled/pull/5197)) -- Fixed `server_definitions` prefixes. ([#5231](https://github.com/XRPLF/rippled/pull/5231)) -- Added missing dependency installations for generic MasOS runners. ([#5233](https://github.com/XRPLF/rippled/pull/5233)) -- Updated deprecated Github actions. ([#5241](https://github.com/XRPLF/rippled/pull/5241)) -- Fixed a failing assert scenario when submitting the `connect` admin RPC. ([#5235](https://github.com/XRPLF/rippled/pull/5235)) -- Fixed the levelization script to ignore single-line comments during dependency analysis. ([#5194](https://github.com/XRPLF/rippled/pull/5194)) -- Fixed the assert name used in `PermissionedDomainDelete`. ([#5245](https://github.com/XRPLF/rippled/pull/5245)) -- Fixed macOS unit tests. ([#5196](https://github.com/XRPLF/rippled/pull/5196)) -- Fixed an issue with validators not accurately reflecting amendment votes. Also added debug logging of amendment votes. ([#5173](https://github.com/XRPLF/rippled/pull/5173), [#5312](https://github.com/XRPLF/rippled/pull/5312)) -- Fixed a potential issue with double-charging fees. ([#5269](https://github.com/XRPLF/rippled/pull/5269)) -- Removed the `new parent hash` assert and replaced it with a log message. ([#5313](https://github.com/XRPLF/rippled/pull/5313)) -- Fixed an issue that prevented previously-failed inbound ledgers to not be acquired if a new trusted proposal arrived. 
([#5318](https://github.com/XRPLF/rippled/pull/5318)) - - -### Other Improvements - -- Added unit tests for `AccountID` handling. ([#5174](https://github.com/XRPLF/rippled/pull/5174)) -- Added enforced levelization in `libxrpl` with CMake. ([#5199](https://github.com/XRPLF/rippled/pull/5111)) -- Updated `libxrpl` and all submodules to use the same compiler options. ([#5228](https://github.com/XRPLF/rippled/pull/5228)) -- Added Antithesis instrumentation. ([#5042](https://github.com/XRPLF/rippled/pull/5042), [#5213](https://github.com/XRPLF/rippled/pull/5213)) -- Added `rpcName` to the `LEDGER_ENTRY` macro to help prevent future bugs. ([#5202](https://github.com/XRPLF/rippled/pull/5202)) -- Updated the contribution guidelines to introduce a new workflow that avoids code freezes. Also added scripts that can be used by maintainers in branch management, and a CI job to check that code is consistent across the three main branches: `master`, `release`, and `develop`. ([#5215](https://github.com/XRPLF/rippled/pull/5215)) -- Added unit tests to check for caching issues fixed in `rippled 2.3.0`. ([#5242](https://github.com/XRPLF/rippled/pull/5242)) -- Cleaned up the API changelog. ([#5207](https://github.com/XRPLF/rippled/pull/5207)) -- Improved logs readability. ([#5251](https://github.com/XRPLF/rippled/pull/5251)) -- Updated Visual Studio CI to VS 2022, and added VS Debug builds. ([#5240](https://github.com/XRPLF/rippled/pull/5240)) -- Updated the `secp256k1` library to version 0.6.0. ([#5254](https://github.com/XRPLF/rippled/pull/5254)) -- Changed the `[port_peer]` parameter in `rippled` example config back to `51235`; also added the recommendation to use the default port of `2459` for new deployments. ([#5290](https://github.com/XRPLF/rippled/pull/5290), [#5299](https://github.com/XRPLF/rippled/pull/5299)) -- Improved CI management. ([#5268](https://github.com/XRPLF/rippled/pull/5268)) -- Updated the git commit message rules for contributors. ([#5283](https://github.com/XRPLF/rippled/pull/5283)) -- Fixed unnecessary `setCurrentThreadName` calls. ([#5280](https://github.com/XRPLF/rippled/pull/5280)) -- Added a check to prevent permissioned domains from being created in the event the Permissioned Domains amendement is enabled before the Credentials amendement. ([#5275](https://github.com/XRPLF/rippled/pull/5275)) -- Updated Conan dependencies. ([#5256](https://github.com/XRPLF/rippled/pull/5256)) -- Fixed minor typos in code comments. ([#5279](https://github.com/XRPLF/rippled/pull/5279)) -- Fixed incorrect build instructions. ([#5274](https://github.com/XRPLF/rippled/pull/5274)) -- Refactored `rotateWithLock()` to not hold a lock during callbacks. ([#5276](https://github.com/XRPLF/rippled/pull/5276)) -- Cleaned up debug logging by combining multiple data points into a single message. ([#5302](https://github.com/XRPLF/rippled/pull/5302)) -- Updated build flags to fix performance regressions. 
([#5325](https://github.com/XRPLF/rippled/pull/5325)) - - -## Credits - -The following people contributed directly to this release: - -- Aanchal Malhotra -- Bart Thomee <11445373+bthomee@users.noreply.github.com> -- Bronek Kozicki -- code0xff -- Darius Tumas -- David Fuelling -- Donovan Hide -- Ed Hennis -- Elliot Lee -- Javier Romero -- Kenny Lei -- Mark Travis <7728157+mtrippled@users.noreply.github.com> -- Mayukha Vadari -- Michael Legleux -- Oleksandr <115580134+oleks-rip@users.noreply.github.com> -- Qi Zhao -- Ramkumar Srirengaram Gunasegharan -- Shae Wang -- Shawn Xie -- Sophia Xie -- Vijay Khanna Raviraj -- Vladislav Vysokikh -- Xun Zhao - -## Bug Bounties and Responsible Disclosures - -We welcome reviews of the `rippled` code and urge researchers to responsibly disclose any issues they may find. - -To report a bug, please send a detailed report to: - -# Version 2.3.1 - -Version 2.3.1 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. -This is a hotfix release that includes the following updates: -- Fix an erroneous high fee penalty that peers could incur for sending older transactions. -- Update to the fees charged for imposing a load on the server. -- Prevent the relaying of internal pseudo-transactions. - - Before: Pseudo-transactions received from a peer will fail the signature check, even if they were requested (using TMGetObjectByHash) because they have no signature. This causes the peer to be charged for an invalid signature. - - After: Pseudo-transactions, are put into the global cache (TransactionMaster) only. If the transaction is not part of a TMTransactions batch, the peer is charged an unwanted data fee. These fees will not be a problem in the normal course of operations but should dissuade peers from behaving badly by sending a bunch of junk. -- Improved logging now specifies the reason for the fee charged to the peer. - -[Sign Up for Future Release Announcements](https://groups.google.com/g/ripple-server) - - - -## Action Required - -If you run an XRP Ledger validator, upgrade to version 2.3.1 as soon as possible to ensure stable and uninterrupted network behavior. - -## Changelog - -### Amendments and New Features - -- None - -### Bug Fixes and Performance Improvements - -- Change the charged fee for sending older transactions from feeInvalidSignature to feeUnwantedData. [#5243](https://github.com/XRPLF/rippled/pull/5243) - -### Docs and Build System - -- None - -### GitHub - -The public source code repository for `rippled` is hosted on GitHub at . - -We welcome all contributions and invite everyone to join the community of XRP Ledger developers to help build the Internet of Value. - - -## Credits - -The following people contributed directly to this release: - -Ed Hennis -JoelKatz -Sophia Xie <106177003+sophiax851@users.noreply.github.com> -Valentin Balaschenko <13349202+vlntb@users.noreply.github.com> - - -Bug Bounties and Responsible Disclosures: - -We welcome reviews of the `rippled` code and urge researchers to responsibly disclose any issues they may find. - -To report a bug, please send a detailed report to: - -# Version 2.3.0 - -Version 2.3.0 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. This release includes 8 new amendments, including Multi-Purpose Tokens, Credentials, Clawback support for AMMs, and the ability to make offers as part of minting NFTs. 
Additionally, this release includes important fixes for stability, so server operators are encouraged to upgrade as soon as possible. - - -## Action Required - -If you run an XRP Ledger server, upgrade to version 2.3.0 as soon as possible to ensure service continuity. - -Additionally, new amendments are now open for voting according to the XRP Ledger's [amendment process](https://xrpl.org/amendments.html), which enables protocol changes following two weeks of >80% support from trusted validators. The exact time that protocol changes take effect depends on the voting decisions of the decentralized network. - -## Full Changelog - -### Amendments - -The following amendments are open for voting with this release: - -- **XLS-70 Credentials** - Users can issue Credentials on the ledger and use Credentials to pre-approve incoming payments when using Deposit Authorization instead of individually approving payers. ([#5103](https://github.com/XRPLF/rippled/pull/5103)) - - related fix: #5189 (https://github.com/XRPLF/rippled/pull/5189) -- **XLS-33 Multi-Purpose Tokens** - A new type of fungible token optimized for institutional DeFi including stablecoins. ([#5143](https://github.com/XRPLF/rippled/pull/5143)) -- **XLS-37 AMM Clawback** - Allows clawback-enabled tokens to be used in AMMs, with appropriate guardrails. ([#5142](https://github.com/XRPLF/rippled/pull/5142)) -- **XLS-52 NFTokenMintOffer** - Allows creating an NFT sell offer as part of minting a new NFT. ([#4845](https://github.com/XRPLF/rippled/pull/4845)) -- **fixAMMv1_2** - Fixes two bugs in Automated Market Maker (AMM) transaction processing. ([#5176](https://github.com/XRPLF/rippled/pull/5176)) -- **fixNFTokenPageLinks** - Fixes a bug that can cause NFT directories to have missing links, and introduces a transaction to repair corrupted ledger state. ([#4945](https://github.com/XRPLF/rippled/pull/4945)) -- **fixEnforceNFTokenTrustline** - Fixes two bugs in the interaction between NFT offers and trust lines. ([#4946](https://github.com/XRPLF/rippled/pull/4946)) -- **fixInnerObjTemplate2** - Standardizes the way inner objects are enforced across all transaction and ledger data. ([#5047](https://github.com/XRPLF/rippled/pull/5047)) - -The following amendment is partially implemented but not open for voting: - -- **InvariantsV1_1** - Adds new invariants to ensure transactions process as intended, starting with an invariant to ensure that ledger entries owned by an account are deleted when the account is deleted. ([#4663](https://github.com/XRPLF/rippled/pull/4663)) - -### New Features - -- Allow configuration of SQLite database page size. ([#5135](https://github.com/XRPLF/rippled/pull/5135), [#5140](https://github.com/XRPLF/rippled/pull/5140)) -- In the `libxrpl` C++ library, provide a list of known amendments. ([#5026](https://github.com/XRPLF/rippled/pull/5026)) - -### Deprecations - -- History Shards are removed. ([#5066](https://github.com/XRPLF/rippled/pull/5066)) -- Reporting mode is removed. ([#5092](https://github.com/XRPLF/rippled/pull/5092)) - -For users wanting to store more ledger history, it is recommended to run a Clio server instead. - -### Bug fixes - -- Fix a crash in debug builds when amm_info request contains an invalid AMM account ID. ([#5188](https://github.com/XRPLF/rippled/pull/5188)) -- Fix a crash caused by a race condition in peer-to-peer code. ([#5071](https://github.com/XRPLF/rippled/pull/5071)) -- Fix a crash in certain situations -- Fix several bugs in the book_changes API method. 
([#5096](https://github.com/XRPLF/rippled/pull/5096)) -- Fix bug triggered by providing an invalid marker to the account_nfts API method. ([#5045](https://github.com/XRPLF/rippled/pull/5045)) -- Accept lower-case hexadecimal in compact transaction identifier (CTID) parameters in API methods. ([#5049](https://github.com/XRPLF/rippled/pull/5049)) -- Disallow filtering by types that an account can't own in the account_objects API method. ([#5056](https://github.com/XRPLF/rippled/pull/5056)) -- Fix error code returned by the feature API method when providing an invalid parameter. ([#5063](https://github.com/XRPLF/rippled/pull/5063)) -- (API v3) Fix error code returned by amm_info when providing invalid parameters. ([#4924](https://github.com/XRPLF/rippled/pull/4924)) - -### Other Improvements - -- Adds a new default hub, hubs.xrpkuwait.com, to the config file and bootstrapping code. ([#5169](https://github.com/XRPLF/rippled/pull/5169)) -- Improve error message when commandline interface fails with `rpcInternal` because there was no response from the server. ([#4959](https://github.com/XRPLF/rippled/pull/4959)) -- Add tools for debugging specific transactions via replay. ([#5027](https://github.com/XRPLF/rippled/pull/5027), [#5087](https://github.com/XRPLF/rippled/pull/5087)) -- Major reorganization of source code files. ([#4997](https://github.com/XRPLF/rippled/pull/4997)) -- Add new unit tests. ([#4886](https://github.com/XRPLF/rippled/pull/4886)) -- Various improvements to build tools and contributor documentation. ([#5001](https://github.com/XRPLF/rippled/pull/5001), [#5028](https://github.com/XRPLF/rippled/pull/5028), [#5052](https://github.com/XRPLF/rippled/pull/5052), [#5091](https://github.com/XRPLF/rippled/pull/5091), [#5084](https://github.com/XRPLF/rippled/pull/5084), [#5120](https://github.com/XRPLF/rippled/pull/5120), [#5010](https://github.com/XRPLF/rippled/pull/5010). [#5055](https://github.com/XRPLF/rippled/pull/5055), [#5067](https://github.com/XRPLF/rippled/pull/5067), [#5061](https://github.com/XRPLF/rippled/pull/5061), [#5072](https://github.com/XRPLF/rippled/pull/5072), [#5044](https://github.com/XRPLF/rippled/pull/5044) ) -- Various code cleanup and refactoring. ([#4509](https://github.com/XRPLF/rippled/pull/4509), [#4521](https://github.com/XRPLF/rippled/pull/4521), [#4856](https://github.com/XRPLF/rippled/pull/4856), [#5190](https://github.com/XRPLF/rippled/pull/5190), [#5081](https://github.com/XRPLF/rippled/pull/5081), [#5053](https://github.com/XRPLF/rippled/pull/5053), [#5058](https://github.com/XRPLF/rippled/pull/5058), [#5122](https://github.com/XRPLF/rippled/pull/5122), [#5059](https://github.com/XRPLF/rippled/pull/5059), [#5041](https://github.com/XRPLF/rippled/pull/5041)) - - -Bug Bounties and Responsible Disclosures: - -We welcome reviews of the `rippled` code and urge researchers to responsibly disclose any issues they may find. - -To report a bug, please send a detailed report to: - - -# Version 2.2.3 - -Version 2.2.3 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. This release fixes a problem that can cause full-history servers to run out of space in their SQLite databases, depending on configuration. There are no new amendments in this release. - -[Sign Up for Future Release Announcements](https://groups.google.com/g/ripple-server) - - - -## Background - -The `rippled` server uses a SQLite database for tracking transactions, in addition to the main data store (usually NuDB) for ledger data. 
In servers keeping a large amount of history, this database can run out of space based on the configured number and size of database pages, even if the machine has disk space available. Based on the size of full history on Mainnet, servers with the default SQLite page size of 4096 may now run out of space if they store full history. In this case, your server may shut down with an error such as the following: - -```text -Free SQLite space for transaction db is less than 512MB. To fix this, rippled - must be executed with the vacuum parameter before restarting. - Note that this activity can take multiple days, depending on database size. -``` - -The exact timing of when a server runs out of space can vary based on a few factors. Server operators who encountered a similar problem in 2018 and followed steps to [increase the SQLite transaction database page size issue](../../../docs/infrastructure/troubleshooting/fix-sqlite-tx-db-page-size-issue) may not encounter this problem at all. The `--vacuum` commandline option to `rippled` from that time may work to free up space in the database, but requires extended downtime. - -Version 2.2.3 of `rippled` reconfigures the maximum number of SQLite pages so that the issue does not occur. - -Clio servers providing full history are not affected by this issue. - - -## Action Required - -If you run an [XRP Ledger full history server](https://xrpl.org/docs/infrastructure/configuration/data-retention/configure-full-history), upgrading to version 2.2.3 may prevent the server from crashing when `transaction.db` exceeds approximately 8.7 terabytes. - -Additionally, five amendments introduced in version 2.2.0 are open for voting according to the XRP Ledger's [amendment process](https://xrpl.org/amendments.html), which enables protocol changes following two weeks of >80% support from trusted validators. If you operate an XRP Ledger server older than version 2.2.0, upgrade by Sep 23, 2024 to ensure service continuity. The exact time that protocol changes take effect depends on the voting decisions of the decentralized network. - -## Changelog - -### Bug Fixes - -- Update SQLite3 max_page_count to match current defaults ([#5114](https://github.com/XRPLF/rippled/pull/5114)) - -### GitHub - -The public source code repository for `rippled` is hosted on GitHub at . - -We welcome all contributions and invite everyone to join the community of XRP Ledger developers to help build the Internet of Value. - - -## Credits - -The following people contributed directly to this release: - -J. Scott Branson - - -Bug Bounties and Responsible Disclosures: - -We welcome reviews of the `rippled` code and urge researchers to responsibly disclose any issues they may find. - -To report a bug, please send a detailed report to: - - -# Version 2.2.2 - -Version 2.2.2 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. This release fixes an ongoing issue with Mainnet where validators can stall during consensus processing due to lock contention, preventing ledgers from being validated for up to two minutes. There are no new amendments in this release. - -[Sign Up for Future Release Announcements](https://groups.google.com/g/ripple-server) - - - -## Action Required - -If you run an XRP Ledger validator, upgrade to version 2.2.2 as soon as possible to ensure stable and uninterrupted network behavior. 
- -Additionally, five amendments introduced in version 2.2.0 are open for voting according to the XRP Ledger's [amendment process](https://xrpl.org/amendments.html), which enables protocol changes following two weeks of >80% support from trusted validators. If you operate an XRP Ledger server older than version 2.2.0, upgrade by September 17, 2024 to ensure service continuity. The exact time that protocol changes take effect depends on the voting decisions of the decentralized network. Version 2.2.2 is recommended because of known bugs affecting stability of versions 2.2.0 and 2.2.1. - -If you operate a Clio server, Clio needs to be updated to 2.1.2 before updating to rippled 2.2.0. Clio will be blocked if it is not updated. - -## Changelog - -### Amendments and New Features - -- None - -### Bug Fixes and Performance Improvements - -- Allow only 1 job queue slot for acquiring inbound ledger [#5115](https://github.com/XRPLF/rippled/pull/5115) ([7741483](https://github.com/XRPLF/rippled/commit/774148389467781aca7c01bac90af2fba870570c)) - -- Allow only 1 job queue slot for each validation ledger check [#5115](https://github.com/XRPLF/rippled/pull/5115) ([fbbea9e](https://github.com/XRPLF/rippled/commit/fbbea9e6e25795a8a6bd1bf64b780771933a9579)) - -### Other improvements - - - Track latencies of certain code blocks, and log if they take too long [#5115](https://github.com/XRPLF/rippled/pull/5115) ([00ed7c9](https://github.com/XRPLF/rippled/commit/00ed7c942436f02644a13169002b5123f4e2a116)) - -### Docs and Build System - -- None - -### GitHub - -The public source code repository for `rippled` is hosted on GitHub at . - -We welcome all contributions and invite everyone to join the community of XRP Ledger developers to help build the Internet of Value. - - -## Credits - -The following people contributed directly to this release: - -Mark Travis -Valentin Balaschenko <13349202+vlntb@users.noreply.github.com> - -Bug Bounties and Responsible Disclosures: - -We welcome reviews of the `rippled` code and urge researchers to responsibly disclose any issues they may find. - -To report a bug, please send a detailed report to: - -# Version 2.2.1 - -Version 2.2.1 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. This release fixes a critical bug introduced in 2.2.0 handling some types of RPC requests. - -[Sign Up for Future Release Announcements](https://groups.google.com/g/ripple-server) - - - -## Action Required - -If you run an XRP Ledger validator, upgrade to version 2.2.1 as soon as possible to ensure stable and uninterrupted network behavior. - -Additionally, five amendments introduced in version 2.2.0 are open for voting according to the XRP Ledger's [amendment process](https://xrpl.org/amendments.html), which enables protocol changes following two weeks of >80% support from trusted validators. If you operate an XRP Ledger server older than version 2.2.0, upgrade by August 14, 2024 to ensure service continuity. The exact time that protocol changes take effect depends on the voting decisions of the decentralized network. Version 2.2.1 is recommended because of known bugs affecting stability of versions 2.2.0. - -If you operate a Clio server, Clio needs to be updated to 2.2.2 before updating to rippled 2.2.1. Clio will be blocked if it is not updated. - -## Changelog - -### Amendments and New Features - -- None - -### Bug Fixes and Performance Improvements - -- Improve error handling in some RPC commands. 
[#5078](https://github.com/XRPLF/rippled/pull/5078) - -- Use error codes throughout fast Base58 implementation. [#5078](https://github.com/XRPLF/rippled/pull/5078) - -### Docs and Build System - -- None - -### GitHub - -The public source code repository for `rippled` is hosted on GitHub at . - -We welcome all contributions and invite everyone to join the community of XRP Ledger developers to help build the Internet of Value. - - -## Credits - -The following people contributed directly to this release: - -John Freeman -Mayukha Vadari - -Bug Bounties and Responsible Disclosures: - -We welcome reviews of the `rippled` code and urge researchers to responsibly disclose any issues they may find. - -To report a bug, please send a detailed report to: - - -# Version 2.2.0 - -Version 2.2.0 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. This release adds performance optimizations, several bug fixes, and introduces the `featurePriceOracle`, `fixEmptyDID`, `fixXChainRewardRounding`, `fixPreviousTxnID`, and `fixAMMv1_1` amendments. - -[Sign Up for Future Release Announcements](https://groups.google.com/g/ripple-server) - - - -## Action Required - -Five new amendments are now open for voting according to the XRP Ledger's [amendment process](https://xrpl.org/amendments.html), which enables protocol changes following two weeks of >80% support from trusted validators. - -If you operate an XRP Ledger server, upgrade to version 2.2.0 by June 17, 2024 to ensure service continuity. The exact time that protocol changes take effect depends on the voting decisions of the decentralized network. - -If you operate a Clio server, Clio needs to be updated to 2.1.2 before updating to rippled 2.2.0. Clio will be blocked if it is not updated. - -## Changelog - -### Amendments and New Features -(These are changes which may impact or be useful to end users. For example, you may be able to update your code/workflow to take advantage of these changes.) - -- **featurePriceOracle** amendment: Implements a price oracle as defined in the [XLS-47](https://github.com/XRPLF/XRPL-Standards/blob/master/XLS-47d-PriceOracles/README.md) spec. A Price Oracle is used to bring real-world data, such as market prices, onto the blockchain, enabling dApps to access and utilize information that resides outside the blockchain. [#4789](https://github.com/XRPLF/rippled/pull/4789) - -- **fixEmptyDID** amendment: Modifies the behavior of the DID amendment: adds an additional check to ensure that DIDs are non-empty when created, and returns a `tecEMPTY_DID` error if the DID would be empty. [#4950](https://github.com/XRPLF/rippled/pull/4950) - -- **fixXChainRewardRounding** amendment: Modifies the behavior of the XChainBridge amendment: fixes rounding so reward shares are always rounded down, even when the `fixUniversalNumber` amendment is active. [#4933](https://github.com/XRPLF/rippled/pull/4933) - -- **fixPreviousTxnID** amendment: Adds `PreviousTxnID` and `PreviousTxnLgrSequence` as fields to all ledger entries that did not already have them included (`DirectoryNode`, `Amendments`, `FeeSettings`, `NegativeUNL`, and `AMM`). Existing ledger entries will gain the fields whenever transactions modify those entries. [#4751](https://github.com/XRPLF/rippled/pull/4751). - -- **fixAMMv1_1** amendment: Fixes AMM offer rounding and low quality order book offers from blocking the AMM. [#4983](https://github.com/XRPLF/rippled/pull/4983) - -- Add a non-admin version of `feature` API method. 
[#4781](https://github.com/XRPLF/rippled/pull/4781) - -### Bug Fixes and Performance Improvements -(These are behind-the-scenes improvements, such as internal changes to the code, which are not expected to impact end users.) - -- Optimize the base58 encoder and decoder. The algorithm is now about 10 times faster for encoding and 15 times faster for decoding. [#4327](https://github.com/XRPLF/rippled/pull/4327) - -- Optimize the `account_tx` SQL query. [#4955](https://github.com/XRPLF/rippled/pull/4955) - -- Don't reach consensus as quickly if no other proposals are seen. [#4763](https://github.com/XRPLF/rippled/pull/4763) - -- Fix a potential deadlock in the database module. [#4989](https://github.com/XRPLF/rippled/pull/4989) - -- Enforce no duplicate slots from incoming connections. [#4944](https://github.com/XRPLF/rippled/pull/4944) - -- Fix an order book update variable swap. [#4890](https://github.com/XRPLF/rippled/pull/4890) - -### Docs and Build System - -- Add unit test to raise the test coverage of the AMM. [#4971](https://github.com/XRPLF/rippled/pull/4971) - -- Improve test coverage reporting. [#4977](https://github.com/XRPLF/rippled/pull/4977) - -### GitHub - -The public source code repository for `rippled` is hosted on GitHub at . - -We welcome all contributions and invite everyone to join the community of XRP Ledger developers to help build the Internet of Value. - - -## Credits - -The following people contributed directly to this release: - -Alex Kremer -Alloy Networks <45832257+alloynetworks@users.noreply.github.com> -Bronek Kozicki -Chenna Keshava -Denis Angell -Ed Hennis -Gregory Tsipenyuk -Howard Hinnant -John Freeman -Mark Travis -Mayukha Vadari -Michael Legleux -Nik Bougalis -Olek <115580134+oleks-rip@users.noreply.github.com> -Scott Determan -Snoppy - -Bug Bounties and Responsible Disclosures: - -We welcome reviews of the `rippled` code and urge researchers to responsibly disclose any issues they may find. - -To report a bug, please send a detailed report to: - - -## Version 2.1.1 - -The `rippled` 2.1.1 release fixes a critical bug in the integration of AMMs with the payment engine. - -[Sign Up for Future Release Announcements](https://groups.google.com/g/ripple-server) - - - - -## Action Required - -One new amendment is now open for voting according to the XRP Ledger's [amendment process](https://xrpl.org/amendments.html), which enables protocol changes following two weeks of >80% support from trusted validators. - -If you operate an XRP Ledger server, upgrade to version 2.1.1 by April 8, 2024 to ensure service continuity. The exact time that protocol changes take effect depends on the voting decisions of the decentralized network. - -## Changelog - -### Amendments - -- **fixAMMOverflowOffer**: Fix improper handling of large synthetic AMM offers in the payment engine. Due to the importance of this fix, the default vote in the source code has been set to YES. For information on how to configure your validator's amendment voting, see [Configure Amendment Voting](https://xrpl.org/docs/infrastructure/configuration/configure-amendment-voting). - -# Introducing XRP Ledger version 2.1.0 - -Version 2.1.0 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. This release adds a bug fix, build improvements, and introduces the `fixNFTokenReserve` and `fixInnerObjTemplate` amendments. 
- -[Sign Up for Future Release Announcements](https://groups.google.com/g/ripple-server) - - - - -## Action Required - -Two new amendments are now open for voting according to the XRP Ledger's [amendment process](https://xrpl.org/amendments.html), which enables protocol changes following two weeks of >80% support from trusted validators. - -If you operate an XRP Ledger server, upgrade to version 2.1.0 by March 5, 2024 to ensure service continuity. The exact time that protocol changes take effect depends on the voting decisions of the decentralized network. - -## Changelog - -### Amendments -(These are changes which may impact or be useful to end users. For example, you may be able to update your code/workflow to take advantage of these changes.) - -- **fixNFTokenReserve**: Adds a check to the `NFTokenAcceptOffer` transactor to see if the `OwnerCount` changed. If it did, it checks that the reserve requirement is met. [#4767](https://github.com/XRPLF/rippled/pull/4767) - -- **fixInnerObjTemplate**: Adds an `STObject` constructor overload that includes an additional boolean argument to set the inner object template; currently, the inner object template isn't set upon object creation. In some circumstances, this causes a `tefEXCEPTION` error when trying to access the AMM `sfTradingFee` and `sfDiscountedFee` fields in the inner objects of `sfVoteEntry` and `sfAuctionSlot`. [#4906](https://github.com/XRPLF/rippled/pull/4906) - - -### Bug Fixes and Performance Improvements -(These are behind-the-scenes improvements, such as internal changes to the code, which are not expected to impact end users.) - -- Fixed a bug that prevented the gRPC port info from being specified in the `rippled` config file. [#4728](https://github.com/XRPLF/rippled/pull/4728) - - -### Docs and Build System - -- Added unit tests to check that payees and payers aren't the same account. [#4860](https://github.com/XRPLF/rippled/pull/4860) - -- Removed a workaround that bypassed Windows CI unit test failures. [#4871](https://github.com/XRPLF/rippled/pull/4871) - -- Updated library names to be platform-agnostic in Conan recipes. [#4831](https://github.com/XRPLF/rippled/pull/4831) - -- Added headers required in the Conan package to build xbridge witness servers. [#4885](https://github.com/XRPLF/rippled/pull/4885) - -- Improved object lifetime management when creating a temporary `Rules` object, fixing a crash in Windows unit tests. [#4917](https://github.com/XRPLF/rippled/pull/4917) - -### GitHub - -The public source code repository for `rippled` is hosted on GitHub at . - -We welcome all contributions and invite everyone to join the community of XRP Ledger developers to help build the Internet of Value. - - -## Credits - -The following people contributed directly to this release: - -- Bronek Kozicki -- CJ Cobb -- Chenna Keshava B S <21219765+ckeshava@users.noreply.github.com> -- Ed Hennis -- Elliot Lee -- Gregory Tsipenyuk -- John Freeman -- Michael Legleux -- Ryan Molley -- Shawn Xie <35279399+shawnxie999@users.noreply.github.com> - - -Bug Bounties and Responsible Disclosures: - -We welcome reviews of the `rippled` code and urge researchers to responsibly disclose any issues they may find. - -To report a bug, please send a detailed report to: - -# Introducing XRP Ledger version 2.0.1 - -Version 2.0.1 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. This release includes minor fixes, unit test improvements, and doc updates. 
- -[Sign Up for Future Release Announcements](https://groups.google.com/g/ripple-server) - - - - -## Action Required - -If you operate an XRP Ledger server, upgrade to version 2.0.1 to take advantage of the changes included in this update. Nodes on version 1.12 should upgrade as soon as possible. - - -## Changelog - - -### Changes -(These are changes which may impact or be useful to end users. For example, you may be able to update your code/workflow to take advantage of these changes.) - -- Updated the `send_queue_limit` to 500 in the default `rippled` config to handle increased transaction loads. [#4867](https://github.com/XRPLF/rippled/pull/4867) - - -### Bug Fixes and Performance Improvements -(These are behind-the-scenes improvements, such as internal changes to the code, which are not expected to impact end users.) - -- Fixed an assertion that occurred when `rippled` was under heavy websocket client load. [#4848](https://github.com/XRPLF/rippled/pull/4848) - -- Improved lifetime management of serialized type ledger entries to improve memory usage. [#4822](https://github.com/XRPLF/rippled/pull/4822) - -- Fixed a clang warning about deprecated sprintf usage. [#4747](https://github.com/XRPLF/rippled/pull/4747) - - -### Docs and Build System - -- Added `DeliverMax` to more JSONRPC tests. [#4826](https://github.com/XRPLF/rippled/pull/4826) - -- Updated the pull request template to include a `Type of Change` checkbox and additional contextual questions. [#4875](https://github.com/XRPLF/rippled/pull/4875) - -- Updated help messages for unit tests pattern matching. [#4846](https://github.com/XRPLF/rippled/pull/4846) - -- Improved the time it takes to generate coverage reports. [#4849](https://github.com/XRPLF/rippled/pull/4849) - -- Fixed broken links in the Conan build docs. [#4699](https://github.com/XRPLF/rippled/pull/4699) - -- Spurious codecov uploads are now retried if there's an error uploading them the first time. [#4896](https://github.com/XRPLF/rippled/pull/4896) - - -### GitHub - -The public source code repository for `rippled` is hosted on GitHub at . - -We welcome all contributions and invite everyone to join the community of XRP Ledger developers to help build the Internet of Value. - - -## Credits - -The following people contributed directly to this release: - -- Bronek Kozicki -- Chenna Keshava B S <21219765+ckeshava@users.noreply.github.com> -- Ed Hennis -- Elliot Lee -- Lathan Britz -- Mark Travis -- nixer89 - -Bug Bounties and Responsible Disclosures: - -We welcome reviews of the `rippled` code and urge researchers to responsibly disclose any issues they may find. - -To report a bug, please send a detailed report to: - -# Introducing XRP Ledger version 2.0.0 - -Version 2.0.0 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. This release adds new features and bug fixes, and introduces these amendments: - -- `DID` -- `XChainBridge` -- `fixDisallowIncomingV1` -- `fixFillOrKill` - -[Sign Up for Future Release Announcements](https://groups.google.com/g/ripple-server) - - - - -## Action Required - -Four new amendments are now open for voting according to the XRP Ledger's [amendment process](https://xrpl.org/amendments.html), which enables protocol changes following two weeks of >80% support from trusted validators. - -If you operate an XRP Ledger server, upgrade to version 2.0.0 by January 22, 2024 to ensure service continuity. 
The exact time that protocol changes take effect depends on the voting decisions of the decentralized network. - - -## Changelog - - -### Amendments, New Features, and Changes -(These are changes which may impact or be useful to end users. For example, you may be able to update your code/workflow to take advantage of these changes.) - -- **XChainBridge**: Introduces cross-chain bridges, enabling interoperability between the XRP Ledger and sidechains. [#4292](https://github.com/XRPLF/rippled/pull/4292) - -- **DID**: Introduces decentralized identifiers. [#4636](https://github.com/XRPLF/rippled/pull/4636) - -- **fixDisallowIncomingV1**: Fixes an issue that occurs when users try to authorize a trustline while the `lsfDisallowIncomingTrustline` flag is enabled on their account. [#4721](https://github.com/XRPLF/rippled/pull/4721) - -- **fixFillOrKill**: Fixes an issue introduced in the `flowCross` amendment. The `tfFillOrKill` and `tfSell` flags are now properly handled to allow offers to cross in certain scenarios. [#4694](https://github.com/XRPLF/rippled/pull/4694) - -- **API v2 released with these changes:** - - - Accepts currency codes in ASCII, using the full alphabet. [#4566](https://github.com/XRPLF/rippled/pull/4566) - - Added test to verify the `check` field is a string. [#4630](https://github.com/XRPLF/rippled/pull/4630) - - Added errors for malformed `account_tx` and `noripple_check` fields. [#4620](https://github.com/XRPLF/rippled/pull/4620) - - Added errors for malformed `gateway_balances` and `channel_authorize` requests. [#4618](https://github.com/XRPLF/rippled/pull/4618) - - Added a `DeliverMax` alias to `Amount` and removed `Amount`. [#4733](https://github.com/XRPLF/rippled/pull/4733) - - Removed `tx_history` and `ledger_header` methods. Also updated `RPC::Handler` to allow for version-specific methods. [#4759](https://github.com/XRPLF/rippled/pull/4759) - - Standardized the JSON serialization format of transactions. [#4727](https://github.com/XRPLF/rippled/issues/4727) - - Bumped API support to v2, but kept the command-line interface for `rippled` and unit tests at v1. [#4803](https://github.com/XRPLF/rippled/pull/4803) - - Standardized `ledger_index` to return as a number. [#4820](https://github.com/XRPLF/rippled/pull/4820) - -- Added a `server_definitions` command that returns an SDK-compatible `definitions.json` file, generated from the `rippled` instance currently running. [#4703](https://github.com/XRPLF/rippled/pull/4703) - -- Improved unit test command line input and run times. [#4634](https://github.com/XRPLF/rippled/pull/4634) - -- Added the link compression setting to the the `rippled-example.cfg` file. [#4753](https://github.com/XRPLF/rippled/pull/4753) - -- Changed the reserved hook error code name from `tecHOOK_ERROR` to `tecHOOK_REJECTED`. [#4559](https://github.com/XRPLF/rippled/pull/4559) - - -### Bug Fixes and Performance Improvements -(These are behind-the-scenes improvements, such as internal changes to the code, which are not expected to impact end users.) - -- Simplified `TxFormats` common fields logic. [#4637](https://github.com/XRPLF/rippled/pull/4637) - -- Improved transaction throughput by asynchronously writing batches to *NuDB*. [#4503](https://github.com/XRPLF/rippled/pull/4503) - -- Removed 2 unused functions. [#4708](https://github.com/XRPLF/rippled/pull/4708) - -- Removed an unused variable that caused clang 14 build errors. [#4672](https://github.com/XRPLF/rippled/pull/4672) - -- Fixed comment about return value of `LedgerHistory::fixIndex`. 
[#4574](https://github.com/XRPLF/rippled/pull/4574) - -- Updated `secp256k1` to 0.3.2. [#4653](https://github.com/XRPLF/rippled/pull/4653) - -- Removed built-in SNTP clock issues. [#4628](https://github.com/XRPLF/rippled/pull/4628) - -- Fixed amendment flapping. This issue usually occurred when an amendment was on the verge of gaining majority, but a validator not in favor of the amendment went offline. [#4410](https://github.com/XRPLF/rippled/pull/4410) - -- Fixed asan stack-use-after-scope issue. [#4676](https://github.com/XRPLF/rippled/pull/4676) - -- Transactions and pseudo-transactions share the same `commonFields` again. [#4715](https://github.com/XRPLF/rippled/pull/4715) - -- Reduced boilerplate in `applySteps.cpp`. When a new transactor is added, only one function needs to be modified now. [#4710](https://github.com/XRPLF/rippled/pull/4710) - -- Removed an incorrect assert. [#4743](https://github.com/XRPLF/rippled/pull/4743) - -- Replaced some asserts in `PeerFinder::Logic` with `LogicError` to better indicate the nature of server crashes. [#4562](https://github.com/XRPLF/rippled/pull/4562) - -- Fixed an issue with enabling new amendments on a network with an ID greater than 1024. [#4737](https://github.com/XRPLF/rippled/pull/4737) - - -### Docs and Build System - -- Updated `rippled-example.cfg` docs to clarify usage of *ssl_cert* vs *ssl_chain*. [#4667](https://github.com/XRPLF/rippled/pull/4667) - -- Updated `BUILD.md`: - - Made the `environment.md` link easier to find. Also made it easier to find platform-specific info. [#4507](https://github.com/XRPLF/rippled/pull/4507) - - Fixed typo. [#4718](https://github.com/XRPLF/rippled/pull/4718) - - Updated the minimum compiler requirements. [#4700](https://github.com/XRPLF/rippled/pull/4700) - - Added note about enabling `XRPFees`. [#4741](https://github.com/XRPLF/rippled/pull/4741) - -- Updated `API-CHANGELOG.md`: - - Explained API v2 is releasing with `rippled` 2.0.0. [#4633](https://github.com/XRPLF/rippled/pull/4633) - - Clarified the location of the `signer_lists` field in the `account_info` response for API v2. [#4724](https://github.com/XRPLF/rippled/pull/4724) - - Added documentation for the new `DeliverMax` field. [#4784](https://github.com/XRPLF/rippled/pull/4784) - - Removed references to API v2 being "in progress" and "in beta". [#4828](https://github.com/XRPLF/rippled/pull/4828) - - Clarified that all breaking API changes will now occur in API v3 or later. [#4773](https://github.com/XRPLF/rippled/pull/4773) - -- Fixed a mistake in the overlay README. [#4635](https://github.com/XRPLF/rippled/pull/4635) - -- Fixed an early return from `RippledRelease.cmake` that prevented targets from being created during packaging. [#4707](https://github.com/XRPLF/rippled/pull/4707) - -- Fixed a build error with Intel Macs. [#4632](https://github.com/XRPLF/rippled/pull/4632) - -- Added `.build` to `.gitignore`. [#4722](https://github.com/XRPLF/rippled/pull/4722) - -- Fixed a `uint is not universally defined` Windows build error. [#4731](https://github.com/XRPLF/rippled/pull/4731) - -- Reenabled Windows CI build with Artifactory support. [#4596](https://github.com/XRPLF/rippled/pull/4596) - -- Fixed output of remote step in Nix workflow. [#4746](https://github.com/XRPLF/rippled/pull/4746) - -- Fixed a broken link in `conan.md`. [#4740](https://github.com/XRPLF/rippled/pull/4740) - -- Added a `python` call to fix the `pip` upgrade command in Windows CI. 
[#4768](https://github.com/XRPLF/rippled/pull/4768) - -- Added an API Impact section to `pull_request_template.md`. [#4757](https://github.com/XRPLF/rippled/pull/4757) - -- Set permissions for the Doxygen workflow. [#4756](https://github.com/XRPLF/rippled/pull/4756) - -- Switched to Unity builds to speed up Windows CI. [#4780](https://github.com/XRPLF/rippled/pull/4780) - -- Clarified what makes consensus healthy in `FeeEscalation.md`. [#4729](https://github.com/XRPLF/rippled/pull/4729) - -- Removed a dependency on the header for unit tests. [#4788](https://github.com/XRPLF/rippled/pull/4788) - -- Fixed a clang `unused-but-set-variable` warning. [#4677](https://github.com/XRPLF/rippled/pull/4677) - -- Removed an unused Dockerfile. [#4791](https://github.com/XRPLF/rippled/pull/4791) - -- Fixed unit tests to work with API v2. [#4785](https://github.com/XRPLF/rippled/pull/4785) - -- Added support for the mold linker on Linux. [#4807](https://github.com/XRPLF/rippled/pull/4807) - -- Updated Linux distribtuions `rippled` smoke tests run on. [#4813](https://github.com/XRPLF/rippled/pull/4813) - -- Added codename `bookworm` to the distribution matrix during Artifactory uploads, enabling Debian 12 clients to install `rippled` packages. [#4836](https://github.com/XRPLF/rippled/pull/4836) - -- Added a workaround for compilation errors with GCC 13 and other compilers relying on libstdc++ version 13. [#4817](https://github.com/XRPLF/rippled/pull/4817) - -- Fixed a minor typo in the code comments of `AMMCreate.h`. [4821](https://github.com/XRPLF/rippled/pull/4821) - - -### GitHub - -The public source code repository for `rippled` is hosted on GitHub at . - -We welcome all contributions and invite everyone to join the community of XRP Ledger developers to help build the Internet of Value. - - -## Credits - -The following people contributed directly to this release: - -- Bronek Kozicki -- Chenna Keshava B S <21219765+ckeshava@users.noreply.github.com> -- Denis Angell -- Ed Hennis -- Elliot Lee -- Florent <36513774+florent-uzio@users.noreply.github.com> -- ForwardSlashBack <142098649+ForwardSlashBack@users.noreply.github.com> -- Gregory Tsipenyuk -- Howard Hinnant -- Hussein Badakhchani -- Jackson Mills -- John Freeman -- Manoj Doshi -- Mark Pevec -- Mark Travis -- Mayukha Vadari -- Michael Legleux -- Nik Bougalis -- Peter Chen <34582813+PeterChen13579@users.noreply.github.com> -- Rome Reginelli -- Scott Determan -- Scott Schurr -- Sophia Xie <106177003+sophiax851@users.noreply.github.com> -- Stefan van Kessel -- pwang200 <354723+pwang200@users.noreply.github.com> -- shichengsg002 <147461171+shichengsg002@users.noreply.github.com> -- sokkaofthewatertribe <140777955+sokkaofthewatertribe@users.noreply.github.com> - -Bug Bounties and Responsible Disclosures: - -We welcome reviews of the rippled code and urge researchers to responsibly disclose any issues they may find. - -To report a bug, please send a detailed report to: - - -# Introducing XRP Ledger version 1.12.0 - -Version 1.12.0 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. 
This release adds new features and bug fixes, and introduces these amendments: - -- `AMM` -- `Clawback` -- `fixReducedOffersV1` - -[Sign Up for Future Release Announcements](https://groups.google.com/g/ripple-server) - - - -## Action Required - -Three new amendments are now open for voting according to the XRP Ledger's [amendment process](https://xrpl.org/amendments.html), which enables protocol changes following two weeks of >80% support from trusted validators. - -If you operate an XRP Ledger server, upgrade to version 1.12.0 by September 20, 2023 to ensure service continuity. The exact time that protocol changes take effect depends on the voting decisions of the decentralized network. - - -## Install / Upgrade - -On supported platforms, see the [instructions on installing or updating `rippled`](https://xrpl.org/install-rippled.html). - -The XRPL Foundation publishes portable binaries, which are drop-in replacements for the `rippled` daemon. [See information and downloads for the portable binaries](https://github.com/XRPLF/rippled-portable-builds#portable-builds-of-the-rippled-server). This will work on most distributions, including Ubuntu 16.04, 18.04, 20.04, and 22.04; CentOS; and others. Please test and open issues on GitHub if there are problems. - - -## Changelog - -### Amendments, New Features, and Changes -(These are changes which may impact or be useful to end users. For example, you may be able to update your code/workflow to take advantage of these changes.) - -- **`AMM`**: Introduces an automated market maker (AMM) protocol to the XRP Ledger's decentralized exchange, enabling you to trade assets without a counterparty. For more information about AMMs, see: [Automated Market Maker](https://opensource.ripple.com/docs/xls-30d-amm/amm-uc/). [#4294](https://github.com/XRPLF/rippled/pull/4294) - -- **`Clawback`**: Adds a setting, *Allow Clawback*, which lets an issuer recover, or _claw back_, tokens that they previously issued. Issuers cannot enable this setting if they have issued tokens already. For additional documentation on this feature, see: [#4553](https://github.com/XRPLF/rippled/pull/4553). - -- **`fixReducedOffersV1`**: Reduces the occurrence of order books that are blocked by reduced offers. [#4512](https://github.com/XRPLF/rippled/pull/4512) - -- Added WebSocket and RPC port info to `server_info` responses. [#4427](https://github.com/XRPLF/rippled/pull/4427) - -- Removed the deprecated `accepted`, `seqNum`, `hash`, and `totalCoins` fields from the `ledger` method. [#4244](https://github.com/XRPLF/rippled/pull/4244) - - -### Bug Fixes and Performance Improvements -(These are behind-the-scenes improvements, such as internal changes to the code, which are not expected to impact end users.) - -- Added a pre-commit hook that runs the clang-format linter locally before committing changes. To install this feature, see: [CONTRIBUTING](https://github.com/XRPLF/xrpl-dev-portal/blob/master/CONTRIBUTING.md). [#4599](https://github.com/XRPLF/rippled/pull/4599) - -- In order to make it more straightforward to catch and handle overflows: changed the output type of the `mulDiv()` function from `std::pair` to `std::optional`. [#4243](https://github.com/XRPLF/rippled/pull/4243) - -- Updated `Handler::Condition` enum values to make the code less brittle. [#4239](https://github.com/XRPLF/rippled/pull/4239) - -- Renamed `ServerHandlerImp` to `ServerHandler`. 
[#4516](https://github.com/XRPLF/rippled/pull/4516), [#4592](https://github.com/XRPLF/rippled/pull/4592) - -- Replaced hand-rolled code with `std::from_chars` for better maintainability. [#4473](https://github.com/XRPLF/rippled/pull/4473) - -- Removed an unused `TypedField` move constructor. [#4567](https://github.com/XRPLF/rippled/pull/4567) - - -### Docs and Build System - -- Updated checkout versions to resolve warnings during GitHub jobs. [#4598](https://github.com/XRPLF/rippled/pull/4598) - -- Fixed an issue with the Debian package build. [#4591](https://github.com/XRPLF/rippled/pull/4591) - -- Updated build instructions with additional steps to take after updating dependencies. [#4623](https://github.com/XRPLF/rippled/pull/4623) - -- Updated contributing doc to clarify that beta releases should also be pushed to the `release` branch. [#4589](https://github.com/XRPLF/rippled/pull/4589) - -- Enabled the `BETA_RPC_API` flag in the default unit tests config, making the API v2 (beta) available to unit tests. [#4573](https://github.com/XRPLF/rippled/pull/4573) - -- Conan dependency management. - - Fixed package definitions for Conan. [#4485](https://github.com/XRPLF/rippled/pull/4485) - - Updated build dependencies to the most recent versions in Conan Center. [#4595](https://github.com/XRPLF/rippled/pull/4595) - - Updated Conan recipe for NuDB. [#4615](https://github.com/XRPLF/rippled/pull/4615) - -- Added binary hardening and linker flags to enhance security during the build process. [#4603](https://github.com/XRPLF/rippled/pull/4603) - -- Added an Artifactory to the `nix` workflow to improve build times. [#4556](https://github.com/XRPLF/rippled/pull/4556) - -- Added quality-of-life improvements to workflows, using new [concurrency control](https://docs.github.com/en/actions/using-jobs/using-concurrency) features. [#4597](https://github.com/XRPLF/rippled/pull/4597) - - -[Full Commit Log](https://github.com/XRPLF/rippled/compare/1.11.0...1.12.0) - - -### GitHub - -The public source code repository for `rippled` is hosted on GitHub at . - -We welcome all contributions and invite everyone to join the community of XRP Ledger developers to help build the Internet of Value. - - -## Credits - -The following people contributed directly to this release: - -- Alphonse N. Mousse <39067955+a-noni-mousse@users.noreply.github.com> -- Arihant Kothari -- Chenna Keshava B S <21219765+ckeshava@users.noreply.github.com> -- Denis Angell -- Ed Hennis -- Elliot Lee -- Gregory Tsipenyuk -- Howard Hinnant -- Ikko Eltociear Ashimine -- John Freeman -- Manoj Doshi -- Mark Travis -- Mayukha Vadari -- Michael Legleux -- Peter Chen <34582813+PeterChen13579@users.noreply.github.com> -- RichardAH -- Rome Reginelli -- Scott Schurr -- Shawn Xie <35279399+shawnxie999@users.noreply.github.com> -- drlongle - -Bug Bounties and Responsible Disclosures: - -We welcome reviews of the rippled code and urge researchers to responsibly disclose any issues they may find. - -To report a bug, please send a detailed report to: - - -# Introducing XRP Ledger version 1.11.0 - -Version 1.11.0 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. - -This release reduces memory usage, introduces the `fixNFTokenRemint` amendment, and adds new features and bug fixes. For example, the new NetworkID field in transactions helps to prevent replay attacks with side-chains. 
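As a rough illustration of that NetworkID idea (a conceptual sketch with hypothetical types and assumed values, not rippled's actual validation logic): a transaction that names a network ID should only be applied on a chain whose ID matches, so it cannot simply be replayed onto a sidechain or back onto another chain.

```cpp
// Conceptual sketch only: hypothetical types and values, not rippled's implementation.
#include <cstdint>
#include <iostream>
#include <optional>

struct Tx
{
    std::optional<std::uint32_t> networkID;  // hypothetical NetworkID-style field
};

// A transaction that names a network is only acceptable on that network.
bool
acceptableOnChain(Tx const& tx, std::uint32_t localNetworkID)
{
    return !tx.networkID || *tx.networkID == localNetworkID;
}

int
main()
{
    std::uint32_t const sidechainID = 2025;  // assumed example value
    Tx tx{sidechainID};

    std::cout << std::boolalpha
              << acceptableOnChain(tx, sidechainID) << '\n'  // true: same chain
              << acceptableOnChain(tx, 42) << '\n';          // false: replay on another chain
}
```

The real rules are more involved (for example, which networks require the field at all), so treat this purely as a mental model.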
- -[Sign Up for Future Release Announcements](https://groups.google.com/g/ripple-server) - - - -## Action Required - -The `fixNFTokenRemint` amendment is now open for voting according to the XRP Ledger's [amendment process](https://xrpl.org/amendments.html), which enables protocol changes following two weeks of >80% support from trusted validators. - -If you operate an XRP Ledger server, upgrade to version 1.11.0 by July 5 to ensure service continuity. The exact time that protocol changes take effect depends on the voting decisions of the decentralized network. - - -## Install / Upgrade - -On supported platforms, see the [instructions on installing or updating `rippled`](https://xrpl.org/install-rippled.html). - - -## What's Changed - -### New Features and Improvements - -* Allow port numbers to be specified using either a colon or a space by @RichardAH in https://github.com/XRPLF/rippled/pull/4328 -* Eliminate memory allocation from critical path: by @nbougalis in https://github.com/XRPLF/rippled/pull/4353 -* Make it easy for projects to depend on libxrpl by @thejohnfreeman in https://github.com/XRPLF/rippled/pull/4449 -* Add the ability to mark amendments as obsolete by @ximinez in https://github.com/XRPLF/rippled/pull/4291 -* Always create the FeeSettings object in genesis ledger by @ximinez in https://github.com/XRPLF/rippled/pull/4319 -* Log exception messages in several locations by @drlongle in https://github.com/XRPLF/rippled/pull/4400 -* Parse flags in account_info method by @drlongle in https://github.com/XRPLF/rippled/pull/4459 -* Add NFTokenPages to account_objects RPC by @RichardAH in https://github.com/XRPLF/rippled/pull/4352 -* add jss fields used by clio `nft_info` by @ledhed2222 in https://github.com/XRPLF/rippled/pull/4320 -* Introduce a slab-based memory allocator and optimize SHAMapItem by @nbougalis in https://github.com/XRPLF/rippled/pull/4218 -* Add NetworkID field to transactions to help prevent replay attacks on and from side-chains by @RichardAH in https://github.com/XRPLF/rippled/pull/4370 -* If present, set quorum based on command line. 
by @mtrippled in https://github.com/XRPLF/rippled/pull/4489 -* API does not accept seed or public key for account by @drlongle in https://github.com/XRPLF/rippled/pull/4404 -* Add `nftoken_id`, `nftoken_ids` and `offer_id` meta fields into NFT `Tx` responses by @shawnxie999 in https://github.com/XRPLF/rippled/pull/4447 - -### Bug Fixes - -* fix(gateway_balances): handle overflow exception by @RichardAH in https://github.com/XRPLF/rippled/pull/4355 -* fix(ValidatorSite): handle rare null pointer dereference in timeout by @ximinez in https://github.com/XRPLF/rippled/pull/4420 -* RPC commands understand markers derived from all ledger object types by @ximinez in https://github.com/XRPLF/rippled/pull/4361 -* `fixNFTokenRemint`: prevent NFT re-mint: by @shawnxie999 in https://github.com/XRPLF/rippled/pull/4406 -* Fix a case where ripple::Expected returned a json array, not a value by @scottschurr in https://github.com/XRPLF/rippled/pull/4401 -* fix: Ledger data returns an empty list (instead of null) when all entries are filtered out by @drlongle in https://github.com/XRPLF/rippled/pull/4398 -* Fix unit test ripple.app.LedgerData by @drlongle in https://github.com/XRPLF/rippled/pull/4484 -* Fix the fix for std::result_of by @thejohnfreeman in https://github.com/XRPLF/rippled/pull/4496 -* Fix errors for Clang 16 by @thejohnfreeman in https://github.com/XRPLF/rippled/pull/4501 -* Ensure that switchover vars are initialized before use: by @seelabs in https://github.com/XRPLF/rippled/pull/4527 -* Move faulty assert by @ximinez in https://github.com/XRPLF/rippled/pull/4533 -* Fix unaligned load and stores: (#4528) by @seelabs in https://github.com/XRPLF/rippled/pull/4531 -* fix node size estimation by @dangell7 in https://github.com/XRPLF/rippled/pull/4536 -* fix: remove redundant moves by @ckeshava in https://github.com/XRPLF/rippled/pull/4565 - -### Code Cleanup and Testing - -* Replace compare() with the three-way comparison operator in base_uint, Issue and Book by @drlongle in https://github.com/XRPLF/rippled/pull/4411 -* Rectify the import paths of boost::function_output_iterator by @ckeshava in https://github.com/XRPLF/rippled/pull/4293 -* Expand Linux test matrix by @thejohnfreeman in https://github.com/XRPLF/rippled/pull/4454 -* Add patched recipe for SOCI by @thejohnfreeman in https://github.com/XRPLF/rippled/pull/4510 -* Switch to self-hosted runners for macOS by @thejohnfreeman in https://github.com/XRPLF/rippled/pull/4511 -* [TRIVIAL] Add missing includes by @seelabs in https://github.com/XRPLF/rippled/pull/4555 - -### Docs - -* Refactor build instructions by @thejohnfreeman in https://github.com/XRPLF/rippled/pull/4381 -* Add install instructions for package managers by @thejohnfreeman in https://github.com/XRPLF/rippled/pull/4472 -* Fix typo by @solmsted in https://github.com/XRPLF/rippled/pull/4508 -* Update environment.md by @sappenin in https://github.com/XRPLF/rippled/pull/4498 -* Update BUILD.md by @oeggert in https://github.com/XRPLF/rippled/pull/4514 -* Trivial: add comments for NFToken-related invariants by @scottschurr in https://github.com/XRPLF/rippled/pull/4558 - -## New Contributors -* @drlongle made their first contribution in https://github.com/XRPLF/rippled/pull/4411 -* @ckeshava made their first contribution in https://github.com/XRPLF/rippled/pull/4293 -* @solmsted made their first contribution in https://github.com/XRPLF/rippled/pull/4508 -* @sappenin made their first contribution in https://github.com/XRPLF/rippled/pull/4498 -* @oeggert made their first contribution 
in https://github.com/XRPLF/rippled/pull/4514 - -**Full Changelog**: https://github.com/XRPLF/rippled/compare/1.10.1...1.11.0 - - -### GitHub - -The public source code repository for `rippled` is hosted on GitHub at . - -We welcome all contributions and invite everyone to join the community of XRP Ledger developers to help build the Internet of Value. - -### Credits - -The following people contributed directly to this release: -- Alloy Networks <45832257+alloynetworks@users.noreply.github.com> -- Brandon Wilson -- Chenna Keshava B S <21219765+ckeshava@users.noreply.github.com> -- David Fuelling -- Denis Angell -- Ed Hennis -- Elliot Lee -- John Freeman -- Mark Travis -- Nik Bougalis -- RichardAH -- Scott Determan -- Scott Schurr -- Shawn Xie <35279399+shawnxie999@users.noreply.github.com> -- drlongle -- ledhed2222 -- oeggert <117319296+oeggert@users.noreply.github.com> -- solmsted - - -Bug Bounties and Responsible Disclosures: -We welcome reviews of the rippled code and urge researchers to -responsibly disclose any issues they may find. - -To report a bug, please send a detailed report to: - - bugs@xrpl.org - - -# Introducing XRP Ledger version 1.10.1 - -Version 1.10.1 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. This release restores packages for Ubuntu 18.04. - -Compared to version 1.10.0, the only C++ code change fixes an edge case in Reporting Mode. - -If you are already running version 1.10.0, then upgrading to version 1.10.1 is generally not required. - -[Sign Up for Future Release Announcements](https://groups.google.com/g/ripple-server) - - - -## Install / Upgrade - -On supported platforms, see the [instructions on installing or updating `rippled`](https://xrpl.org/install-rippled.html). - -## Changelog - -- [`da18c86cbf`](https://github.com/ripple/rippled/commit/da18c86cbfea1d8fe6940035f9103e15890d47ce) Build packages with Ubuntu 18.04 -- [`f7b3ddd87b`](https://github.com/ripple/rippled/commit/f7b3ddd87b8ef093a06ab1420bea57ed1e77643a) Reporting Mode: Do not attempt to acquire missing data from peer network (#4458) - -### GitHub - -The public source code repository for `rippled` is hosted on GitHub at . - -We welcome all contributions and invite everyone to join the community of XRP Ledger developers to help build the Internet of Value. - -### Credits - -The following people contributed directly to this release: - -- John Freeman -- Mark Travis -- Michael Legleux - -Bug Bounties and Responsible Disclosures: -We welcome reviews of the rippled code and urge researchers to -responsibly disclose any issues they may find. - -To report a bug, please send a detailed report to: - - bugs@xrpl.org - - -# Introducing XRP Ledger version 1.10.0 - -Version 1.10.0 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. This release introduces six new amendments, detailed below, and cleans up code to improve performance. - -[Sign Up for Future Release Announcements](https://groups.google.com/g/ripple-server) - - - -## Action Required - -Six new amendments are now open for voting according to the XRP Ledger's [amendment process](https://xrpl.org/amendments.html), which enables protocol changes following two weeks of >80% support from trusted validators. - -If you operate an XRP Ledger server, upgrade to version 1.10.0 by March 21 to ensure service continuity. The exact time that protocol changes take effect depends on the voting decisions of the decentralized network. 
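For readers less familiar with the amendment process referenced throughout these notes, the sketch below reduces the ">80% support from trusted validators, held for two weeks" rule to plain arithmetic. It uses assumed validator counts and ignores how the server actually samples votes at flag ledgers; it is an illustration, not the voting implementation.

```cpp
// Simplified illustration of the amendment activation rule described above.
// Assumed numbers; the real implementation tallies votes at flag ledgers.
#include <chrono>
#include <iostream>

// More than 80% of trusted validators must vote in favor...
bool
hasSupermajority(int yesVotes, int trustedValidators)
{
    return yesVotes * 5 > trustedValidators * 4;
}

// ...and that level of support must hold for two weeks before activation.
bool
canActivate(std::chrono::hours majorityHeldFor)
{
    return majorityHeldFor >= std::chrono::hours{14 * 24};
}

int
main()
{
    using std::chrono::hours;
    std::cout << std::boolalpha
              << hasSupermajority(29, 35) << '\n'      // true  (~83% support)
              << hasSupermajority(27, 35) << '\n'      // false (~77% support)
              << canActivate(hours{10 * 24}) << '\n';  // false: only 10 days so far
}
```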
- - -## Install / Upgrade - -On supported platforms, see the [instructions on installing or updating `rippled`](https://xrpl.org/install-rippled.html). - - -## New Amendments - -- **`featureImmediateOfferKilled`**: Changes the response code of an `OfferCreate` transaction with the `tfImmediateOrCancel` flag to return `tecKILLED` when no funds are moved. The previous return code of `tecSUCCESS` was unintuitive. [#4157](https://github.com/XRPLF/rippled/pull/4157) - -- **`featureDisallowIncoming`**: Enables an account to block incoming checks, payment channels, NFToken offers, and trust lines. [#4336](https://github.com/XRPLF/rippled/pull/4336) - -- **`featureXRPFees`**: Simplifies transaction cost calculations to use XRP directly, rather than calculating indirectly in "fee units" and translating the results to XRP. Updates all instances of "fee units" in the protocol and ledger data to be drops of XRP instead. [#4247](https://github.com/XRPLF/rippled/pull/4247) - -- **`fixUniversalNumber`**: Simplifies and unifies the code for decimal floating point math. In some cases, this provides slightly better accuracy than the previous code, resulting in calculations whose least significant digits are different than when calculated with the previous code. The different results may cause other edge case differences where precise calculations are used, such as ranking of offers or processing of payments that use several different paths. [#4192](https://github.com/XRPLF/rippled/pull/4192) - -- **`fixNonFungibleTokensV1_2`**: This amendment is a combination of NFToken fixes. [#4417](https://github.com/XRPLF/rippled/pull/4417) - - Fixes unburnable NFTokens when it has over 500 offers. [#4346](https://github.com/XRPLF/rippled/pull/4346) - - Fixes 3 NFToken offer acceptance issues. [#4380](https://github.com/XRPLF/rippled/pull/4380) - - Prevents brokered sales of NFTokens to owners. [#4403](https://github.com/XRPLF/rippled/pull/4403) - - Only allows the destination to settle NFToken offers through brokerage. [#4399](https://github.com/XRPLF/rippled/pull/4399) - -- **`fixTrustLinesToSelf`**: Trust lines must be between two different accounts, but two exceptions exist because of a bug that briefly existed. This amendment removes those trust lines. [69bb2be](https://github.com/XRPLF/rippled/pull/4270/commits/69bb2be446e3cc24c694c0835b48bd2ecd3d119e) - - -## Changelog - - -### New Features and Improvements - -- **Improve Handshake in the peer protocol**: Switched to using a cryptographically secure PRNG for the Instance Cookie. `rippled` now uses hex encoding for the `Closed-Ledger` and `Previous-Ledger` fields in the Handshake. Also added `--newnodeid` and `--nodeid` command line options. [5a15229](https://github.com/XRPLF/rippled/pull/4270/commits/5a15229eeb13b69c8adf1f653b88a8f8b9480546) - -- **RPC tooBusy response now has 503 HTTP status code**: Added ripplerpc 3.0, enabling RPC tooBusy responses to return relevant HTTP status codes. This is a non-breaking change that only applies to JSON-RPC when you include `"ripplerpc": "3.0"` in the request. [#4143](https://github.com/XRPLF/rippled/pull/4143) - -- **Use the Conan package manager**: Added a `conanfile.py` and Conan recipe for Snappy. Removed the RocksDB recipe from the repo; you can now get it from Conan Center. 
[#4367](https://github.com/XRPLF/rippled/pull/4367), [c2b03fe](https://github.com/XRPLF/rippled/commit/c2b03fecca19a304b37467b01fa78593d3dce3fb) - -- **Update Build Instructions**: Updated the build instructions to build with the Conan package manager and restructured info for easier comprehension. [#4376](https://github.com/XRPLF/rippled/pull/4376), [#4383](https://github.com/XRPLF/rippled/pull/4383) - -- **Revise CONTRIBUTING**: Updated code contribution guidelines. `rippled` is an open source project and contributions are very welcome. [#4382](https://github.com/XRPLF/rippled/pull/4382) - -- **Update documented pathfinding configuration defaults**: `417cfc2` changed the default Path Finding configuration values, but missed updating the values documented in rippled-example.cfg. Updated those defaults and added recommended values for nodes that want to support advanced pathfinding. [#4409](https://github.com/XRPLF/rippled/pull/4409) - -- **Remove gRPC code previously used for the Xpring SDK**: Removed gRPC code used for the Xpring SDK. The gRPC API is also enabled locally by default in `rippled-example.cfg`. This API is used for [Reporting Mode](https://xrpl.org/build-run-rippled-in-reporting-mode.html) and [Clio](https://github.com/XRPLF/clio). [28f4cc7](https://github.com/XRPLF/rippled/pull/4321/commits/28f4cc7817c2e477f0d7e9ade8f07a45ff2b81f1) - -- **Switch from C++17 to C++20**: Updated `rippled` to use C++20. [92d35e5](https://github.com/XRPLF/rippled/pull/4270/commits/92d35e54c7de6bbe44ff6c7c52cc0765b3f78258) - -- **Support for Boost 1.80.0:**: [04ef885](https://github.com/XRPLF/rippled/pull/4321/commits/04ef8851081f6ee9176783ad3725960b8a931ebb) - -- **Reduce default reserves to 10/2**: Updated the hard-coded default reserves to match the current settings on Mainnet. [#4329](https://github.com/XRPLF/rippled/pull/4329) - -- **Improve self-signed certificate generation**: Improved speed and security of TLS certificate generation on fresh startup. [0ecfc7c](https://github.com/XRPLF/rippled/pull/4270/commits/0ecfc7cb1a958b731e5f184876ea89ae2d4214ee) - - -### Bug Fixes - -- **Update command-line usage help message**: Added `manifest` and `validator_info` to the `rippled` CLI usage statement. [b88ed5a](https://github.com/XRPLF/rippled/pull/4270/commits/b88ed5a8ec2a0735031ca23dc6569d54787dc2f2) - -- **Work around gdb bug by changing a template parameter**: Added a workaround for a bug in gdb, where unsigned template parameters caused issues with RTTI. [#4332](https://github.com/XRPLF/rippled/pull/4332) - -- **Fix clang 15 warnings**: [#4325](https://github.com/XRPLF/rippled/pull/4325) - -- **Catch transaction deserialization error in doLedgerGrpc**: Fixed an issue in the gRPC API, so `Clio` can extract ledger headers and state objects from specific transactions that can't be deserialized by `rippled` code. [#4323](https://github.com/XRPLF/rippled/pull/4323) - -- **Update dependency: gRPC**: New Conan recipes broke the old version of gRPC, so the dependency was updated. [#4407](https://github.com/XRPLF/rippled/pull/4407) - -- **Fix Doxygen workflow**: Added options to build documentation that don't depend on the library dependencies of `rippled`. [#4372](https://github.com/XRPLF/rippled/pull/4372) - -- **Don't try to read SLE with key 0 from the ledger**: Fixed the `preclaim` function to check for 0 in `NFTokenSellOffer` and `NFTokenBuyOffer` before calling `Ledger::read`. This issue only affected debug builds. 
[#4351](https://github.com/XRPLF/rippled/pull/4351) - -- **Update broken link to hosted Doxygen content**: [5e1cb09](https://github.com/XRPLF/rippled/pull/4270/commits/5e1cb09b8892e650f6c34a66521b6b1673bd6b65) - - -### Code Cleanup - -- **Prevent unnecessary `shared_ptr` copies by accepting a value in `SHAMapInnerNode::setChild`**: [#4266](https://github.com/XRPLF/rippled/pull/4266) - -- **Release TaggedCache object memory outside the lock**: [3726f8b](https://github.com/XRPLF/rippled/pull/4321/commits/3726f8bf31b3eab8bab39dce139656fd705ae9a0) - -- **Rename SHAMapStoreImp::stopping() to healthWait()**: [7e9e910](https://github.com/XRPLF/rippled/pull/4321/commits/7e9e9104eabbf0391a0837de5630af17a788e233) - -- **Improve wrapper around OpenSSL RAND**: [7b3507b](https://github.com/XRPLF/rippled/pull/4270/commits/7b3507bb873495a974db33c57a888221ddabcacc) - -- **Improve AccountID string conversion caching**: Improved memory cache usage. [e2eed96](https://github.com/XRPLF/rippled/pull/4270/commits/e2eed966b0ecb6445027e6a023b48d702c5f4832) - -- **Build the command map at compile time**: [9aaa0df](https://github.com/XRPLF/rippled/pull/4270/commits/9aaa0dff5fd422e5f6880df8e20a1fd5ad3b4424) - -- **Avoid unnecessary copying and dynamic memory allocations**: [d318ab6](https://github.com/XRPLF/rippled/pull/4270/commits/d318ab612adc86f1fd8527a50af232f377ca89ef) - -- **Use constexpr to check memo validity**: [e67f905](https://github.com/XRPLF/rippled/pull/4270/commits/e67f90588a9050162881389d7e7d1d0fb31066b0) - -- **Remove charUnHex**: [83ac141](https://github.com/XRPLF/rippled/pull/4270/commits/83ac141f656b1a95b5661853951ebd95b3ffba99) - -- **Remove deprecated AccountTxOld.cpp**: [ce64f7a](https://github.com/XRPLF/rippled/pull/4270/commits/ce64f7a90f99c6b5e68d3c3d913443023de061a6) - -- **Remove const_cast usage**: [23ce431](https://github.com/XRPLF/rippled/pull/4321/commits/23ce4318768b718c82e01004d23f1abc9a9549ff) - -- **Remove inaccessible code paths and outdated data format wchar_t**: [95fabd5](https://github.com/XRPLF/rippled/pull/4321/commits/95fabd5762a4917753c06268192e4d4e4baef8e4) - -- **Improve move semantics in Expected**: [#4326](https://github.com/XRPLF/rippled/pull/4326) - - -### GitHub - -The public source code repository for `rippled` is hosted on GitHub at . - -We welcome all contributions and invite everyone to join the community of XRP Ledger developers to help build the Internet of Value. - -### Credits - -The following people contributed directly to this release: - -- Alexander Kremer -- Alloy Networks <45832257+alloynetworks@users.noreply.github.com> -- CJ Cobb <46455409+cjcobb23@users.noreply.github.com> -- Chenna Keshava B S -- Crypto Brad Garlinghouse -- Denis Angell -- Ed Hennis -- Elliot Lee -- Gregory Popovitch -- Howard Hinnant -- J. Scott Branson <18340247+crypticrabbit@users.noreply.github.com> -- John Freeman -- ledhed2222 -- Levin Winter <33220502+levinwinter@users.noreply.github.com> -- manojsdoshi -- Nik Bougalis -- RichardAH -- Scott Determan -- Scott Schurr -- Shawn Xie <35279399+shawnxie999@users.noreply.github.com> - -Security Bug Bounty Acknowledgements: -- Aaron Hook -- Levin Winter - -Bug Bounties and Responsible Disclosures: -We welcome reviews of the rippled code and urge researchers to -responsibly disclose any issues they may find. - -To report a bug, please send a detailed report to: - - bugs@xrpl.org - - -# Introducing XRP Ledger version 1.9.4 - -Version 1.9.4 of `rippled`, the reference implementation of the XRP Ledger protocol is now available. 
This release introduces an amendment that removes the ability for an NFT issuer to indicate that trust lines should be automatically created for royalty payments from secondary sales of NFTs, in response to a bug report that indicated how this functionality could be abused to mount a denial of service attack against the issuer. - -## Action Required - -This release introduces a new amendment to the XRP Ledger protocol, **`fixRemoveNFTokenAutoTrustLine`** to mitigate a potential denial-of-service attack against NFT issuers that minted NFTs and allowed secondary trading of those NFTs to create trust lines for any asset. - -This amendment is open for voting according to the XRP Ledger's [amendment process](https://xrpl.org/amendments.html), which enables protocol changes following two weeks of >80% support from trusted validators. - -If you operate an XRP Ledger server, then you should upgrade to version 1.9.4 within two weeks, to ensure service continuity. The exact time that protocol changes take effect depends on the voting decisions of the decentralized network. - -For more information about NFTs on the XRP Ledger, see [NFT Conceptual Overview](https://xrpl.org/nft-conceptual-overview.html). - - -## Install / Upgrade - -On supported platforms, see the [instructions on installing or updating `rippled`](https://xrpl.org/install-rippled.html). - -## Changelog - -## Contributions - -The primary change in this release is the following bug fix: - -- **Introduce fixRemoveNFTokenAutoTrustLine amendment**: Introduces the `fixRemoveNFTokenAutoTrustLine` amendment, which disables the `tfTrustLine` flag, which a malicious attacker could exploit to mount denial-of-service attacks against NFT issuers that specified the flag on their NFTs. ([#4301](https://github.com/XRPLF/rippled/4301)) - - -### GitHub - -The public source code repository for `rippled` is hosted on GitHub at . - -We welcome all contributions and invite everyone to join the community of XRP Ledger developers and help us build the Internet of Value. - -### Credits - -The following people contributed directly to this release: - -- Scott Schurr -- Howard Hinnant -- Scott Determan -- Ikko Ashimine - - -# Introducing XRP Ledger version 1.9.3 - -Version 1.9.3 of `rippled`, the reference server implementation of the XRP Ledger protocol is now available. This release corrects minor technical flaws with the code that loads configured amendment votes after a startup and the copy constructor of `PublicKey`. - -## Install / Upgrade - -On supported platforms, see the [instructions on installing or updating `rippled`](https://xrpl.org/install-rippled.html). - -## Changelog - -## Contributions - -This release contains the following bug fixes: - -- **Change by-value to by-reference to persist vote**: A minor technical flaw, caused by use of a copy instead of a reference, resulted in operator-configured "yes" votes to not be properly loaded after a restart. ([#4256](https://github.com/XRPLF/rippled/pull/4256)) -- **Properly handle self-assignment of PublicKey**: The `PublicKey` copy assignment operator mishandled the case where a `PublicKey` would be assigned to itself, and could result in undefined behavior. - -### GitHub - -The public source code repository for `rippled` is hosted on GitHub at . - -We welcome contributions, big and small, and invite everyone to join the community of XRP Ledger developers and help us build the Internet of Value. 
- -### Credits - -The following people contributed directly to this release: - -- Howard Hinnant -- Crypto Brad Garlinghouse -- Wo Jake <87929946+wojake@users.noreply.github.com> - - -# Introducing XRP Ledger version 1.9.2 - -Version 1.9.2 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. This release includes several fixes and improvements, including a second new fix amendment to correct a bug in Non-Fungible Tokens (NFTs) code, a new API method for order book changes, less noisy logging, and other small fixes. - - - - -## Action Required - -This release introduces two new amendments to the XRP Ledger protocol. The first, **fixNFTokenNegOffer**, fixes a bug in code associated with the **NonFungibleTokensV1** amendment, originally introduced in [version 1.9.0](https://xrpl.org/blog/2022/rippled-1.9.0.html). The second, **NonFungibleTokensV1_1**, is a "roll-up" amendment that enables the **NonFungibleTokensV1** feature plus the two fix amendments associated with it, **fixNFTokenDirV1** and **fixNFTokenNegOffer**. - -If you want to enable NFT code on the XRP Ledger Mainnet, you can vote in favor of only the **NonFungibleTokensV1_1** amendment to support enabling the feature and fixes together, without risk that the unfixed NFT code may become enabled first. - -These amendments are now open for voting according to the XRP Ledger's [amendment process](https://xrpl.org/amendments.html), which enables protocol changes following two weeks of >80% support from trusted validators. - -If you operate an XRP Ledger server, then you should upgrade to version 1.9.2 within two weeks, to ensure service continuity. The exact time that protocol changes take effect depends on the voting decisions of the decentralized network. - -For more information about NFTs on the XRP Ledger, see [NFT Conceptual Overview](https://xrpl.org/nft-conceptual-overview.html). - -## Install / Upgrade - -On supported platforms, see the [instructions on installing or updating `rippled`](https://xrpl.org/install-rippled.html). - -## Changelog - -This release contains the following features and improvements. - -- **Introduce fixNFTokenNegOffer amendment.** This amendment fixes a bug in the Non-Fungible Tokens (NFTs) functionality provided by the NonFungibleTokensV1 amendment (not currently enabled on Mainnet). The bug allowed users to place offers to buy tokens for negative amounts of money when using Brokered Mode. Anyone who accepted such an offer would transfer the token _and_ pay money. This amendment explicitly disallows offers to buy or sell NFTs for negative amounts of money, and returns an appropriate error code. This also corrects the error code returned when placing offers to buy or sell NFTs for negative amounts in Direct Mode. ([8266d9d](https://github.com/XRPLF/rippled/commit/8266d9d598d19f05e1155956b30ca443c27e119e)) -- **Introduce `NonFungibleTokensV1_1` amendment.** This amendment encompasses three NFT-related amendments: the original NonFungibleTokensV1 amendment (from version 1.9.0), the fixNFTokenDirV1 amendment (from version 1.9.1), and the new fixNFTokenNegOffer amendment from this release. This amendment contains no changes other than enabling those three amendments together; this allows validators to vote in favor of _only_ enabling the feature and fixes at the same time.
([59326bb](https://github.com/XRPLF/rippled/commit/59326bbbc552287e44b3a0d7b8afbb1ddddb3e3b)) -- **Handle invalid port numbers.** If the user specifies a URL with an invalid port number, the server would silently attempt to use port 0 instead. Now it raises an error instead. This affects admin API methods and config file parameters for downloading history shards and specifying validator list sites. ([#4213](https://github.com/XRPLF/rippled/pull/4213)) -- **Reduce log noisiness.** Decreased the severity of benign log messages in several places: "addPathsForType" messages during regular operation, expected errors during unit tests, and missing optional documentation components when compiling from source. ([#4178](https://github.com/XRPLF/rippled/pull/4178), [#4166](https://github.com/XRPLF/rippled/pull/4166), [#4180](https://github.com/XRPLF/rippled/pull/4180)) -- **Fix race condition in history shard implementation and support clang's ThreadSafetyAnalysis tool.** Added build settings so that developers can use this feature of the clang compiler to analyze the code for correctness, and fix an error found by this tool, which was the source of rare crashes in unit tests. ([#4188](https://github.com/XRPLF/rippled/pull/4188)) -- **Prevent crash when rotating a database with missing data.** When rotating databases, a missing entry could cause the server to crash. While there should never be a missing database entry, this change keeps the server running by aborting database rotation. ([#4182](https://github.com/XRPLF/rippled/pull/4182)) -- **Fix bitwise comparison in OfferCreate.** Fixed an expression that incorrectly used a bitwise comparison for two boolean values rather than a true boolean comparison. The outcome of the two comparisons is equivalent, so this is not a transaction processing change, but the bitwise comparison relied on compilers to implicitly fix the expression. ([#4183](https://github.com/XRPLF/rippled/pull/4183)) -- **Disable cluster timer when not in a cluster.** Disabled a timer that was unused on servers not running in clustered mode. The functionality of clustered servers is unchanged. ([#4173](https://github.com/XRPLF/rippled/pull/4173)) -- **Limit how often to process peer discovery messages.** In the peer-to-peer network, servers periodically share IP addresses of their peers with each other to facilitate peer discovery. It is not necessary to process these types of messages too often; previously, the code tracked whether it needed to process new messages of this type but always processed them anyway. With this change, the server no longer processes peer discovery messages if it has done so recently. ([#4202](https://github.com/XRPLF/rippled/pull/4202)) -- **Improve STVector256 deserialization.** Optimized the processing of this data type in protocol messages. This data type is used in several types of ledger entry that are important for bookkeeping, including directory pages that track other ledger types, amendments tracking, and the ledger hashes history. ([#4204](https://github.com/XRPLF/rippled/pull/4204)) -- **Fix and refactor spinlock code.** The spinlock code, which protects the `SHAMapInnerNode` child lists, had a mistake that allowed the same child to be repeatedly locked under some circumstances. Fixed this bug and improved the spinlock code to make it easier to use correctly and easier to verify that the code works correctly. 
([#4201](https://github.com/XRPLF/rippled/pull/4201)) -- **Improve comments and contributor documentation.** Various minor documentation changes including some to reflect the fact that the source code repository is now owned by the XRP Ledger Foundation. ([#4214](https://github.com/XRPLF/rippled/pull/4214), [#4179](https://github.com/XRPLF/rippled/pull/4179), [#4222](https://github.com/XRPLF/rippled/pull/4222)) -- **Introduces a new API book_changes to provide information in a format that is useful for building charts that highlight DEX activity at a per-ledger level.** ([#4212](https://github.com/XRPLF/rippled/pull/4212)) - -## Contributions - -### GitHub - -The public source code repository for `rippled` is hosted on GitHub at . - -We welcome contributions, big and small, and invite everyone to join the community of XRP Ledger developers and help us build the Internet of Value. - -### Credits - -The following people contributed directly to this release: - -- Chenna Keshava B S -- Ed Hennis -- Ikko Ashimine -- Nik Bougalis -- Richard Holland -- Scott Schurr -- Scott Determan - -For a real-time view of all lifetime contributors, including links to the commits made by each, please visit the "Contributors" section of the GitHub repository: . - -# Introducing XRP Ledger version 1.9.1 - -Version 1.9.1 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. This release includes several important fixes, including a fix for a syncing issue from 1.9.0, a new fix amendment to correct a bug in the new Non-Fungible Tokens (NFTs) code, and a new amendment to allow multi-signing by up to 32 signers. - - - - -## Action Required - -This release introduces two new amendments to the XRP Ledger protocol. These amendments are now open for voting according to the XRP Ledger's [amendment process](https://xrpl.org/amendments.html), which enables protocol changes following two weeks of >80% support from trusted validators. - -If you operate an XRP Ledger server, then you should upgrade to version 1.9.1 within two weeks, to ensure service continuity. The exact time that protocol changes take effect depends on the voting decisions of the decentralized network. - -The **fixNFTokenDirV1** amendment fixes a bug in code associated with the **NonFungibleTokensV1** amendment, so the fixNFTokenDirV1 amendment should be enabled first. All validator operators are encouraged to [configure amendment voting](https://xrpl.org/configure-amendment-voting.html) to oppose the NonFungibleTokensV1 amendment until _after_ the fixNFTokenDirV1 amendment has become enabled. For more information about NFTs on the XRP Ledger, see [NFT Conceptual Overview](https://xrpl.org/nft-conceptual-overview.html). - -The **ExpandedSignerList** amendment extends the ledger's built-in multi-signing functionality so that each list can contain up to 32 entries instead of the current limit of 8. Additionally, this amendment allows each signer to have an arbitrary 256-bit data field associated with it. This data can be used to identify the signer or provide other metadata that is useful for organizations, smart contracts, or other purposes. - -## Install / Upgrade - -On supported platforms, see the [instructions on installing or updating `rippled`](https://xrpl.org/install-rippled.html). - -## Changelog - -This release contains the following features and improvements. 
- -## New Features and Amendments - -- **Introduce fixNFTokenDirV1 Amendment** - This amendment fixes an off-by-one error that occurred in some corner cases when determining which `NFTokenPage` an `NFToken` object belongs on. It also adjusts the constraints of `NFTokenPage` invariant checks, so that certain error cases fail with a suitable error code such as `tecNO_SUITABLE_TOKEN_PAGE` instead of failing with a `tecINVARIANT_FAILED` error code. ([#4155](https://github.com/ripple/rippled/pull/4155)) - -- **Introduce ExpandedSignerList Amendment** - This amendment expands the maximum signer list size to 32 entries and allows each signer to have an optional 256-bit `WalletLocator` field containing arbitrary data. ([#4097](https://github.com/ripple/rippled/pull/4097)) - -- **Pause online deletion rather than canceling it if the server fails health check** - The server stops performing online deletion of old ledger history if the server fails its internal health check during this time. Online deletion can now resume after the server recovers, rather than having to start over. ([#4139](https://github.com/ripple/rippled/pull/4139)) - - -## Bug Fixes and Performance Improvements - -- **Fix performance issues introduced in 1.9.0** - Readjusts some parameters of the ledger acquisition engine to revert some changes introduced in 1.9.0 that had adverse effects on some systems, including causing some systems to fail to sync to the network. ([#4152](https://github.com/ripple/rippled/pull/4152)) - -- **Improve Memory Efficiency of Path Finding** - Finding paths for cross-currency payments is a resource-intensive operation. While that remains true, this fix improves memory usage of pathfinding by discarding trust line results that cannot be used before those results are fully loaded or cached. ([#4111](https://github.com/ripple/rippled/pull/4111)) - -- **Fix incorrect CMake behavior on Windows when platform is unspecified or x64** - Fixes handling of platform selection when using the cmake-gui tool to build on Windows. The generator expects `Win64` but the GUI only provides `x64` as an option, which raises an error. This fix only raises an error if the platform is `Win32` instead, allowing the generation of solution files to succeed. ([#4150](https://github.com/ripple/rippled/pull/4150)) - -- **Fix test failures with newer MSVC compilers on Windows** - Fixes some cases where the API handler code used string pointer comparisons, which may not work correctly with some versions of the MSVC compiler. ([#4149](https://github.com/ripple/rippled/pull/4149)) - -- **Update minimum Boost version to 1.71.0** - This release is compatible with Boost library versions 1.71.0 through 1.77.0. The build configuration and documentation have been updated to reflect this. ([#4134](https://github.com/ripple/rippled/pull/4134)) - -- **Fix unit test failures for DatabaseDownloader** - Increases a timeout in the `DatabaseDownloader` code and adjusts unit tests so that the code does not return spurious failures, and more data is logged if it does fail. ([#4021](https://github.com/ripple/rippled/pull/4021)) - -- **Refactor relational database interface** - Improves code comments, naming, and organization of the module that interfaces with relational databases (such as the SQLite database used for tracking transaction history). ([#3965](https://github.com/ripple/rippled/pull/3965)) - - -## Contributions - -### GitHub - -The public source code repository for `rippled` is hosted on GitHub at . 
- -We welcome contributions, big and small, and invite everyone to join the community of XRP Ledger developers and help us build the Internet of Value. - - -### Credits - -The following people contributed directly to this release: - -- Devon White -- Ed Hennis -- Gregory Popovitch -- Mark Travis -- Manoj Doshi -- Nik Bougalis -- Richard Holland -- Scott Schurr - -For a real-time view of all lifetime contributors, including links to the commits made by each, please visit the "Contributors" section of the GitHub repository: . - -We welcome external contributions and are excited to see the broader XRP Ledger community continue to grow and thrive. - - -# Change log - -- API version 2 will now return `signer_lists` in the root of the `account_info` response, no longer nested under `account_data`. - -# Releases - -## Version 1.9.0 -This is the 1.9.0 release of `rippled`, the reference implementation of the XRP Ledger protocol. This release brings several features and improvements. - -### New and Improved Features -- **Introduce NFT support (XLS020):** This release introduces support for non-fungible tokens, currently available to the developer community for broader review and testing. Developers can create applications that allow users to mint, transfer, and ultimately burn (if desired) NFTs on the XRP Ledger. You can try out the new NFT transactions using the [nft-devnet](https://xrpl.org/xrp-testnet-faucet.html). Note that some fields and error codes from earlier releases of the supporting code have been refactored for this release, shown in the Code Refactoring section, below. [70779f](https://github.com/ripple/rippled/commit/70779f6850b5f33cdbb9cf4129bc1c259af0013e) - -- **Simplify the Job Queue:** This is a refactor aimed at cleaning up and simplifying the existing job queue. Currently, all jobs are canceled at the same time and in the same way, so this commit removes the unnecessary per-job cancellation token. [#3656](https://github.com/ripple/rippled/pull/3656) - -- **Optimize trust line caching:** The existing trust line caching code was suboptimal in that it stored redundant information, pinned SLEs into memory, and required multiple memory allocations per cached object. This commit eliminates redundant data, reduces the size of cached objects, unpins SLEs from memory, and uses value types to avoid the need for `std::shared_ptr`. As a result of these changes, the effective size of a cached object, including the overhead of the memory allocator and the `std::shared_ptr`, should be reduced by at least 64 bytes. This is significant, as there can easily be tens of millions of these objects. [4d5459](https://github.com/ripple/rippled/commit/4d5459d041da8f5a349c5f458d664e5865e1f1b5) - -- **Incremental improvements to pathfinding memory usage:** This commit aborts background pathfinding when closed or disconnected, exits the pathfinding job thread if there are no requests left, does not create a pathfinding job if there are no requests, and refactors to remove the circular dependency between InfoSub and PathRequest. [#4111](https://github.com/ripple/rippled/pull/4111) - -- **Improve deterministic transaction sorting in TxQ:** This commit ensures that transactions with the same fee level are sorted by TxID XORed with the parent ledger hash, the TxQ is re-sorted after every ledger, and attempts to future-proof the TxQ tie-breaking test.
[#4077](https://github.com/ripple/rippled/pull/4077) - -- **Improve stop signaling for Application:** [34ca45](https://github.com/ripple/rippled/commit/34ca45713244d0defc39549dd43821784b2a5c1d) - -- **Eliminate SHAMapInnerNode lock contention:** The `SHAMapInnerNode` class had a global mutex to protect the array of node children. Profiling suggested that around 4% of all attempts to lock the global would block. This commit removes that global mutex, and replaces it with a new per-node 16-way spinlock (implemented so as not to affect the size of an inner node object), effectively eliminating the lock contention. [1b9387](https://github.com/ripple/rippled/commit/1b9387eddc1f52165d3243d2ace9be0c62495eea) - -- **Improve ledger-fetching logic:** When fetching ledgers, the existing code would isolate the peer that sent the most useful responses, and issue follow-up queries only to that peer. This commit increases the query aggressiveness, and changes the mechanism used to select which peers to issue follow-up queries to so as to more evenly spread the load among those peers that provided useful responses. [48803a](https://github.com/ripple/rippled/commit/48803a48afc3bede55d71618c2ee38fd9dbfd3b0) - -- **Simplify and improve order book tracking:** The order book tracking code would use `std::shared_ptr` to track the lifetime of objects. This commit changes the logic to eliminate the overhead of `std::shared_ptr` by using value types, resulting in significant memory savings. [b9903b](https://github.com/ripple/rippled/commit/b9903bbcc483a384decf8d2665f559d123baaba2) - -- **Negative cache support for node store:** This commit allows the cache to service requests for nodes that were previously looked up but not found, reducing the need to perform I/O in several common scenarios. [3eb8aa](https://github.com/ripple/rippled/commit/3eb8aa8b80bd818f04c99cee2cfc243192709667) - -- **Improve asynchronous database handlers:** This commit optimizes the way asynchronous node store operations are processed, both by reducing the number of times locks are held and by minimizing the number of memory allocations and data copying. [6faaa9](https://github.com/ripple/rippled/commit/6faaa91850d6b2eb9fbf16c1256bf7ef11ac4646) - -- **Cleanup AcceptedLedger and AcceptedLedgerTx:** This commit modernizes the `AcceptedLedger` and `AcceptedLedgerTx` classes, reduces their memory footprint, and reduces unnecessary dynamic memory allocations. [8f5868](https://github.com/ripple/rippled/commit/8f586870917818133924bf2e11acab5321c2b588) - -### Code Refactoring - -This release includes name changes in the NFToken API for SFields, RPC return labels, and error codes for clarity and consistency. To refactor your code, migrate the names of these items to the new names as listed below. 
- -#### `SField` name changes: -* `TokenTaxon -> NFTokenTaxon` -* `MintedTokens -> MintedNFTokens` -* `BurnedTokens -> BurnedNFTokens` -* `TokenID -> NFTokenID` -* `TokenOffers -> NFTokenOffers` -* `BrokerFee -> NFTokenBrokerFee` -* `Minter -> NFTokenMinter` -* `NonFungibleToken -> NFToken` -* `NonFungibleTokens -> NFTokens` -* `BuyOffer -> NFTokenBuyOffer` -* `SellOffer -> NFTokenSellOffer` -* `OfferNode -> NFTokenOfferNode` - -#### RPC return labels -* `tokenid -> nft_id` -* `index -> nft_offer_index` - -#### Error codes -* `temBAD_TRANSFER_FEE -> temBAD_NFTOKEN_TRANSFER_FEE` -* `tefTOKEN_IS_NOT_TRANSFERABLE -> tefNFTOKEN_IS_NOT_TRANSFERABLE` -* `tecNO_SUITABLE_PAGE -> tecNO_SUITABLE_NFTOKEN_PAGE` -* `tecBUY_SELL_MISMATCH -> tecNFTOKEN_BUY_SELL_MISMATCH` -* `tecOFFER_TYPE_MISMATCH -> tecNFTOKEN_OFFER_TYPE_MISMATCH` -* `tecCANT_ACCEPT_OWN_OFFER -> tecCANT_ACCEPT_OWN_NFTOKEN_OFFER` - - -### Bug Fixes -- **Fix deletion of orphan node store directories:** Orphaned node store directories should only be deleted if the proper node store directories are confirmed to exist. [06e87e](https://github.com/ripple/rippled/commit/06e87e0f6add5b880d647e14ab3d950decfcf416) - -## Version 1.8.5 -This is the 1.8.5 release of `rippled`, the reference implementation of the XRP Ledger protocol. This release includes fixes and updates for stability and security, and improvements to build scripts. There are no user-facing API or protocol changes in this release. - -### Bug Fixes - -This release contains the following bug fixes and under-the-hood improvements: - -- **Correct TaggedPointer move constructor:** Fixes a bug in unused code for the TaggedPointer class. The old code would fail if a caller explicitly tried to remove a child that is not actually part of the node. (227a12d) - -- **Ensure protocol buffer prerequisites are present:** The build scripts and packages now properly handle Protobuf packages and various packages. Prior to this change, building on Ubuntu 21.10 Impish Indri would fail unless the `libprotoc-dev` package was installed. (e06465f) - -- **Improve handling of endpoints during peer discovery.** This hardens and improves handling of incoming messages on the peer protocol. (289bc0a) - -- **Run tests on updated linux distros:** Test builds now run on Rocky Linux 8, Fedora 34 and 35, Ubuntu 18, 20, and 22, and Debian 9, 10, and 11. (a9ee802) - -- **Avoid dereferencing empty optional in ReportingETL:** Fixes a bug in Reporting Mode that could dereference an empty optional value when throwing an error. (cdc215d) - -- **Correctly add GIT_COMMIT_HASH into version string:** When building the server from a non-tagged release, the build files now add the commit ID in a way that follows the semantic-versioning standard, and correctly handle the case where the commit hash ID cannot be retrieved. (d23d37f) - -- **Update RocksDB to version 6.27.3:** Updates the version of RocksDB included in the server from 6.7.3 (which was released on 2020-03-18) to 6.27.3 (released 2021-12-10). - - - -## Version 1.8.4 -This is the 1.8.4 release of `rippled`, the reference implementation of the XRP Ledger protocol. - -This release corrects a technical flaw introduced with 1.8.3 that may result in failures if the newly-introduced 'fast loading' is enabled. The release also adjusts default parameters used to configure the pathfinding engine to reduce resource usage. 
- -### Bug Fixes -- **Adjust mutex scope in `walkMapParallel`**: This commit corrects a technical flaw introduced with commit [7c12f0135897361398917ad2c8cda888249d42ae] that would result in undefined behavior if the server operator configured their server to use the 'fast loading' mechanism introduced with 1.8.3. - -- **Adjust pathfinding configuration defaults**: This commit adjusts the default configuration of the pathfinding engine, to account for the size of the XRP Ledger mainnet. Unless explicitly overridden, the changes mean that pathfinding operations will return fewer, shallower paths than previous releases. - - -## Version 1.8.3 -This is the 1.8.3 release of `rippled`, the reference implementation of the XRP Ledger protocol. - -This release implements changes that improve the syncing performance of peers on the network, adds countermeasures to several routines involving LZ4 to defend against CVE-2021-3520, corrects a minor technical flaw that would result in the server not using a cache for nodestore operations, and adjusts tunable values to optimize disk I/O. - -### Summary of Issues -Recently, servers in the XRP Ledger network have been taking an increasingly long time to sync back to the network after restarting. This is one of several releases which will be made to improve on this issue. - - -### Bug Fixes - -- **Parallel ledger loader & I/O performance improvements**: This commit makes several changes that, together, should decrease the time needed for a server to sync to the network. To make full use of this change, `rippled` needs to be using storage with high IOPS and operators need to explicitly enable this behavior by adding the following to their config file, under the `[node_db]` stanza: - - [node_db] - ... - fast_load=1 - -Note that when 'fast loading' is enabled the server will not open RPC and WebSocket interfaces until after the initial load is completed. Because of this, it may appear unresponsive or down. - -- **Detect CVE-2021-3520 when decompressing using LZ4**: This commit adds code to detect LZ4 payloads that may result in out-of-bounds memory accesses. - -- **Provide sensible default values for nodestore cache:** The nodestore includes a built-in cache to reduce the disk I/O load but, by default, this cache was not initialized unless it was explicitly configured by the server operator. This commit introduces sensible defaults based on the server's configured node size. - -- **Adjust the number of concurrent ledger data jobs**: Processing a large amount of data at once can effectively bottleneck a server's I/O subsystem. This commit helps optimize I/O performance by controlling how many jobs can concurrently process ledger data. - -- **Two small SHAMapSync improvements**: This commit makes minor changes to optimize the way memory is used and control the amount of background I/O performed when attempting to fetch missing `SHAMap` nodes. - -## Version 1.8.2 -Ripple has released version 1.8.2 of rippled, the reference server implementation of the XRP Ledger protocol. This release addresses the full transaction queues and elevated transaction fees issue observed on the XRP Ledger, and also provides some optimizations and small fixes to improve the server's performance overall. - -### Summary of Issues -Recently, servers in the XRP Ledger network have had full transaction queues and transactions paying low fees have mostly not been able to be confirmed through the queue.
After investigation, it was discovered that a large influx of transactions to the network caused it to raise the transaction costs to be proposed in the next ledger block, and defer transactions paying lower costs to later ledgers. The first part worked as designed, but deferred transactions were not being confirmed as the ledger had capacity to process them. - -The root cause was that there were very many low-cost transactions that different servers in the network received in a different order due to incidental differences in timing or network topology, which caused validators to propose different sets of low-cost transactions from the queue. Since none of these transactions had support from a majority of validators, they were removed from the proposed transaction set. Normally, any transactions removed from a proposed transaction set are supposed to be retried in the next ledger, but servers attempted to put these deferred transactions into their transaction queues first, which had filled up. As a result, the deferred transactions were discarded, and the network was only able to confirm transactions that paid high costs. - -### Bug Fixes - -- **Address elevated transaction fees**: This change addresses the full queue problems in two ways. First, it puts deferred transactions directly into the open ledger, rather than transaction queue. This reverts a subset of the changes from [ximinez@62127d7](https://github.com/ximinez/rippled/commit/62127d725d801641bfaa61dee7d88c95e48820c5). A transaction that is in the open ledger but doesn't get validated should stay in the open ledger so that it can be proposed again right away. Second, it changes the order in which transactions are pulled from the transaction queue to increase the overlap in servers' initial transaction consensus proposals. Like the old rules, transactions paying higher fee levels are selected first. Unlike the old rules, transactions paying the same fee level are ordered by transaction ID / hash ascending. (Previously, transactions paying the same fee level were unsorted, resulting in each server having a different order.) - -- **Add ignore_default option to account_lines API**: This flag, if present, suppresses the output of incoming trust lines in the default state. This is primarily motivated by observing that users often have many unwanted incoming trust lines in a default state, which are not useful in the vast majority of cases. Being able to suppress those when doing `account_lines` saves bandwidth and resources. ([#3980](https://github.com/ripple/rippled/pull/3980)) - -- **Make I/O and prefetch worker threads configurable**: This commit adds the ability to specify **io_workers** and **prefetch_workers** in the config file which can be used to specify the number of threads for processing raw inbound and outbound IO and configure the number of threads for performing node store prefetching. ([#3994](https://github.com/ripple/rippled/pull/3994)) - -- **Enforce account RPC limits by objects traversed**: This changes the way the account_objects API method counts and limits the number of objects it returns. Instead of limiting results by the number of objects found, it counts by the number of objects traversed. Additionally, the default and maximum limits for non-admin connections have been decreased. This reduces the amount of work that one API call can do so that public API servers can share load more effectively. 
([#4032](https://github.com/ripple/rippled/pull/4032)) - -- **Fix a crash on shutdown**: The NuDB backend class could throw an error in its destructor, resulting in a crash while the server was shutting down gracefully. This crash was harmless but resulted in false alarms and noise when tracking down other possible crashes. ([#4017](https://github.com/ripple/rippled/pull/4017)) - -- **Improve reporting of job queue in admin server_info**: The server_info command, when run with admin permissions, provides information about jobs in the server's job queue. This commit provides more descriptive names and more granular categories for many jobs that were previously all identified as "clientCommand". ([#4031](https://github.com/ripple/rippled/pull/4031)) - -- **Improve full & compressed inner node deserialization**: Remove a redundant copy operation from low-level SHAMap deserialization. ([#4004](https://github.com/ripple/rippled/pull/4004)) - -- **Reporting mode: only forward to P2P nodes that are synced**: Previously, reporting mode servers forwarded to any of their configured P2P nodes at random. This commit improves the selection so that it only chooses from P2P nodes that are fully synced with the network. ([#4028](https://github.com/ripple/rippled/pull/4028)) - -- **Improve handling of HTTP X-Forwarded-For and Forwarded headers**: Fixes the way the server handles IPv6 addresses in these HTTP headers. ([#4009](https://github.com/ripple/rippled/pull/4009), [#4030](https://github.com/ripple/rippled/pull/4030)) - -- **Other minor improvements to logging and Reporting Mode.** - - -## Version 1.8.0 -Ripple has released version 1.8.0 of rippled, the reference server implementation of the XRP Ledger protocol. This release brings several features and improvements. - -### New and Improved Features - -- **Improve History Sharding**: Shards of ledger history are now assembled in a deterministic way so that any server can make a binary-identical shard for a given range of ledgers. This makes it possible to retrieve a shard from multiple sources in parallel, then verify its integrity by comparing checksums with peers' checksums for the same shard. Additionally, there's a new admin RPC command to import ledger history from the shard store, and the crawl_shards command has been expanded with more info. ([#2688](https://github.com/ripple/rippled/issues/2688), [#3726](https://github.com/ripple/rippled/pull/3726), [#3875](https://github.com/ripple/rippled/pull/3875)) -- **New CheckCashMakesTrustLine Amendment**: If enabled, this amendment will change the CheckCash transaction type so that cashing a check for an issued token automatically creates a trust line to hold the token, similar to how purchasing a token in the decentralized exchange creates a trust line to hold the token. This change provides a way for issuers to send tokens to a user before that user has set up a trust line, but without forcing anyone to hold tokens they don't want. ([#3823](https://github.com/ripple/rippled/pull/3823)) -- **Automatically determine the node size**: The server now selects an appropriate `[node_size]` configuration value by default if it is not explicitly specified. This parameter tunes various settings to the specs of the hardware that the server is running on, especially the amount of RAM and the number of CPU threads available in the system. Previously the server always chose the smallest value by default. 
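For operators who prefer to pin this setting rather than rely on the new auto-detection, the stanza can still be set explicitly in the config file. A minimal sketch (the value shown is illustrative; pick the profile that matches your hardware):

    [node_size]
    huge

Accepted values are `tiny`, `small`, `medium`, `large`, and `huge`.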
-- **Improve transaction relaying logic**: Previously, the server relayed every transaction to all its peers (except the one that it received the transaction from). To reduce redundant messages, the server now relays transactions to a subset of peers using a randomized algorithm. Peers can determine whether there are transactions they have not seen and can request them from a peer that has them. It is expected that this feature will further reduce the bandwidth needed to operate a server. -- **Improve the Byzantine validator detector**: This expands the detection capabilities of the Byzantine validation detector. Previously, the server only monitored validators on its own UNL. Now, the server monitors for Byzantine behavior in all validations it sees. -- **Experimental tx stream with history for sidechains**: Adds an experimental subscription stream for sidechain federators to track messages on the main chain in canonical order. This stream is expected to change or be replaced in future versions as work on sidechains matures. -- **Support Debian 11 Bullseye**: This is the first release that is compatible with Debian Linux version 11.x, "Bullseye." The .deb packages now use absolute paths only, for compatibility with Bullseye's stricter package requirements. ([#3909](https://github.com/ripple/rippled/pull/3909)) -- **Improve Cache Performance**: The server uses a new storage structure for several in-memory caches for greatly improved overall performance. The process of purging old data from these caches, called "sweeping", was time-consuming and blocked other important activities necessary for maintaining ledger state and participating in consensus. The new structure divides the caches into smaller partitions that can be swept in parallel. -- **Amendment default votes:** Introduces variable default votes per amendment. Previously the server always voted "yes" on any new amendment unless an admin explicitly configured a voting preference for that amendment. Now the server's default vote can be "yes" or "no" in the source code. This should allow a safer, more gradual roll-out of new amendments, as new releases can be configured to understand a new amendment but not vote for it by default. ([#3877](https://github.com/ripple/rippled/pull/3877)) -- **More fields in the `validations` stream:** The `validations` subscription stream in the API now reports additional fields that were added to validation messages by the HardenedValidations amendment. These fields make it easier to detect misconfigurations such as multiple servers sharing a validation key pair. ([#3865](https://github.com/ripple/rippled/pull/3865)) -- **Reporting mode supports `validations` and `manifests` streams:** In the API it is now possible to connect to these streams when connected to a server running in Reporting Mode. Previously, attempting to subscribe to these streams on a reporting server failed with the error `reportingUnsupported`. ([#3905](https://github.com/ripple/rippled/pull/3905)) - -### Bug Fixes - -- **Clarify the safety of NetClock::time_point arithmetic**: * NetClock::rep is uint32_t and can be error-prone when used with subtraction.
* Fixes [#3656](https://github.com/ripple/rippled/pull/3656) -- **Fix out-of-bounds reserve, and some minor optimizations** -- **Fix nested locks in ValidatorSite** -- **Fix clang warnings about copies vs references** -- **Fix reporting mode build issue** -- **Fix potential deadlock in Validator sites** -- **Use libsecp256k1 instead of OpenSSL for key derivation**: The deterministic key derivation code was still using calls to OpenSSL. This replaces the OpenSSL-based routines with new libsecp256k1-based implementations. -- **Improve NodeStore to ShardStore imports**: This runs the import process in a background thread while preventing online_delete from removing ledgers pending import. -- **Simplify SHAMapItem construction**: The existing class offered several constructors which were mostly unnecessary. This eliminates all existing constructors and introduces a single new one, taking a `Slice`. The internal buffer is switched from `std::vector` to `Buffer` to save a minimum of 8 bytes (plus the buffer slack that is inherent in `std::vector`) per SHAMapItem instance. -- **Redesign stoppable objects**: Stoppable is no longer an abstract base class, but a pattern, modeled after the well-understood `std::thread`. The immediate benefits are less code, less synchronization, less runtime work, and (subjectively) more readable code. The end goal is to adhere to RAII in our object design, and this is one necessary step on that path. - -## Version 1.7.3 - -This is the 1.7.3 release of `rippled`, the reference implementation of the XRP Ledger protocol. This release addresses an OOB memory read identified by Guido Vranken, as well as an unrelated issue identified by the Ripple C++ team that could result in incorrect use of SLEs. Additionally, this version introduces the `NegativeUNL` amendment, which corresponds to the feature introduced with the 1.6.0 release. - -## Action Required - -If you operate an XRP Ledger server, then you should upgrade to version 1.7.3 at your earliest convenience to mitigate the issues addressed in this hotfix. If a sufficient majority of servers on the network upgrade, the `NegativeUNL` amendment may gain a majority, at which point a two-week activation countdown will begin. If the `NegativeUNL` amendment activates, servers running versions of `rippled` prior to 1.7.3 will become [amendment blocked](https://xrpl.org/amendments.html#amendment-blocked). - -### Bug Fixes - -- **Improve SLE usage in check cashing**: Fixes a situation which could result in the incorrect use of SLEs. -- **Address OOB in base58 decoder**: Corrects a technical flaw that could allow an out-of-bounds memory read in the Base58 decoder. -- **Add `NegativeUNL` as a supported amendment**: Introduces an amendment for the Negative UNL feature introduced in `rippled` 1.6.0. - -## Version 1.7.2 - -This is the 1.7.2 release of rippled, the reference server implementation of the XRP Ledger protocol. This release protects against the security issue [CVE-2021-3499](https://www.openssl.org/news/secadv/20210325.txt) affecting OpenSSL, adds an amendment to fix an issue with small offers not being properly removed from order books in some cases, and includes various other minor fixes. -Version 1.7.2 supersedes version 1.7.1 and adds fixes for more issues that were discovered during the release cycle. - -## Action Required - -This release introduces a new amendment to the XRP Ledger protocol: `fixRmSmallIncreasedQOffers`.
This amendment is now open for voting according to the XRP Ledger's amendment process, which enables protocol changes following two weeks of >80% support from trusted validators. -If you operate an XRP Ledger server, then you should upgrade to version 1.7.2 within two weeks, to ensure service continuity. The exact time that protocol changes take effect depends on the voting decisions of the decentralized network. -If you operate an XRP Ledger validator, please learn more about this amendment so you can make informed decisions about how your validator votes. If you take no action, your validator begins voting in favor of any new amendments as soon as it has been upgraded. - -### Bug Fixes - -- **fixRmSmallIncreasedQOffers Amendment:** This amendment fixes an issue where certain small offers can be left at the tip of an order book without being consumed or removed when appropriate, which causes some payments and Offers to fail when they should have succeeded [(#3827)](https://github.com/ripple/rippled/pull/3827). -- **Adjust OpenSSL defaults and mitigate CVE-2021-3499:** Prior to this fix, servers compiled against a vulnerable version of OpenSSL could have a crash triggered by a malicious network connection. This fix disables renegotiation support in OpenSSL so that the rippled server is not vulnerable to this bug regardless of the OpenSSL version used to compile the server. This also removes support for deprecated TLS versions 1.0 and 1.1 and ciphers that are not part of TLS 1.2 [(79e69da)](https://github.com/ripple/rippled/pull/3843/commits/79e69da3647019840dca49622621c3d88bc3883f). -- **Support HTTP health check in reporting mode:** Enables the Health Check special method when running the server in the new Reporting Mode introduced in 1.7.0 [(9c8cadd)](https://github.com/ripple/rippled/pull/3843/commits/9c8caddc5a197bdd642556f8beb14f06d53cdfd3). -- **Maintain compatibility for forwarded RPC responses:** Fixes a case in API responses from servers in Reporting Mode, where requests that were forwarded to a P2P-mode server would have the result field nested inside another result field [(8579eb0)](https://github.com/ripple/rippled/pull/3843/commits/8579eb0c191005022dcb20641444ab471e277f67). -- **Add load_factor in reporting mode:** Adds a load_factor value to the server info method response when running the server in Reporting Mode so that the response is compatible with the format returned by servers in P2P mode (the default) [(430802c)](https://github.com/ripple/rippled/pull/3843/commits/430802c1cf6d4179f2249a30bfab9eff8e1fa748). -- **Properly encode metadata from tx RPC command:** Fixes a problem where transaction metadata in the tx API method response would be in JSON format even when binary was requested [(7311629)](https://github.com/ripple/rippled/pull/3843/commits/73116297aa94c4acbfc74c2593d1aa2323b4cc52). -- **Updates to Windows builds:** When building on Windows, use vcpkg 2021 by default and add compatibility with MSVC 2019 [(36fe196)](https://github.com/ripple/rippled/pull/3843/commits/36fe1966c3cd37f668693b5d9910fab59c3f8b1f), [(30fd458)](https://github.com/ripple/rippled/pull/3843/commits/30fd45890b1d3d5f372a2091d1397b1e8e29d2ca). - -## Version 1.7.0 - -Ripple has released version 1.7.0 of `rippled`, the reference server implementation of the XRP Ledger protocol.
-This release [significantly improves memory usage](https://blog.ripplex.io/how-ripples-c-team-cut-rippleds-memory-footprint-down-to-size/), introduces a protocol amendment to allow out-of-order transaction execution with Tickets, and brings several other features and improvements. - -## Upgrading (SPECIAL ACTION REQUIRED) -If you use the precompiled binaries of rippled that Ripple publishes for supported platforms, please note that Ripple has renewed the GPG key used to sign these packages. -If you are upgrading from a previous install, you must download and trust the renewed key. Automatic upgrades will not work until you have re-trusted the key. -### Red Hat Enterprise Linux / CentOS - -Perform a [manual upgrade](https://xrpl.org/update-rippled-manually-on-centos-rhel.html). When prompted, confirm that the key's fingerprint matches the following example, then press `y` to accept the updated key: - -``` -$ sudo yum install rippled -Loaded plugins: fastestmirror -Loading mirror speeds from cached hostfile -* base: mirror.web-ster.com -* epel: mirrors.syringanetworks.net -* extras: ftp.osuosl.org -* updates: mirrors.vcea.wsu.edu -ripple-nightly/signature | 650 B 00:00:00 -Retrieving key from https://repos.ripple.com/repos/rippled-rpm/nightly/repodata/repomd.xml.key -Importing GPG key 0xCCAFD9A2: -Userid : "TechOps Team at Ripple " -Fingerprint: c001 0ec2 05b3 5a33 10dc 90de 395f 97ff ccaf d9a2 -From : https://repos.ripple.com/repos/rippled-rpm/nightly/repodata/repomd.xml.key -Is this ok [y/N]: y -``` - -### Ubuntu / Debian - -Download and trust the updated public key, then perform a [manual upgrade](https://xrpl.org/update-rippled-manually-on-ubuntu.html) as follows: - -``` -wget -q -O - "https://repos.ripple.com/repos/api/gpg/key/public" | \ - sudo apt-key add - -sudo apt -y update -sudo apt -y install rippled -``` - -### New and Improved Features - -- **Rework deferred node logic and async fetch behavior:** This change significantly improves ledger sync and fetch times while reducing memory consumption. (https://blog.ripplex.io/how-ripples-c-team-cut-rippleds-memory-footprint-down-to-size/) -- **New Ticket feature:** Tickets are a mechanism to prepare and send certain transactions outside of the normal sequence order. This version reworks and completes the implementation for Tickets after more than 6 years of development. This feature is now open for voting as the newly-introduced `TicketBatch` amendment, which replaces the previously-proposed `Tickets` amendment. The specification for this change can be found at: [xrp-community/standards-drafts#16](https://github.com/xrp-community/standards-drafts/issues/16) -- **Add Reporting Mode:** The server can be compiled to operate in a new mode that serves API requests for validated ledger data without connecting directly to the peer-to-peer network. (The server needs a gRPC connection to another server that is on the peer-to-peer network.) Reporting Mode servers can share access to ledger data via Apache Cassandra and PostgreSQL to more efficiently serve API requests while peer-to-peer servers specialize in broadcasting and processing transactions. -- **Optimize relaying of validation and proposal messages:** Servers typically receive multiple copies of any given message from directly connected peers; in particular, consensus proposal and validation messages are often relayed with extremely high redundancy. For servers with several peers, this can cause redundant work. 
This commit introduces experimental code that attempts to optimize the relaying of proposals and validations by allowing servers to instruct their peers to "squelch" delivery of selected proposals and validations. This change is considered experimental at this time and is disabled by default because the functioning of the consensus network depends on messages propagating with high reliability through the constantly-changing peer-to-peer network. Server operators who wish to test the optimized code can enable it in their server config file. -- **Report server domain to other servers:** Server operators now have the option to configure a domain name to be associated with their servers. The value is communicated to other servers and is also reported via the `server_info` API. The value is meant for third-party applications and tools to group servers together. For example, a tool that visualizes the network's topology can show how many servers are operated by different stakeholders. An operator can claim any domain, so tools should use the [xrp-ledger.toml file](https://xrpl.org/xrp-ledger-toml.html) to confirm that the domain also claims ownership of the servers. -- **Improve handling of peers that aren't synced:** When evaluating the fitness and usefulness of an outbound peer, the code would incorrectly calculate the amount of time that the peer spent in a non-useful state. This release fixes the calculation and makes the timeout values configurable by server operators. Two new options are introduced in the 'overlay' stanza of the config file. -- **Persist API-configured voting settings:** Previously, the amendments that a server would vote in support of or against could be configured both via the configuration file and via the ["feature" API method](https://xrpl.org/feature.html). Changes made in the configuration file were only loaded at server startup; changes made via the command line take effect immediately but were not persisted across restarts. Starting with this release, changes made via the API are saved to the wallet.db database file so that they persist even if the server is restarted. -Amendment voting in the config file is deprecated. The first time the server starts with v1.7.0 or higher, it reads any amendment voting settings in the config file and saves the settings to the database; on later restarts the server prints a warning message and ignores the [amendments] and [veto_amendments] stanzas of the config file. -Going forward, use the [feature method](https://xrpl.org/feature.html) to view and configure amendment votes. If you want to use the config file to configure amendment votes, add a line to the [rpc_startup] stanza such as the following: -[rpc_startup] -{ "command": "feature", "feature": "FlowSortStrands", "vetoed": true } -- **Support UNLs with future effective dates:** Updates the format for the recommended validator list file format, allowing publishers to pre-publish the next recommended UNL while the current one is still valid. The server is still backwards compatible with the previous format, but the new format removes some uncertainty during the transition from one list to the next. Also, starting with this release, the server locks down and reports an error if it has no valid validator list. 
You can clear the error by loading a validator list from a file or by configuring a different UNL and restarting; the error also goes away on its own if the server is able to obtain a trusted validator list from the network (for example, after a network outage resolves itself). -- **Improve manifest relaying:** Servers now propagate change messages for validators' ephemeral public keys ("manifests") on a best-effort basis, to make manifests more available throughout the peer-to-peer network. Previously, the server would only relay manifests from validators it trusts locally, which made it difficult to detect and track validators that are not broadly trusted. -- **Implement ledger forward replay feature:** The server can now sync up to the network by "playing forward" transactions from a previously saved ledger until it catches up to the network. Compared with the default behavior of fetching the latest state and working backwards, forward replay can save time and bandwidth by reconstructing previous ledgers' state data rather than downloading the pre-calculated results from the network. As an added bonus, forward replay confirms that the rest of the network followed the same transaction processing rules as the local server when processing the intervening ledgers. This feature is considered experimental at this time and can be enabled with an option in the config file. -- **Make the transaction job queue limit adjustable:** The server uses a job queue to manage tasks, with limits on how many jobs of a particular type can be queued. The previously hard-coded limit associated with transactions is now configurable. Server operators can increase the number of transactions their server is able to queue, which may be useful if your server has a large memory capacity or you expect an influx of transactions. (https://github.com/ripple/rippled/issues/3556) -- **Add public_key to the Validator List method response:** The [Validator List method](https://xrpl.org/validator-list.html) can be used to request a recommended validator list from a rippled instance. The response now includes the public key of the requested list. (https://github.com/ripple/rippled/issues/3392) -- **Server operators can now configure maximum inbound and outbound peers separately:** The new `peers_in_max` and `peers_out_max` config options allow server operators to independently control the maximum number of inbound and outbound peers the server allows. [70c4ecc] -- **Improvements to shard downloading:** Previously the download_shard command could only load shards over HTTPS. Compressed shards can now also be downloaded over plain HTTP. The server fully checks the data for integrity and consistency, so the encryption is not strictly necessary. When initiating multiple shard downloads, the server now returns an error if there is not enough space to store all the shards currently being downloaded. -- **The manifest command is now public:** The manifest API method returns public information about a given validator. The required permissions have been changed so it is now part of the public API. - -### Bug Fixes - -- **Implement sticky DNS resolution for validator list retrieval:** When attempting to load a validator list from a configured site, the server now attempts to reuse the last IP that was successfully used if that IP is still present in the DNS response. (https://github.com/ripple/rippled/issues/3494). -- **Improve handling of RPC ledger_index argument:** You can now provide the `ledger_index` as a numeric string.
This allows you to copy and use the numeric string `ledger_index` value returned by certain RPC commands. Previously you could only send native JSON numbers or shortcut strings such as "validated" in the `ledger_index` field. (https://github.com/ripple/rippled/issues/3533) -- **Fix improper promotion of bool on return** [6968da1] -- **Fix ledger sequence on copynode** [ef53197] -- **Fix parsing of node public keys in `manifest` CLI:** The previous code attempts to validate the provided node public key using a function that assumes that the encoded public key is for an account. This causes the parsing to fail. This commit fixes #3317 (https://github.com/ripple/rippled/issues/3317) by letting the caller specify the type of the public key being checked. -- **Fix idle peer timer:** Fixes a bug where a function to remove idle peers was called every second instead of every 4 seconds. #3754 (https://github.com/ripple/rippled/issues/3754) -- **Add database counters:** Fix bug where DatabaseRotateImp::getBackend and ::sync utilized the writable backend without a lock. ::getBackend was replaced with ::getCounters. -- **Improve online_delete configuration and DB tuning** [6e9051e] -- **Improve handling of burst writes in NuDB database** ( https://github.com/ripple/rippled/pull/3662 ) -- **Fix excessive logging after disabling history shards.** Previously if you configured the server with a shard store, then disabled it, the server output excessive warning messages about the shard limit being exceeded. -- **Fixed some issues with negotiating link compression.** ( https://github.com/ripple/rippled/pull/3705 ) -- **Fixed a potential thread deadlock with history sharding.** ( https://github.com/ripple/rippled/pull/3683 ) -- **Various fixes to typos and comments, refactoring, and build system improvements** - -## Version 1.6.0 - -This release introduces several new features including changes to the XRP Ledger's consensus mechanism to make it even more robust in -adverse conditions, as well as numerous bug fixes and optimizations. - -### New and Improved Features - -- Initial implementation of Negative UNL functionality: This change can improve the liveness of the network during periods of network instability, by allowing servers to track which validators are temporarily offline and to adjust quorum calculations to match. This change requires an amendment, but the amendment is not in the **1.6.0** release. Ripple expects to run extensive public testing for Negative UNL functionality on the Devnet in the coming weeks. If public testing satisfies all requirements across security, reliability, stability, and performance, then the amendment could be included in a version 2.0 release. [[#3380](https://github.com/ripple/rippled/pull/3380)] -- Validation Hardening: This change allows servers to detect accidental misconfiguration of validators, as well as potentially Byzantine behavior by malicious validators. Servers can now log a message to notify operators if they detect a single validator issuing validations for multiple, incompatible ledger versions, or validations from multiple servers sharing a key. As part of this update, validators report the version of `rippled` they are using, as well as the hash of the last ledger they consider to be fully validated, in validation messages. 
[[#3291](https://github.com/ripple/rippled/pull/3291)] ![Amendment: Required](https://img.shields.io/badge/Amendment-Required-red) -- Software Upgrade Monitoring & Notification: After the `HardenedValidations` amendment is enabled and the validators begin reporting the versions of `rippled` they are running, a server can check how many of the validators on its UNL run a newer version of the software than itself. If more than 60% of a server's validators are running a newer version, the server writes a message to notify the operator to consider upgrading their software. [[#3447](https://github.com/ripple/rippled/pull/3447)] -- Link Compression: Beginning with **1.6.0**, server operators can enable support for compressing peer-to-peer messages. This can save bandwidth at a cost of higher CPU usage. This support is disabled by default and should prove useful for servers with a large number of peers. [[#3287](https://github.com/ripple/rippled/pull/3287)] -- Unconditionalize Amendments that were enabled in 2017: This change removes legacy code which the network has not used since 2017. This change limits the ability to [replay](https://github.com/xrp-community/standards-drafts/issues/14) ledgers that rely on the pre-2017 behavior. [[#3292](https://github.com/ripple/rippled/pull/3292)] -- New Health Check Method: Perform a simple HTTP request to get a summary of the health of the server: Healthy, Warning, or Critical. [[#3365](https://github.com/ripple/rippled/pull/3365)] -- Start work on API version 2. Version 2 of the API will be part of a future release. The first breaking change will be to consolidate several closely related error messages that can occur when the server is not synced into a single "notSynced" error message. [[#3269](https://github.com/ripple/rippled/pull/3269)] -- Improved shard concurrency: Improvements to the shard engine have helped reduce the lock scope on all public functions, increasing the concurrency of the code. [[#3251](https://github.com/ripple/rippled/pull/3251)] -- Default Port: In the config file, the `[ips_fixed]` and `[ips]` stanzas now use the [IANA-assigned port](https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=2459) for the XRP Ledger protocol (2459) when no port is specified. The `connect` API method also uses the same port by default. [[#2861](https://github.com/ripple/rippled/pull/2861)]. -- Improve proposal and validation relaying. The peer-to-peer protocol always relays trusted proposals and validations (as part of the [consensus process](https://xrpl.org/consensus.html)), but only relays _untrusted_ proposals and validations in certain circumstances. This update adds configuration options so server operators can fine-tune how their server handles untrusted proposals and validations, and changes the default behavior to prioritize untrusted validations higher than untrusted proposals. [[#3391](https://github.com/ripple/rippled/pull/3391)] -- Various Build and CI Improvements including updates to RocksDB 6.7.3 [[#3356](https://github.com/ripple/rippled/pull/3356)], NuDB 2.0.3 [[#3437](https://github.com/ripple/rippled/pull/3437)], adjusting CMake settings so that rippled can be built as a submodule [[#3449](https://github.com/ripple/rippled/pull/3449)], and adding Travis CI settings for Ubuntu Bionic Beaver [[#3319](https://github.com/ripple/rippled/pull/3319)]. -- Better documentation in the config file for online deletion and database tuning. 
[[#3429](https://github.com/ripple/rippled/pull/3429)] - - -### Bug Fixes - -- Fix the 14 day timer to enable amendment to start at the correct quorum size [[#3396](https://github.com/ripple/rippled/pull/3396)] -- Improve online delete backend lock which addresses a possibility in the online delete process where one or more backend shared pointer references may become invalid during rotation. [[#3342](https://github.com/ripple/rippled/pull/3342)] -- Address an issue that can occur during the loading of validator tokens, where a deliberately malformed token could cause the server to crash during startup. [[#3326](https://github.com/ripple/rippled/pull/3326)] -- Add delivered amount to GetAccountTransactionHistory. The delivered_amount field was not being populated when calling GetAccountTransactionHistory. In contrast, the delivered_amount field was being populated when calling GetTransaction. This change populates delivered_amount in the response to GetAccountTransactionHistory, and adds a unit test to make sure the results delivered by GetTransaction and GetAccountTransactionHistory match each other. [[#3370](https://github.com/ripple/rippled/pull/3370)] -- Fix build issues for GCC 10 [[#3393](https://github.com/ripple/rippled/pull/3393)] -- Fix historical ledger acquisition - this fixes an issue where historical ledgers were acquired only since the last online deletion interval instead of the configured value to allow deletion.[[#3369](https://github.com/ripple/rippled/pull/3369)] -- Fix build issue with Docker [#3416](https://github.com/ripple/rippled/pull/3416)] -- Add Shard family. The App Family utilizes a single shared Tree Node and Full Below cache for all history shards. This can create a problem when acquiring a shard that shares an account state node that was recently cached from another shard operation. The new Shard Family class solves this issue by managing separate Tree Node and Full Below caches for each shard. [#3448](https://github.com/ripple/rippled/pull/3448)] -- Amendment table clean up which fixes a calculation issue with majority. [#3428](https://github.com/ripple/rippled/pull/3428)] -- Add the `ledger_cleaner` command to rippled command line help [[#3305](https://github.com/ripple/rippled/pull/3305)] -- Various typo and comments fixes. - - -## Version 1.5.0 - -The `rippled` 1.5.0 release introduces several improvements and new features, including support for gRPC API, API versioning, UNL propagation via the peer network, new RPC methods `validator_info` and `manifest`, augmented `submit` method, improved `tx` method response, improved command line parsing, improved handshake protocol, improved package building and various other minor bug fixes and improvements. - -This release also introduces two new amendments: `fixQualityUpperBound` and `RequireFullyCanonicalSig`. - -Several improvements to the sharding system are currently being evaluated for inclusion into the upcoming 1.6 release of `rippled`. These changes are incompatible with shards generated by previous versions of the code. -Additionally, an issue with the existing sharding engine can result in a server running versions 1.4 or 1.5 of the software to experience a deadlock and automatically restart when running with the sharding feature enabled. -At this time, the developers recommend running with sharding disabled, pending the improvements scheduled to be introduced with 1.6. 
For more information on how to disable sharding, please visit https://xrpl.org/configure-history-sharding.html - - -**New and Updated Features** -- The `RequireFullyCanonicalSig` amendment which changes the signature requirements for the XRP Ledger protocol so that non-fully-canonical signatures are no longer valid. This protects against transaction malleability on all transactions, instead of just transactions with the tfFullyCanonicalSig flag enabled. Without this amendment, a transaction is malleable if it uses a secp256k1 signature and does not have tfFullyCanonicalSig enabled. Most signing utilities enable tfFullyCanonicalSig by default, but there are exceptions. With this amendment, no single-signed transactions are malleable. (Multi-signed transactions may still be malleable if signers provide more signatures than are necessary.) All transactions must use the fully canonical form of the signature, regardless of the tfFullyCanonicalSig flag. Signing utilities that do not create fully canonical signatures are not supported. All of Ripple's signing utilities have been providing fully-canonical signatures exclusively since at least 2014. For more information. [`ec137044a`](https://github.com/ripple/rippled/commit/ec137044a014530263cd3309d81643a5a3c1fdab) -- Native [gRPC API](https://grpc.io/) support. Currently, this API provides a subset of the full `rippled` [API](https://xrpl.org/rippled-api.html). You can enable the gRPC API on your server with a new configuration stanza. [`7d867b806`](https://github.com/ripple/rippled/commit/7d867b806d70fc41fb45e3e61b719397033b272c) -- API Versioning which allows for future breaking change of RPC methods to co-exist with existing versions. [`2aa11fa41`](https://github.com/ripple/rippled/commit/2aa11fa41d4a7849ae6a5d7a11df6f367191e3ef) -- Nodes now receive and broadcast UNLs over the peer network under various conditions. [`2c71802e3`](https://github.com/ripple/rippled/commit/2c71802e389a59118024ea0152123144c084b31c) -- Augmented `submit` method to include additional details on the status of the command. [`79e9085dd`](https://github.com/ripple/rippled/commit/79e9085dd1eb72864afe841225b78ec96e72b5ca) -- Improved `tx` method response with additional details on ledgers searched. [`47501b7f9`](https://github.com/ripple/rippled/commit/47501b7f99d4103d9ad405e399169fc251161548) -- New `validator_info` method which returns information pertaining to the current validator's keys, manifest sequence, and domain. [`3578acaf0`](https://github.com/ripple/rippled/commit/3578acaf0b5f2d27ddc33f5b4cc81d21be1903ae) -- New `manifest` method which looks up manifest information for the specified key (either master or ephemeral). [`3578acaf0`](https://github.com/ripple/rippled/commit/3578acaf0b5f2d27ddc33f5b4cc81d21be1903ae) -- Introduce handshake protocol for compression negotiation (compression is not implemented at this point) and other minor improvements. [`f6916bfd4`](https://github.com/ripple/rippled/commit/f6916bfd429ce654e017ae9686cb023d9e05408b) -- Remove various old conditionals introduced by amendments. [`(51ed7db00`](https://github.com/ripple/rippled/commit/51ed7db0027ba822739bd9de6f2613f97c1b227b), [`6e4945c56)`](https://github.com/ripple/rippled/commit/6e4945c56b1a1c063b32921d7750607587ec3063) -- Add `getRippledInfo` info gathering script to `rippled` Linux packages. 
[`acf4b7889`](https://github.com/ripple/rippled/commit/acf4b78892074303cb1fa22b778da5e7e7eddeda) - -**Bug Fixes and Improvements** -- The `fixQualityUpperBound` amendment which fixes a bug in unused code for estimating the ratio of input to output of individual steps in cross-currency payments. [`9d3626fec`](https://github.com/ripple/rippled/commit/9d3626fec5b610100f401dc0d25b9ec8e4a9a362) -- `tx` method now properly fetches all historical tx if they are incorporated into a validated ledger under rules that applied at the time. [`11cf27e00`](https://github.com/ripple/rippled/commit/11cf27e00698dbfc099b23463927d1dac829ed19) -- Fix to how `fail_hard` flag is handled with the `submit` method - transactions that are submitted with the `fail_hard` flag that result in any TER code besides tesSUCCESS is neither queued nor held. [`cd9732b47`](https://github.com/ripple/rippled/commit/cd9732b47a9d4e95bcb74e048d2c76fa118b80fb) -- Remove unused `Beast` code. [`172ead822`](https://github.com/ripple/rippled/commit/172ead822159a3c1f9b73217da4316df48851ab6) -- Lag ratchet code fix to use proper ephemeral public keys instead of the long-term master public keys.[`6529d3e6f`](https://github.com/ripple/rippled/commit/6529d3e6f7333fc5226e5aa9ae65f834cb93dfe5) - - -## Version 1.4.0 - -The `rippled` 1.4.0 release introduces several improvements and new features, including support for deleting accounts, improved peer slot management, improved CI integration and package building and support for [C++17](https://en.wikipedia.org/wiki/C%2B%2B17) and [Boost 1.71](https://www.boost.org/users/history/version_1_71_0.html). Finally, this release removes the code for the `SHAMapV2` amendment which failed to gain majority support and has been obsoleted by other improvements. - -**New and Updated Features** -- The `DeletableAccounts` amendment which, if enabled, will make it possible for users to delete unused or unneeded accounts, recovering the account's reserve. -- Support for improved management of peer slots and the ability to add and removed reserved connections without requiring a restart of the server. -- Tracking and reporting of cumulative and instantaneous peer bandwidth usage. -- Preliminary support for post-processing historical shards after downloading to index their contents. -- Reporting the master public key alongside the ephemeral public key in the `validation` stream [subscriptions](https://xrpl.org/subscribe.html). -- Reporting consensus phase changes in the `server` stream [subscription](https://xrpl.org/subscribe.html). - -**Bug Fixes** -- The `fixPayChanRecipientOwnerDir` amendment which corrects a minor technical flaw that would cause a payment channel to not appear in the recipient's owner directory, which made it unnecessarily difficult for users to enumerate all their payment channels. -- The `fixCheckThreading` amendment which corrects a minor technical flaw that caused checks to not be properly threaded against the account of the check's recipient. -- Respect the `ssl_verify` configuration option in the `SSLHTTPDownloader` and `HTTPClient` classes. -- Properly update the `server_state` when a server detects a disagreement between itself and the network. -- Allow Ed25519 keys to be used with the `channel_authorize` command. - -## Version 1.3.1 - -The `rippled` 1.3.1 release improves the built-in deadlock detection code, improves logging during process startup, changes the package build pipeline and improves the build documentation. 
- -**New and Updated Features** - -This release has no new features. - -**Bug Fixes** -- Add a LogicError when a deadlock is detected (355a7b04) -- Improve logging during process startup (7c24f7b1) - -## Version 1.3.0 -The `rippled` 1.3.0 release introduces several new features and overall improvements to the codebase, including the `fixMasterKeyAsRegularKey` amendment, code to adjust the timing of the consensus process and support for decentralized validator domain verification. The release also includes miscellaneous improvements including in the transaction censorship detection code, transaction validation code, manifest parsing code, configuration file parsing code, log file rotation code, and in the build, continuous integration, testing and package building pipelines. - -**New and Updated Features** -- The `fixMasterKeyAsRegularKey` amendment which, if enabled, will correct a technical flaw that allowed setting an account's regular key to the account's master key. -- Code that allows validators to adjust the timing of the consensus process in near-real-time to account for connection delays. -- Support for decentralized validator domain verification by adding support for a "domain" field in manifests. - -**Bug Fixes** -- Improve ledger trie ancestry tracking to reduce unnecessary error messages. -- More efficient detection of dry paths in the payment engine. Although not a transaction-breaking change, this should reduce spurious error messages in the log files. - -## Version 1.2.4 - -The `rippled` 1.2.4 release improves the way that shard crawl requests are routed and the robustness of configured validator list retrieval by imposing a 20 second timeout. - -**New and Updated Features** - -This release has no new features. - -**Bug Fixes** - -- Use public keys when routing shard crawl requests -- Enforce a 20s timeout when making validator list requests (RIPD-1737) - -## Version 1.2.3 - -The `rippled` 1.2.3 release corrects a technical flaw which in some circumstances can cause a null pointer dereference that can crash the server. - -**New and Updated Features** - -This release has no new features. - -**Bug Fixes** - -- Fix a technical flaw which in some circumstances can cause a null pointer dereference that can crash the server. - -## Version 1.2.2 - -The `rippled` 1.2.2 release corrects a technical flaw in the fee escalation -engine which could cause some fee metrics to be calculated incorrectly. In some -circumstances this can potentially cause the server to crash. - -**New and Updated Features** - -This release has no new features. - -**Bug Fixes** - -- Fix a technical flaw in the fee escalation engine which could cause some fee metrics to be calculated incorrectly (4c06b3f86) - -## Version 1.2.1 - -The `rippled` 1.2.1 release introduces several fixes including a change in the -information reported via the enhanced crawl functionality introduced in the -1.2.0 release, a fix for a potential race condition when processing a status -change message for a peer, and for a technical flaw that could cause a server -to not properly detect that it had lost all its peers. - -The release also adds the `delivered_amount` field to more responses to simplify -the handling of payment or check cashing transactions. - -**New and Updated Features** - -This release has no new features. 

**Bug Fixes**

- Fix a race condition during `TMStatusChange` handling (c8249981)
- Properly transition state to disconnected (9d027394)
- Display validator status only in response to admin requests (2d6a518a)
- Add the `delivered_amount` to more RPC commands (f2756914)

## Version 1.2.0

The `rippled` 1.2.0 release introduces the MultisignReserve Amendment, which reduces the reserve requirement associated with signer lists. This release also includes incremental improvements to the code that handles offers. Furthermore, `rippled` now also has the ability to automatically detect transaction censorship attempts and issue warnings of increasing severity for transactions that should have been included in a closed ledger after several rounds of consensus.

**New and Updated Features**

- Reduce the account reserve for a Multisign SignerList (6572fc8)
- Improve transaction error condition handling (4104778)
- Allow servers to automatically detect transaction censorship attempts (945493d)
- Load validator list from file (c1a0244)
- Add RPC command shard crawl (17e0d09)
- Add RPC Call unit tests (eeb9d92)
- Grow the open ledger expected transactions quickly (7295cf9)
- Avoid dispatching multiple fetch pack threads (4dcb3c9)
- Remove unused function in AutoSocket.h (8dd8433)
- Update TxQ developer docs (e14f913)
- Add user defined literals for megabytes and kilobytes (cd1c5a3)
- Make the FeeEscalation Amendment permanent (58f786c)
- Remove undocumented experimental options from RPC sign (a96cb8f)
- Improve RPC error message for fee command (af1697c)
- Improve ledger_entry command’s inconsistent behavior (63e167b)

**Bug Fixes**

- Accept redirects from validator list sites (7fe1d4b)
- Implement missing string conversions for JSON (c0e9418)
- Eliminate potential undefined behavior (c71eb45)
- Add safe_cast to ensure no overflow in casts between enums and integral types (a7e4541)

## Version 1.1.2

The `rippled` 1.1.2 release introduces a fix for an issue that could have prevented cluster peers from successfully bypassing connection limits when connecting to other servers on the same cluster. Additionally, it improves logic used to determine what the preferred ledger is during suboptimal network conditions.

**New and Updated Features**

This release has no new features.

**Bug Fixes**

- Properly bypass connection limits for cluster peers (#2795, #2796)
- Improve preferred ledger calculation (#2784)

## Version 1.1.1

The `rippled` 1.1.1 release adds support for redirections when retrieving validator lists and changes the way that validators with an expired list behave. Additionally, informational commands return more useful information to allow server operators to determine the state of their server.

**New and Updated Features**

- Enhance status reporting when using the `server_info` and `validators` commands (#2734)
- Accept redirects from validator list sites: (#2715)

**Bug Fixes**

- Properly handle expired validator lists when validating (#2734)

## Version 1.1.0

The `rippled` 1.1.0 release includes the `DepositPreAuth` amendment, which combined with the previously released `DepositAuth` amendment, allows users to pre-authorize incoming transactions to accounts, by whitelisting sender addresses.
The 1.1.0 release also includes incremental improvements to several previously released features (`fix1515` amendment), deprecates support for the `sign` and `sign_for` commands from the rippled API and improves invariant checking for enhanced security. - -Ripple recommends that all server operators upgrade to XRP Ledger version 1.1.0 by Thursday, 2018-09-27, to ensure service continuity. - -**New and Updated Features** - -- Add `DepositPreAuth` ledger type and transaction (#2513) -- Increase fault tolerance and raise validation quorum to 80%, which fixes issue 2604 (#2613) -- Support ipv6 for peer and RPC comms (#2321) -- Refactor ledger replay logic (#2477) -- Improve Invariant Checking (#2532) -- Expand SQLite potential storage capacity (#2650) -- Replace UptimeTimer with UptimeClock (#2532) -- Don’t read Amount field if it is not present (#2566) -- Remove Transactor:: mFeeDue member variable (#2586) -- Remove conditional check for using Boost.Process (#2586) -- Improve charge handling in NoRippleCheckLimits test (#2629) -- Migrate more code into the chrono type system (#2629) -- Supply ConsensusTimer with milliseconds for finer precision (#2629) -- Refactor / modernize Cmake (#2629) -- Add delimiter when appending to cmake_cxx_flags (#2650) -- Remove using namespace declarations at namespace scope in headers (#2650) - -**Bug Fixes** - -- Deprecate the ‘sign’ and ‘sign_for’ APIs (#2657) -- Use liquidity from strands that consume too many offers, which will be enabled on fix1515 Amendment (#2546) -- Fix a corner case when decoding base64 (#2605) -- Trim space in Endpoint::from_string (#2593) -- Correctly suppress sent messages (#2564) -- Detect when a unit test child process crashes (#2415) -- Handle WebSocket construction exceptions (#2629) -- Improve JSON exception handling (#2605) -- Add missing virtual destructors (#2532) - - -## Version 1.0.0. - -The `rippled` 1.0.0 release includes incremental improvements to several previously released features. - -**New and Updated Features** - -- The **history sharding** functionality has been improved. Instances can now use the shard store to satisfy ledger requests. -- Change permessage-deflate and compress defaults (RIPD-506) -- Update validations on UNL change (RIPD-1566) - -**Bug Fixes** - -- Add `check`, `escrow`, and `pay_chan` to `ledger_entry` (RIPD-1600) -- Clarify Escrow semantics (RIPD-1571) - - -## Version 0.90.1 - -The `rippled` 0.90.1 release includes fixes for issues reported by external security researchers. These issues, when exploited, could cause a rippled instance to restart or, in some circumstances, stop executing. While these issues can result in a denial of service attack, none affect the integrity of the XRP Ledger and no user funds, including XRP, are at risk. - -**New and Updated Features** - -This release has no new features. - -**Bug Fixes** - -- Address issues identified by external review: - - Verify serialized public keys more strictly before using them - (RIPD-1617, RIPD-1619, RIPD-1621) - - Eliminate a potential out-of-bounds memory access in the base58 - encoding/decoding logic (RIPD-1618) - - Avoid invoking undefined behavior in memcpy (RIPD-1616) - - Limit STVar recursion during deserialization (RIPD-1603) -- Use lock when creating a peer shard rangeset - - -## Version 0.90.0 - -The `rippled` 0.90.0 release introduces several features and enhancements that improve the reliability, scalability and security of the XRP Ledger. 
- -Highlights of this release include: - -- The `DepositAuth` amendment, which lets an account strictly reject any incoming money from transactions sent by other accounts. -- The `Checks` amendment, which allows users to create deferred payments that can be cancelled or cashed by their intended recipients. -- **History Sharding**, which allows `rippled` servers to distribute historical ledger data if they agree to dedicate storage for segments of ledger history. -- New **Preferred Ledger by Branch** semantics which improve the logic that allow a server to decide which ledger it should base future ledgers on when there are multiple candidates. - -**New and Updated Features** - -- Add support for Deposit Authorization account root flag ([#2239](https://github.com/ripple/rippled/issues/2239)) -- Implement history shards ([#2258](https://github.com/ripple/rippled/issues/2258)) -- Preferred ledger by branch ([#2300](https://github.com/ripple/rippled/issues/2300)) -- Redesign Consensus Simulation Framework ([#2209](https://github.com/ripple/rippled/issues/2209)) -- Tune for higher transaction processing ([#2294](https://github.com/ripple/rippled/issues/2294)) -- Optimize queries for `account_tx` to work around SQLite query planner ([#2312](https://github.com/ripple/rippled/issues/2312)) -- Allow `Journal` to be copied/moved ([#2292](https://github.com/ripple/rippled/issues/2292)) -- Cleanly report invalid `[server]` settings ([#2305](https://github.com/ripple/rippled/issues/2305)) -- Improve log scrubbing ([#2358](https://github.com/ripple/rippled/issues/2358)) -- Update `rippled-example.cfg` ([#2307](https://github.com/ripple/rippled/issues/2307)) -- Force json commands to be objects ([#2319](https://github.com/ripple/rippled/issues/2319)) -- Fix cmake clang build for sanitizers ([#2325](https://github.com/ripple/rippled/issues/2325)) -- Allow `account_objects` RPC to filter by “check” ([#2356](https://github.com/ripple/rippled/issues/2356)) -- Limit nesting of json commands ([#2326](https://github.com/ripple/rippled/issues/2326)) -- Unit test that `sign_for` returns a correct hash ([#2333](https://github.com/ripple/rippled/issues/2333)) -- Update Visual Studio build instructions ([#2355](https://github.com/ripple/rippled/issues/2355)) -- Force boost static linking for MacOS builds ([#2334](https://github.com/ripple/rippled/issues/2334)) -- Update MacOS build instructions ([#2342](https://github.com/ripple/rippled/issues/2342)) -- Add dev docs generation to Jenkins ([#2343](https://github.com/ripple/rippled/issues/2343)) -- Poll if process is still alive in Test.py ([#2290](https://github.com/ripple/rippled/issues/2290)) -- Remove unused `beast::currentTimeMillis()` ([#2345](https://github.com/ripple/rippled/issues/2345)) - - -**Bug Fixes** -- Improve error message on mistyped command ([#2283](https://github.com/ripple/rippled/issues/2283)) -- Add missing includes ([#2368](https://github.com/ripple/rippled/issues/2368)) -- Link boost statically only when requested ([#2291](https://github.com/ripple/rippled/issues/2291)) -- Unit test logging fixes ([#2293](https://github.com/ripple/rippled/issues/2293)) -- Fix Jenkins pipeline for branches ([#2289](https://github.com/ripple/rippled/issues/2289)) -- Avoid AppVeyor stack overflow ([#2344](https://github.com/ripple/rippled/issues/2344)) -- Reduce noise in log ([#2352](https://github.com/ripple/rippled/issues/2352)) - - -## Version 0.81.0 - -The `rippled` 0.81.0 release introduces changes that improve the scalability of the XRP Ledger and 
transitions the recommended validator configuration to a new hosted site, as described in Ripple's [Decentralization Strategy Update](https://ripple.com/dev-blog/decentralization-strategy-update/) post.

**New and Updated Features**

- New hosted validator configuration.

**Bug Fixes**

- Optimize queries for account_tx to work around SQLite query planner ([#2312](https://github.com/ripple/rippled/issues/2312))

## Version 0.80.2

The `rippled` 0.80.2 release introduces changes that improve the scalability of the XRP Ledger.

**New and Updated Features**

This release has no new features.

**Bug Fixes**

- Do not dispatch a transaction received from a peer for processing if it has already been dispatched within the past ten seconds.
- Increase the number of transaction handlers that can be in flight in the job queue and decrease the relative cost for peers to share transaction and ledger data.
- Make better use of resources by adjusting the number of threads we initialize, by reverting commit [#68b8ffd](https://github.com/ripple/rippled/commit/68b8ffdb638d07937f841f7217edeb25efdb3b5d).

## Version 0.80.1

The `rippled` 0.80.1 release provides several enhancements in support of published validator lists and corrects several bugs.

**New and Updated Features**

- Allow including validator manifests in published list ([#2278](https://github.com/ripple/rippled/issues/2278))
- Add validator list RPC commands ([#2242](https://github.com/ripple/rippled/issues/2242))
- Support [SNI](https://en.wikipedia.org/wiki/Server_Name_Indication) when querying published list sites and use Windows system root certificates ([#2275](https://github.com/ripple/rippled/issues/2275))
- Grow TxQ expected size quickly, shrink slowly ([#2235](https://github.com/ripple/rippled/issues/2235))

**Bug Fixes**

- Make consensus quorum unreachable if validator list expires ([#2240](https://github.com/ripple/rippled/issues/2240))
- Properly use ledger hash to break ties when determining working ledger for consensus ([#2257](https://github.com/ripple/rippled/issues/2257))
- Explicitly use std::deque for missing node handler in SHAMap code ([#2252](https://github.com/ripple/rippled/issues/2252))
- Verify validator token manifest matches private key ([#2268](https://github.com/ripple/rippled/issues/2268))

## Version 0.80.0

The `rippled` 0.80.0 release introduces several enhancements that improve the reliability, scalability and security of the XRP Ledger.

Highlights of this release include:

- The `SortedDirectories` amendment, which allows the entries stored within a page to be sorted, and corrects a technical flaw that could, in some edge cases, prevent an empty intermediate page from being deleted.
- Changes to the UNL and quorum rules
  + Use a fixed size UNL if the total listed validators are below threshold
  + Ensure a quorum of 0 cannot be configured
  + Set a quorum to provide Byzantine fault tolerance until a threshold of total validators is exceeded, at which time the quorum is 80%

**New and Updated Features**

- Improve directory insertion and deletion ([#2165](https://github.com/ripple/rippled/issues/2165))
- Move consensus thread safety logic from the generic implementation in Consensus into the RCL adapted version RCLConsensus ([#2106](https://github.com/ripple/rippled/issues/2106))
- Refactor Validations class into a generic version that can be adapted ([#2084](https://github.com/ripple/rippled/issues/2084))
- Make minimum quorum Byzantine fault tolerant ([#2093](https://github.com/ripple/rippled/issues/2093))
- Make amendment blocked state thread-safe and simplify a constructor ([#2207](https://github.com/ripple/rippled/issues/2207))
- Use ledger hash to break ties ([#2169](https://github.com/ripple/rippled/issues/2169))
- Refactor RangeSet ([#2113](https://github.com/ripple/rippled/issues/2113))

**Bug Fixes**

- Fix an issue where `setAmendmentBlocked` is only called when processing the `EnableAmendment` transaction for the amendment ([#2137](https://github.com/ripple/rippled/issues/2137))
- Track escrow in recipient's owner directory ([#2212](https://github.com/ripple/rippled/issues/2212))

## Version 0.70.2

The `rippled` 0.70.2 release corrects an emergent behavior which causes large numbers of transactions to get stuck in different nodes' open ledgers without being passed on to validators, resulting in a spike in the open ledger fee on those nodes.

**New and Updated Features**

This release has no new features.

**Bug Fixes**

- Recent fee rises and TxQ issues ([#2215](https://github.com/ripple/rippled/issues/2215))

## Version 0.70.1

The `rippled` 0.70.1 release corrects a technical flaw in the newly refactored consensus code that could cause a node to get stuck in consensus due to stale votes from a peer, and allows compiling `rippled` under the 1.1.x releases of OpenSSL.

**New and Updated Features**

This release has no new features.

**Bug Fixes**

- Allow compiling against OpenSSL 1.1.0 ([#2151](https://github.com/ripple/rippled/pull/2151))
- Log invariant check messages at "fatal" level ([#2154](https://github.com/ripple/rippled/pull/2154))
- Fix the consensus code to update all disputed transactions after a node changes a position ([#2156](https://github.com/ripple/rippled/pull/2156))

## Version 0.70.0

The `rippled` 0.70.0 release introduces several enhancements that improve the reliability, scalability and security of the network.

Highlights of this release include:

- The `FlowCross` amendment, which streamlines offer crossing and autobridging logic by leveraging the new “Flow” payment engine.
- The `EnforceInvariants` amendment, which can safeguard the integrity of the XRP Ledger by introducing code that executes after every transaction and ensures that the execution did not violate key protocol rules (see the sketch after this list).
- `fix1373`, which addresses an issue that would cause payments with certain path specifications to not be properly parsed.
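
To make the `EnforceInvariants` idea above concrete, here is a minimal sketch of one post-transaction rule. It is illustrative only: the `BalanceChange` summary and `checkNoXRPCreated` helper are hypothetical and are not rippled's actual invariant-checker interface.

```cpp
// Illustrative sketch only -- not rippled's actual invariant-check code.
// It captures the idea of "run a rule after every transaction": here the rule
// is that applying a transaction must never increase the total amount of XRP.
#include <cstdint>
#include <numeric>
#include <stdexcept>
#include <vector>

// Hypothetical summary of one account's XRP balance change, in drops.
struct BalanceChange
{
    std::int64_t drops;
};

void
checkNoXRPCreated(std::vector<BalanceChange> const& changes)
{
    std::int64_t const net = std::accumulate(
        changes.begin(), changes.end(), std::int64_t{0},
        [](std::int64_t sum, BalanceChange const& c) { return sum + c.drops; });

    // Fees are destroyed, so the aggregate balance may stay flat or shrink,
    // but a positive net change would mean the transaction created XRP.
    if (net > 0)
        throw std::runtime_error("invariant violated: XRP created");
}
```

In the real server the checks are wired into the transaction engine, and a failing check prevents the transaction's effects from being applied; the sketch only shows the shape of such a rule.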
- -**New and Updated Features** - -- Implement and test invariant checks for transactions (#2054) -- TxQ: Functionality to dump all queued transactions (#2020) -- Consensus refactor for simulation/cleanup (#2040) -- Payment flow code should support offer crossing (#1884) -- make `Config` init extensible via lambda (#1993) -- Improve Consensus Documentation (#2064) -- Refactor Dependencies & Unit Test Consensus (#1941) -- `feature` RPC test (#1988) -- Add unit Tests for handlers/TxHistory.cpp (#2062) -- Add unit tests for handlers/AccountCurrenciesHandler.cpp (#2085) -- Add unit test for handlers/Peers.cpp (#2060) -- Improve logging for Transaction affects no accounts warning (#2043) -- Increase logging in PeerImpl fail (#2043) -- Allow filtering of ledger objects by type in RPC (#2066) - -**Bug Fixes** - -- Fix displayed warning when generating brain wallets (#2121) -- Cmake build does not append '+DEBUG' to the version info for non-unity builds -- Crossing tiny offers can misbehave on RCL -- `asfRequireAuth` flag not always obeyed (#2092) -- Strand creating is incorrectly accepting invalid paths -- JobQueue occasionally crashes on shutdown (#2025) -- Improve pseudo-transaction handling (#2104) - -## Version 0.60.3 - -The `rippled` 0.60.3 release helps to increase the stability of the network under heavy load. - -**New and Updated Features** - -This release has no new features. - -**Bug Fixes** - -Server overlay improvements ([#2110](https://github.com/ripple/rippled/pull/2011)) - -## Version 0.60.2 - -The `rippled` 0.60.2 release further strengthens handling of cases associated with a previously patched exploit, in which NoRipple flags were being bypassed by using offers. - -**New and Updated Features** - -This release has no new features. - -**Bug Fixes** - -Prevent the ability to bypass the `NoRipple` flag using offers ([#7cd4d78](https://github.com/ripple/rippled/commit/4ff40d4954dfaa237c8b708c2126cb39566776da)) - -## Version 0.60.1 - -The `rippled` 0.60.1 release corrects a technical flaw that resulted from using 32-bit space identifiers instead of the protocol-defined 16-bit values for Escrow and Payment Channel ledger entries. rippled version 0.60.1 also fixes a problem where the WebSocket timeout timer would not be cancelled when certain errors occurred during subscription streams. Ripple requires upgrading to rippled version 0.60.1 immediately. - -**New and Updated Feature** - -This release has no new features. - -**Bug Fixes** - -Correct calculation of Escrow and Payment Channel indices. -Fix WebSocket timeout timer issues. - -## Version 0.60.0 - -The `rippled` 0.60.0 release introduces several enhancements that improve the reliability and scalability of the Ripple Consensus Ledger (RCL), including features that add ledger interoperability by improving Interledger Protocol compatibility. Ripple recommends that all server operators upgrade to version 0.60.0 by Thursday, 2017-03-30, for service continuity. - -Highlights of this release include: - -- `Escrow` (previously called `SusPay`) which permits users to cryptographically escrow XRP on RCL with an expiration date, and optionally a hashlock crypto-condition. Ripple expects Escrow to be enabled via an Amendment named [`Escrow`](https://ripple.com/build/amendments/#escrow) on Thursday, 2017-03-30. See below for details. -- Dynamic UNL Lite, which allows `rippled` to automatically adjust which validators it trusts based on recommended lists from trusted publishers. 
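
For context on the Dynamic UNL Lite item above: in later `rippled` configurations, trusting a recommended list from a publisher is expressed with the `[validator_list_sites]` and `[validator_list_keys]` stanzas. The exact stanza names postdate 0.60.0, and the site and key below are placeholders for a publisher you actually trust:

```
[validator_list_sites]
https://vl.example.com

[validator_list_keys]
<hex-encoded Ed25519 public key of the list publisher>
```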
- -**New and Updated Features** - -- Add `Escrow` support (#2039) -- Dynamize trusted validator list and quorum (#1842) -- Simplify fee handling during transaction submission (#1992) -- Publish server stream when fee changes (#2016) -- Replace manifest with validator token (#1975) -- Add validator key revocations (#2019) -- Add `SecretKey` comparison operator (#2004) -- Reduce `LEDGER_MIN_CONSENSUS` (#2013) -- Update libsecp256k1 and Beast B30 (#1983) -- Make `Config` extensible via lambda (#1993) -- WebSocket permessage-deflate integration (#1995) -- Do not close socket on a foreign thread (#2014) -- Update build scripts to support latest boost and ubuntu distros (#1997) -- Handle protoc targets in scons ninja build (#2022) -- Specify syntax version for ripple.proto file (#2007) -- Eliminate protocol header dependency (#1962) -- Use gnu gold or clang lld linkers if available (#2031) -- Add tests for `lookupLedger` (#1989) -- Add unit test for `get_counts` RPC method (#2011) -- Add test for `transaction_entry` request (#2017) -- Unit tests of RPC "sign" (#2010) -- Add failure only unit test reporter (#2018) - -**Bug Fixes** - -- Enforce rippling constraints during payments (#2049) -- Fix limiting step re-execute bug (#1936) -- Make "wss" work the same as "wss2" (#2033) -- Config test uses unique directories for each test (#1984) -- Check for malformed public key on payment channel (#2027) -- Send a websocket ping before timing out in server (#2035) - - -## Version 0.50.3 - -The `rippled` 0.50.3 release corrects a reported exploit that would allow a combination of trust lines and order books in a payment path to bypass the blocking effect of the [`NoRipple`](https://ripple.com/build/understanding-the-noripple-flag/) flag. Ripple recommends that all server operators immediately upgrade to version 0.50.3. - -**New and Updated Features** - -This release has no new features. - -**Bug Fixes** - -Correct a reported exploit that would allow a combination of trust lines and order books in a payment path to bypass the blocking effect of the “NoRipple” flag. - - -## Version 0.50.2 - -The `rippled` 0.50.2 release adjusts the default TLS cipher list and corrects a flaw that would not allow an SSL handshake to properly complete if the port was configured using the `wss` keyword. Ripple recommends upgrading to 0.50.2 only if server operators are running rippled servers that accept client connections over TLS. - -**New and Updated Features** - -This release has no new features. - -**Bug Fixes** - -Adjust the default cipher list and correct a flaw that would not allow an SSL handshake to properly complete if the port was configured using the `wss` keyword (#1985) - - -## Version 0.50.0 - -The `rippled` 0.50.0 release includes TickSize, which allows gateways to set a "tick size" for assets they issue to help promote faster price discovery and deeper liquidity, as well as reduce transaction spam and ledger churn on RCL. Ripple expects TickSize to be enabled via an Amendment called TickSize on Tuesday, 2017-02-21. - -You can [update to the new version](https://ripple.com/build/rippled-setup/#updating-rippled) on Red Hat Enterprise Linux 7 or CentOS 7 using yum. For other platforms, please [compile the new version from source](https://wiki.ripple.com/Rippled_build_instructions). - -**New and Updated Features** - -**Tick Size** - -Currently, offers on RCL can differ by as little as one part in a quadrillion. 
This means that there is essentially no value to placing an offer early, as an offer placed later at a microscopically better price gets priority over it. The [TickSize](https://ripple.com/build/amendments/#ticksize) Amendment solves this problem by introducing a minimum tick size that a price must move for an offer to be considered to be at a better price. The tick size is controlled by the issuers of the assets involved. - -This change lets issuers quantize the exchange rates of offers to use a specified number of significant digits. Gateways must enable a TickSize on their account for this feature to benefit them. A single AccountSet transaction may set a `TickSize` parameter. Legal values are 0 and 3-15 inclusive. Zero removes the setting. 3-15 allow that many decimal digits of precision in the pricing of offers for assets issued by this account. It will still be possible to place an offer to buy or sell any amount of an asset and the offer will still keep that amount as exactly as it does now. If an offer involves two assets that each have a tick size, the smaller number of significant figures (larger ticks) controls. - -For asset pairs with XRP, the tick size imposed, if any, is the tick size of the issuer of the non-XRP asset. For asset pairs without XRP, the tick size imposed, if any, is the smaller of the two issuer's configured tick sizes. - -The tick size is imposed by rounding the offer quality down to the nearest tick and recomputing the non-critical side of the offer. For a buy, the amount offered is rounded down. For a sell, the amount charged is rounded up. - -The primary expected benefit of the TickSize amendment is the reduction of bots fighting over the tip of the order book, which means: -- Quicker price discovery as outpricing someone by a microscopic amount is made impossible (currently bots can spend hours outbidding each other with no significant price movement) -- A reduction in offer creation and cancellation spam -- Traders can't outbid by a microscopic amount -- More offers left on the books as priority - -We also expect larger tick sizes to benefit market makers in the following ways: -- They increase the delta between the fair market value and the trade price, ultimately reducing spreads -- They prevent market makers from consuming each other's offers due to slight changes in perceived fair market value, which promotes liquidity -- They promote faster price discovery by reducing the back and forths required to move the price by traders who don't want to move the price more than they need to -- They reduce transaction spam by reducing fighting over the tip of the order book and reducing the need to change offers due to slight price changes -- They reduce ledger churn and metadata sizes by reducing the number of indexes each order book must have -- They allow the order book as presented to traders to better reflect the actual book since these presentations are inevitably aggregated into ticks - -**Hardened TLS configuration** - -This release updates the default TLS configuration for rippled. The new release supports only 2048-bit DH parameters and defines a new default set of modern ciphers to use, removing support for ciphers and hash functions that are no longer considered secure. - -Server administrators who wish to have different settings can configure custom global and per-port cipher suites in the configuration file using the `ssl_ciphers` directive. 
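
As a sketch of the per-port form of that setting, a stanza in `rippled.cfg` might look like the following; the stanza name, port, and cipher string are only examples, so choose values that match your own deployment and security policy:

```
[port_wss_public]
port = 443
ip = 0.0.0.0
protocol = wss
ssl_ciphers = ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384
```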
- -**0.50.0 Change Log** - -Remove websocketpp support (#1910) - -Increase OpenSSL requirements & harden default TLS cipher suites (#1913) - -Move test support sources out of ripple directory (#1916) - -Enhance ledger header RPC commands (#1918) - -Add support for tick sizes (#1922) - -Port discrepancy-test.coffee to c++ (#1930) - -Remove redundant call to `clearNeedNetworkLedger` (#1931) - -Port freeze-test.coffee to C++ unit test. (#1934) - -Fix CMake docs target to work if `BOOST_ROOT` is not set (#1937) - -Improve setup for account_tx paging test (#1942) - -Eliminate npm tests (#1943) - -Port uniport js test to cpp (#1944) - -Enable amendments in genesis ledger (#1944) - -Trim ledger data in Discrepancy_test (#1948) - -Add `current_ledger` field to `fee` result (#1949) - -Cleanup unit test support code (#1953) - -Add ledger save / load tests (#1955) - -Remove unused websocket files (#1957) - -Update RPC handler role/usage (#1966) - -**Bug Fixes** - -Validator's manifest not forwarded beyond directly connected peers (#1919) - -**Upcoming Features** - -We expect the previously announced Suspended Payments feature, which introduces new transaction types to the Ripple protocol that will permit users to cryptographically escrow XRP on RCL, to be enabled via the [SusPay](https://ripple.com/build/amendments/#suspay) Amendment on Tuesday, 2017-02-21. - -Also, we expect support for crypto-conditions, which are signature-like structures that can be used with suspended payments to support ILP integration, to be included in the next rippled release scheduled for March. - -Lastly, we do not have an update on the previously announced changes to the hash tree structure that rippled uses to represent a ledger, called [SHAMapV2](https://ripple.com/build/amendments/#shamapv2). At the time of activation, this amendment will require brief scheduled allowable unavailability while the changes to the hash tree structure are computed by the network. We will keep the community updated as we progress towards this date (TBA). - - -## Version 0.40.1 - -The `rippled` 0.40.1 release increases SQLite database limits in all rippled servers. Ripple recommends upgrading to 0.40.1 only if server operators are running rippled servers with full-history of the ledger. There are no new or updated features in the 0.40.1 release. - -You can update to the new version on Red Hat Enterprise Linux 7 or CentOS 7 using yum. For other platforms, please compile the new version from source. - -**New and Updated Features** - -This release has no new features. - -**Bug Fixes** - -Increase SQLite database limits to prevent full-history servers from crashing when restarting. (#1961) - -## Version 0.40.0 - -The `rippled` 0.40.0 release includes Suspended Payments, a new transaction type on the Ripple network that functions similar to an escrow service, which permits users cryptographically escrow XRP on RCL with an expiration date. Ripple expects Suspended Payments to be enabled via an Amendment named [SusPay](https://ripple.com/build/amendments/#suspay) on Tuesday, 2017-01-17. - -You can update to the new version on Red Hat Enterprise Linux 7 or CentOS 7 using yum. For other platforms, please compile the new version from source. - -**New and Updated Features** - -Previously, Ripple announced the introduction of Payment Channels during the release of rippled version 0.33.0, which permit scalable, off-ledger checkpoints of high volume, low value payments flowing in a single direction. 
This was the first step in a multi-phase effort to make RCL more scalable and to support Interledger Protocol (ILP). Ripple expects Payment Channels to be enabled via an Amendment called [PayChan](https://ripple.com/build/amendments/#paychan) on a future date to be determined. - -In the second phase towards making RCL more scalable and compatible with ILP, Ripple is introducing Suspended Payments, a new transaction type on the Ripple network that functions similar to an escrow service, which permits users to cryptographically escrow XRP on RCL with an expiration date. Ripple expects Suspended Payments to be enabled via an Amendment named [SusPay](https://ripple.com/build/amendments/#suspay) on Tuesday, 2017-01-17. - -A Suspended Payment can be created, which deducts the funds from the sending account. It can then be either fulfilled or canceled. It can only be fulfilled if the fulfillment transaction makes it into a ledger with a CloseTime lower than the expiry date of the transaction. It can be canceled with a transaction that makes it into a ledger with a CloseTime greater than the expiry date of the transaction. - -In the third phase towards making RCL more scalable and compatible with ILP, Ripple plans to introduce additional library support for crypto-conditions, which are distributable event descriptions written in a standard format that describe how to recognize a fulfillment message without saying exactly what the fulfillment is. Fulfillments are cryptographically verifiable messages that prove an event occurred. If you transmit a fulfillment, then everyone who has the condition can agree that the condition has been met. Fulfillment requires the submission of a signature that matches the condition (message hash and public key). This format supports multiple algorithms, including different hash functions and cryptographic signing schemes. Crypto-conditions can be nested in multiple levels, with each level possibly having multiple signatures. - -Lastly, we do not have an update on the previously announced changes to the hash tree structure that rippled uses to represent a ledger, called [SHAMapV2](https://ripple.com/build/amendments/#shamapv2). This will require brief scheduled allowable downtime while the changes to the hash tree structure are propagated by the network. We will keep the community updated as we progress towards this date (TBA). - -Consensus refactor (#1874) - -Bug Fixes - -Correct an issue in payment flow code that did not remove an unfunded offer (#1860) - -Sign validator manifests with both ephemeral and master keys (#1865) - -Correctly parse multi-buffer JSON messages (#1862) - - -## Version 0.33.0 - -The `rippled` 0.33.0 release includes an improved version of the payment code, which we expect to be activated via Amendment on Wednesday, 2016-10-20 with the name [Flow](https://ripple.com/build/amendments/#flow). We are also introducing XRP Payment Channels, a new structure in the ledger designed to support [Interledger Protocol](https://interledger.org/) trust lines as balances get substantial, which we expect to be activated via Amendment on a future date (TBA) with the name [PayChan](https://ripple.com/build/amendments/#paychan). Lastly, we will be introducing changes to the hash tree structure that rippled uses to represent a ledger, which we expect to be available via Amendment on a future date (TBA) with the name [SHAMapV2](https://ripple.com/build/amendments/#shamapv2). 
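
The fulfill/cancel timing rule described above boils down to a comparison between a ledger's CloseTime and the payment's expiration. A minimal sketch, assuming both values are expressed as seconds since the Ripple epoch; this is not rippled's actual implementation:

```cpp
#include <cstdint>

// Sketch of the Suspended Payments timing rule described above.
bool
canFulfill(std::uint32_t ledgerCloseTime, std::uint32_t expiration)
{
    // A fulfillment must land in a ledger whose CloseTime is lower than the
    // expiration.
    return ledgerCloseTime < expiration;
}

bool
canCancel(std::uint32_t ledgerCloseTime, std::uint32_t expiration)
{
    // A cancellation must land in a ledger whose CloseTime is greater than
    // the expiration.
    return ledgerCloseTime > expiration;
}
```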
- -You can [update to the new version](https://ripple.com/build/rippled-setup/#updating-rippled) on Red Hat Enterprise Linux 7 or CentOS 7 using yum. For other platforms, please [compile the new version from source](https://wiki.ripple.com/Rippled_build_instructions). - -** New and Updated Features ** - -A fixed version of the new payment processing engine, which we initially announced on Friday, 2016-07-29, is expected to be available via Amendment on Wednesday, 2016-10-20 with the name [Flow](https://ripple.com/build/amendments/#flow). The new payments code adds no new features, but improves efficiency and robustness in payment handling. - -The Flow code may occasionally produce slightly different results than the old payment processing engine due to the effects of floating point rounding. - -We will be introducing changes to the hash tree structure that rippled uses to represent a ledger, which we expect to be activated via Amendment on a future date (TBA) with the name [SHAMapV2](https://ripple.com/build/amendments/#shamapv2). The new structure is more compact and efficient than the previous version. This affects how ledger hashes are calculated, but has no other user-facing consequences. The activation of the SHAMapV2 amendment will require brief scheduled allowable downtime, while the changes to the hash tree structure are propagated by the network. We will keep the community updated as we progress towards this date (TBA). - -In an effort to make RCL more scalable and to support Interledger Protocol (ILP) trust lines as balances get more substantial, we’re introducing XRP Payment Channels, a new structure in the ledger, which we expect to be available via Amendment on a future date (TBA) with the name [PayChan](https://ripple.com/build/amendments/#paychan). - -XRP Payment Channels permit scalable, intermittent, off-ledger settlement of ILP trust lines for high volume payments flowing in a single direction. For bidirectional channels, an XRP Payment Channel can be used in each direction. The recipient can claim any unpaid balance at any time. The owner can top off the channel as needed. The owner must wait out a delay to close the channel to give the recipient a chance to supply any claims. The total amount paid increases monotonically as newer claims are issued. - -The initial concept behind payment channels was discussed as early as 2011 and the first implementation was done by Mike Hearn in bitcoinj. Recent work being done by Lightning Network has showcased examples of the many use cases for payment channels. The introduction of XRP Payment Channels allows for a more efficient integration between RCL and ILP to further support enterprise use cases for high volume payments. - -Added `getInfoRippled.sh` support script to gather health check for rippled servers [RIPD-1284] - -The `account_info` command can now return information about queued transactions - [RIPD-1205] - -Automatically-provided sequence numbers now consider the transaction queue - [RIPD-1206] - -The `server_info` and `server_state` commands now include the queue-related escalated fee factor in the load_factor field of the response - [RIPD-1207] - -A transaction with a high transaction cost can now cause transactions from the same sender queued in front of it to get into the open ledger if the transaction costs are high enough on average across all such transactions. 
- [RIPD-1246] - -Reorganization: Move `LoadFeeTrack` to app/tx and clean up functions - [RIPD-956] - -Reorganization: unit test source files - [RIPD-1132] - -Reorganization: NuDB stand-alone repository - [RIPD-1163] - -Reorganization: Add `BEAST_EXPECT` to Beast - [RIPD-1243] - -Reorganization: Beast 64-bit CMake/Bjam target on Windows - [RIPD-1262] - -** Bug Fixes ** - -`PaymentSandbox::balanceHook` can return the wrong issuer, which could cause the transfer fee to be incorrectly by-passed in rare circumstances. [RIPD-1274, #1827] - -Prevent concurrent write operations in websockets [#1806] - -Add HTTP status page for new websocket implementation [#1855] - - -## Version 0.32.1 - -The `rippled` 0.32.1 release includes an improved version of the payment code, which we expect to be available via Amendment on Wednesday, 2016-08-24 with the name FlowV2, and a completely new implementation of the WebSocket protocol for serving clients. - -You can [update to the new version](https://ripple.com/build/rippled-setup/#updating-rippled) on Red Hat Enterprise Linux 7 or CentOS 7 using yum. For other platforms, please [compile the new version from source](https://wiki.ripple.com/Rippled_build_instructions). - -**New and Updated Features** - -An improved version of the payment processing engine, which we expect to be available via Amendment on Wednesday, 2016-08-24 with the name “FlowV2”. The new payments code adds no new features, but improves efficiency and robustness in payment handling. - -The FlowV2 code may occasionally produce slightly different results than the old payment processing engine due to the effects of floating point rounding. Once FlowV2 is enabled on the network then old servers without the FlowV2 amendment will lose sync more frequently because of these differences. - -**Beast WebSocket** - -A completely new implementation of the WebSocket protocol for serving clients is available as a configurable option for `rippled` administrators. To enable this new implementation, change the “protocol” field in `rippled.cfg` from “ws” to “ws2” (or from “wss” to “wss2” for Secure WebSockets), as illustrated in this example: - - [port_ws_public] - port = 5006 - ip = 0.0.0.0 - protocol = wss2 - -The new implementation paves the way for increased reliability and future performance when submitting commands over WebSocket. The behavior and syntax of commands should be identical to the previous implementation. Please report any issues to support@ripple.com. A future version of rippled will remove the old WebSocket implementation, and use only the new one. - -**Bug fixes** - -Fix a non-exploitable, intermittent crash in some client pathfinding requests (RIPD-1219) - -Fix a non-exploitable crash caused by a race condition in the HTTP server. (RIPD-1251) - -Fix bug that could cause a previously fee queued transaction to not be relayed after being in the open ledger for an extended time without being included in a validated ledger. Fix bug that would allow an account to have more than the allowed limit of transactions in the fee queue. Fix bug that could crash debug builds in rare cases when replacing a dropped transaction. (RIPD-1200) - -Remove incompatible OS X switches in Test.py (RIPD-1250) - -Autofilling a transaction fee (sign / submit) with the experimental `x-queue-okay` parameter will use the user’s maximum fee if the open ledger fee is higher, improving queue position, and giving the tx more chance to succeed. 
(RIPD-1194) - - - -## Version 0.32.0 - -The `rippled` 0.32.0 release improves transaction queue which now supports batching and can hold up to 10 transactions per account, allowing users to queue multiple transactions for processing when the network load is high. Additionally, the `server_info` and `server_state` commands now include information on transaction cost multipliers and the fee command is available to unprivileged users. We advise rippled operators to upgrade immediately. - -You can update to the new version on Red Hat Enterprise Linux 7 or CentOS 7 using yum. For other platforms, please compile the new version from source. - -**New and Updated Features** - -- Allow multiple transactions per account in transaction queue (RIPD-1048). This also introduces a new transaction engine code, `telCAN_NOT_QUEUE`. -- Charge pathfinding consumers per source currency (RIPD-1019): The IP address used to perform pathfinding operations is now charged an additional resource increment for each source currency in the path set. -- New implementation of payment processing engine. This implementation is not yet enabled by default. -- Include amendments in validations subscription -- Add C++17 compatibility -- New WebSocket server implementation with Beast.WebSocket library. The new library offers a stable, high-performance websocket server implementation. To take advantage of this implementation, change websocket protocol under rippled.cfg from wss and ws to wss2 and ws2 under `[port_wss_admin]` and `[port_ws_public]` stanzas: -``` - [port_wss_admin] - port = 51237 - ip = 127.0.0.1 - admin = 127.0.0.1 - protocol = wss2 - - [port_ws_public] - port = 51233 - ip = 0.0.0.0 - protocol = wss2, ws2 -``` -- The fee command is now public (RIPD-1113) -- The fee command checks open ledger rules (RIPD-1183) -- Log when number of available file descriptors is insufficient (RIPD-1125) -- Publish all validation fields for signature verification -- Get quorum and trusted master validator keys from validators.txt -- Standalone mode uses temp DB files by default (RIPD-1129): If a [database_path] is configured, it will always be used, and tables will be upgraded on startup. -- Include config manifest in server_info admin response (RIPD-1172) - -**Bug fixes** - -- Fix history acquire check (RIPD-1112) -- Correctly handle connections that fail security checks (RIPD-1114) -- Fix secured Websocket closing -- Reject invalid MessageKey in SetAccount handler (RIPD-308, RIPD-990) -- Fix advisory delete effect on history acquisition (RIPD-1112) -- Improve websocket send performance (RIPD-1158) -- Fix XRP bridge payment bug (RIPD-1141) -- Improve error reporting for wallet_propose command. Also include a warning if the key used may be an insecure, low-entropy key. (RIPD-1110) - -**Deprecated features** - -- Remove obsolete sendGetPeers support (RIPD-164) -- Remove obsolete internal command (RIPD-888) - - - - -## Version 0.31.2 - -The `rippled` 0.31.2 release corrects issues with the fee escalation algorithm. We advise `rippled` operators to upgrade immediately. - -You can [update to the new version](https://ripple.com/build/rippled-setup/#updating-rippled) on Red Hat Enterprise Linux 7 or CentOS 7 using yum. For other platforms, please [compile the new version from source](https://wiki.ripple.com/Rippled_build_instructions). - -**New and Updated Features** - -This release has no new features. 
- -**Bug Fixes** - -- A defect in the fee escalation algorithm that caused network fees to escalate more rapidly than intended has been corrected. (RIPD-1177) -- The minimum local fee advertised by validators will no longer be adjusted upwards. - - - -## Version 0.31.1 - -The `rippled` 0.31.1 release contains one important bug fix. We advise `rippled` operators to upgrade immediately. - -You can [update to the new version](https://ripple.com/build/rippled-setup/#updating-rippled) on Red Hat Enterprise Linux 7 or CentOS 7 using yum. For other platforms, please [compile the new version from source](https://wiki.ripple.com/Rippled_build_instructions). - -**New and Updated Features** - -This release has no new features. - -**Bug Fixes** - -`rippled` 0.31.1 contains the following fix: - -- Correctly handle ledger validations with no `LedgerSequence` field. Previous versions of `rippled` incorrectly assumed that the optional validation field would always be included. Current versions of the software always include the field, and gracefully handle its absence. - - - -## Version 0.31.0 - -`rippled` 0.31.0 has been released. - -You can [update to the new version](https://ripple.com/build/rippled-setup/#updating-rippled) on Red Hat Enterprise Linux 7 or CentOS 7 using yum. - -For other platforms, please [compile the new version from source](https://wiki.ripple.com/Rippled_build_instructions). Use the `git log` command to confirm you have the correct source tree. The first log entry should be the change setting the version: - - - commit a5d58566386fd86ae4c816c82085fe242b255d2c - Author: Nik Bougalis - Date: Sun Apr 17 18:02:02 2016 -0700 - - Set version to 0.31.0 - - -**Warnings** - -Please expect a one-time delay when starting 0.31.0 while certain database indices are being built or rebuilt. The delay can be up to five minutes, during which CPU will spike and the server will appear unresponsive (no response to RPC, etc.). - -Additionally, `rippled` 0.31.0 now checks at start-up time that it has sufficient open file descriptors available, and shuts down with an error message if it does not. Previous versions of `rippled` could run out of file descriptors unexpectedly during operation. If you get a file-descriptor error message, increase the number of file descriptors available to `rippled` (for example, editing `/etc/security/limits.conf`) and restart. - -**New and Updated Features** - -`rippled` 0.31.0 has the following new or updated features: - -- (New) [**Amendments**](https://ripple.com/build/amendments/) - A consensus-based system for introducing changes to transaction processing. -- (New) [**Multi-Signing**](https://ripple.com/build/transactions/#multi-signing) - (To be enabled as an amendment) Allow transactions to be authorized by a list of signatures. (RIPD-182) -- (New) **Transaction queue and FeeEscalation** - (To be enabled as an amendment) Include or defer transactions based on the [transaction cost](https://ripple.com/build/transaction-cost/) offered, for better behavior in DDoS conditions. (RIPD-598) -- (Updated) Validations subscription stream now includes `ledger_index` field. 
(DEC-564) -- (Updated) You can request SignerList information in the `account_info` command (RIPD-1061) - -**Closed Issues** - -`rippled` 0.31.0 has the following fixes and improvements: - -- Improve held transaction submission -- Update SQLite from 3.8.11.1 to 3.11.0 -- Allow random seed with specified wallet_propose key_type (RIPD-1030) -- Limit pathfinding source currency limits (RIPD-1062) -- Speed up out of order transaction processing (RIPD-239) -- Pathfinding optimizations -- Streamlined UNL/validator list: The new code removes the ability to specify domain names in the [validators] configuration block, and no longer supports the [validators_site] option. -- Add websocket client -- Add description of rpcSENDMAX_MALFORMED error -- Convert PathRequest to use std::chrono (RIPD-1069) -- Improve compile-time OpenSSL version check -- Clear old Validations during online delete (RIPD-870) -- Return correct error code during unfunded offer cross (RIPD-1082) -- Report delivered_amount for legacy account_tx queries. -- Improve error message when signing fails (RIPD-1066) -- Fix websocket deadlock - - - - -## Version 0.30.1 - -rippled 0.30.1 has been released. The commit can be found on GitHub at: - -Prior to building, please confirm you have the correct source tree with the **git log** command. The first log entry should be the change setting the version: - - commit c717006c44126aa0edb3a36ca29ee30e7a72c1d3 - Author: Nik Bougalis - Date: Wed Feb 3 14:49:07 2016 -0800 - - Set version to 0.30.1 - -This release incorporates a number of important features, bugfixes and functional improvements. Please refer to the [Git commit history](https://github.com/ripple/rippled/commits/0.30.1) for more detailed information. - -**Release Overview** - -The rippled team is proud to release rippled version 0.30.1. This version contains several minor new features as well as significant improvements to the consensus algorithm that make it work faster and with more consistency. In the time we have been testing the new release on our validators, these changes have led to increased agreement and shorter close times between ledger versions, for approximately 40% more ledgers validated per day. - -**New Features** - -- Secure gateway: configured IPs can forward identifying user data in HTTP headers, including user name and origin IP. If the user name exists, then resource limits are lifted for that session. See rippled-example.cfg for more information.
-- Allow fractional fee multipliers (RIPD-626) -- Add “expiration” to account\_offers (RIPD-1049) -- Add “owner\_funds” to “transactions” array in RPC ledger (RIPD-1050) -- Add "tx" option to "ledger" command line -- Add server uptime in server\_info -- Allow multiple incoming connections from the same IP -- Report connection uptime in peer command (RIPD-927) -- Permit pathfinding to be disabled by setting \[path\_search\_max\] to 0 in rippled.cfg file (RIPD-271) -- Add subscription to peer status changes (RIPD-579) - -**Improvements** - -- Improvements to ledger\_request response -- Improvements to validations proposal relaying (RIPD-1057) -- Improvements to consensus algorithm -- Ledger close time optimizations (RIPD-998, RIPD-791) -- Delete unfunded offers in predictable order - -**Development-Related Updates** - -- Require boost 1.57 -- Implement new coroutines (RIPD-1043) -- Force STAccount interface to 160-bit size (RIPD-994) -- Improve compile-time OpenSSL version check - -**Bug Fixes** - -- Fix handling of secp256r1 signatures (RIPD-1040) -- Fix websocket messages dispatching -- Fix pathfinding early response (RIPD-1064) -- Handle account\_objects empty response (RIPD-958) -- Fix delivered\_amount reporting for minor ledgers (RIPD-1051) -- Fix setting admin privileges on websocket -- Fix race conditions in account\_tx command (RIPD-1035) -- Fix to enforce no-ripple constraints - -**Assistance** - -For assistance, please contact **integration@ripple.com** - -**More information** - -For more information or assistance, the following resources will be of use: - -- [Ripple Developer Forums](https://ripple.com/forum/viewforum.php?f=2) -- [IRC](https://webchat.freenode.net/?channels=#ripple) - - - - ------------------------------------------------------------ - -## Version 0.30.0 - -rippled 0.30.0 has been released. The commit can be found on GitHub at: - -Prior to building, please confirm you have the correct source tree with the **git log** command. The first log entry should be the change setting the version: - - commit a8859b495b552fe1eb140771f0f2cb32d11d2ac2 - Author: Vinnie Falco - Date: Wed Oct 21 18:26:02 2015 -0700 - - Set version to 0.30.0 - -This release incorporates a number of important features, bugfixes and functional improvements. Please refer to the [Git commit history](https://github.com/ripple/rippled/commits/0.30.0) for more detailed information. - -**Release Overview** - -As part of Ripple Labs’ ongoing commitment toward protocol security, the rippled team would like to release rippled 0.30.0. - -**Toolchain support** - -The minimum supported version of GCC used to compile rippled is v4.8. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Ubuntu_versions_older_than_13.10_:_Install_gcc_4.8) if you have not upgraded already. - -While it may be possible to compile rippled on (virtual) machines with 4GB of RAM, we recommend compiling on (virtual) machines with 8GB of RAM or more. If your build machine has more than one CPU (**`grep '^processor' /proc/cpuinfo | wc -l`**), you can use them to assist in the build process by compiling with the command **`scons -j[number of CPUs - 1]`**. - -**New Features** - -- Honor markers in ledger\_data requests ([RIPD-1010](https://ripplelabs.atlassian.net/browse/RIPD-1010)). 
-- New Amendment - **TrustSetAuth** (Not currently enabled) Create zero balance trust lines with auth flag ([RIPD-1003](https://ripplelabs.atlassian.net/browse/RIPD-1003)): this allows a TrustSet transaction to create a trust line if the only thing being changed is setting the tfSetfAuth flag. -- Randomize the initial transaction execution order for closed ledgers based on the hash of the consensus set ([RIPD-961](https://ripplelabs.atlassian.net/browse/RIPD-961)). **Activates on October 27, 2015 at 11:00 AM PCT**. -- Differentiate path\_find response ([RIPD-1013](https://ripplelabs.atlassian.net/browse/RIPD-1013)). -- Convert all of an asset ([RIPD-655](https://ripplelabs.atlassian.net/browse/RIPD-655)). - -**Improvements** - -- SHAMap improvements. -- Upgrade SQLite from 3.8.8.2 to 3.8.11.1. -- Limit the number of offers that can be consumed during crossing ([RIPD-1026](https://ripplelabs.atlassian.net/browse/RIPD-1026)). -- Remove unfunded offers on tecOVERSIZE ([RIPD-1026](https://ripplelabs.atlassian.net/browse/RIPD-1026)). -- Improve transport security ([RIPD-1029](https://ripplelabs.atlassian.net/browse/RIPD-1029)): to take full advantage of the improved transport security, servers with a single, static public IP address should add it to their configuration file, as follows: - - [overlay] - public_ip= - -**Development-Related Updates** - -- Transitional support for gcc 5.2: to enable support define the environmental variable `RIPPLED_OLD_GCC_ABI`=1 -- Transitional support for C++ 14: to enable support define the environment variable `RIPPLED_USE_CPP_14`=1 -- Visual Studio 2015 support -- Updates to integration tests -- Add uptime to crawl data ([RIPD-997](https://ripplelabs.atlassian.net/browse/RIPD-997)) - -**Assistance** - -For assistance, please contact **integration@ripple.com** - -**More information** - -For more information or assistance, the following resources will be of use: - -- [Ripple Developer Forums](https://ripple.com/forum/viewforum.php?f=2) -- [IRC](https://webchat.freenode.net/?channels=#ripple) - - - - ------------------------------------------------------------ - -## Version 0.29.0 - -rippled 0.29.0 has been released. The commit can be found on GitHub at: - -Prior to building, please confirm you have the correct source tree with the **git log** command. The first log entry should be the change setting the version: - - commit 5964710f736e258c7892e8b848c48952a4c7856c - Author: Nik Bougalis - Date: Tue Aug 4 13:22:45 2015 -0700 - - Set version to 0.29.0 - -This release incorporates a number of important features, bugfixes and functional improvements. Please refer to the [Git commit history](https://github.com/ripple/rippled/commits/0.29.0) for more detailed information. - -**Release Overview** - -As part of Ripple Labs’ ongoing commitment toward protocol security, the rippled team would like to announce rippled release 0.29.0. - -**Toolchain support** - -The minimum supported version of GCC used to compile rippled is v4.8. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Ubuntu_versions_older_than_13.10_:_Install_gcc_4.8) if you have not upgraded already. - -While it may be possible to compile rippled on (virtual) machines with 4GB of RAM, we recommend compiling on (virtual) machines with 8GB of RAM or more. If your build machine has more than one CPU (**\`grep '^processor' /proc/cpuinfo | wc -l\`**), you can use them to assist in the build process by compiling with the command **scons -j\[number of CPUs - 1\]**. 
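As a small optional convenience (not part of the rippled tooling), the parallel-build suggestion above, `scons -j[number of CPUs - 1]`, can be computed programmatically:

```python
# Optional helper, not part of rippled: derive the suggested parallel
# build flag from the CPU count, per the "scons -j[number of CPUs - 1]"
# recommendation above.
import os

cpus = os.cpu_count() or 1
jobs = max(1, cpus - 1)
print("suggested build command: scons -j%d" % jobs)
```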
- -**New Features** - -- Subscription stream for validations ([RIPD-504](https://ripplelabs.atlassian.net/browse/RIPD-504)) - -**Deprecated features** - -- Disable Websocket ping timer - -**Bug Fixes** - -- Fix off-by one bug that overstates the account reserve during OfferCreate transaction. **Activates August 17, 2015**. -- Fix funded offer removal during Payment transaction ([RIPD-113](https://ripplelabs.atlassian.net/browse/RIPD-113)). **Activates August 17, 2015**. -- Fix display discrepancy in fee. - -**Improvements** - -- Add “quality” field to account\_offers API response: quality is defined as the exchange rate, the ratio taker\_pays divided by taker\_gets. -- Add [full\_reply](https://ripple.com/build/rippled-apis/#path-find-create) field to path\_find API response: full\_reply is defined as true/false value depending on the completed depth of pathfinding search ([RIPD-894](https://ripplelabs.atlassian.net/browse/RIPD-894)). -- Add [DeliverMin](https://ripple.com/build/transactions/#payment) transaction field ([RIPD-930](https://ripplelabs.atlassian.net/browse/RIPD-930)). **Activates August 17, 2015**. - -**Development-Related Updates** - -- Add uptime to crawl data ([RIPD-997](https://ripplelabs.atlassian.net/browse/RIPD-997)). -- Add IOUAmount and XRPAmount: these numeric types replace the monolithic functionality found in STAmount ([RIPD-976](https://ripplelabs.atlassian.net/browse/RIPD-976)). -- Log metadata differences on built ledger mismatch. -- Add enableTesting flag to applyTransactions. - -**Assistance** - -For assistance, please contact **integration@ripple.com** - -**More information** - -For more information or assistance, the following resources will be of use: - -- [Ripple Developer Forums](https://ripple.com/forum/viewforum.php?f=2) -- [IRC](https://webchat.freenode.net/?channels=#ripple) - - - - ------------------------------------------------------------ - -## Version 0.28.2 - -rippled 0.28.2 has been released. The commit can be found on GitHub at: - -Prior to building, please confirm you have the correct source tree with the **git log** command. The first log entry should be the change setting the version: - - commit 6374aad9bc94595e051a04e23580617bc1aaf300 - Author: Vinnie Falco - Date: Tue Jul 7 09:21:44 2015 -0700 - - Set version to 0.28.2 - -This release incorporates a number of important features, bugfixes and functional improvements. Please refer to the [Git commit history](https://github.com/ripple/rippled/commits/release) for more detailed information. - -**Release Overview** - -As part of Ripple Labs’ ongoing commitment toward protocol security, the rippled team would like to announce rippled release 0.28.2. **This release is necessary for compatibility with OpenSSL v.1.0.1n and later.** - -**Toolchain support** - -The minimum supported version of GCC used to compile rippled is v4.8. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Ubuntu_versions_older_than_13.10_:_Install_gcc_4.8) if you have not upgraded already. - -While it may be possible to compile rippled on (virtual) machines with 4GB of RAM, we recommend compiling on (virtual) machines with 8GB of RAM or more. If your build machine has more than one CPU (**\`grep '^processor' /proc/cpuinfo | wc -l\`**), you can use them to assist in the build process by compiling with the command **scons -j\[number of CPUs - 1\]**. - -The minimum supported version of Boost is v1.57.0. 
You **must** upgrade to this release or later to successfully compile this release of rippled. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Install_Boost) if you have not upgraded already. - -**rippled.cfg Updates** - -For \[ips\] stanza, a port must be specified for each listed IP address with the space between IP address and port, ex.: `r.ripple.com` `51235` ([RIPD-893](https://ripplelabs.atlassian.net/browse/RIPD-893)) - -**New Features** - -- New API: [gateway\_balances](https://ripple.com/build/rippled-apis/#gateway-balances) to get a gateway's hot wallet balances and total obligations. - -**Deprecated features** - -- Removed temp\_db ([RIPD-887](https://ripplelabs.atlassian.net/browse/RIPD-887)) - -**Improvements** - -- Improve peer send queue management -- Support larger EDH keys -- More robust call to get the valid ledger index -- Performance improvements to transactions application to open ledger - -**Development-Related Updates** - -- New Env transaction testing framework for unit testing -- Fix MSVC link -- C++ 14 readiness - -**Assistance** - -For assistance, please contact **integration@ripple.com** - -**More information** - -For more information or assistance, the following resources will be of use: - -- [Ripple Developer Forums](https://ripple.com/forum/viewforum.php?f=2) -- [IRC](https://webchat.freenode.net/?channels=#ripple) - - - - ------------------------------------------------------------ - -## Version 0.28.1 - -rippled 0.28.1 has been released. The commit can be found on GitHub at: - -Prior to building, please confirm you have the correct source tree with the **git log** command. The first log entry should be the change setting the version: - - commit 399c43cae6e90a428e9ce6a988123972b0f03c99 - Author: Miguel Portilla - Date: Wed May 20 13:30:54 2015 -0400 - - Set version to 0.28.1 - -This release incorporates a number of important features, bugfixes and functional improvements. Please refer to the [Git commit history](https://github.com/ripple/rippled/commits/0.28.1) for more detailed information. - -**Toolchain support** - -The minimum supported version of GCC used to compile rippled is v4.8. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Ubuntu_versions_older_than_13.10_:_Install_gcc_4.8) if you have not upgraded already. - -While it may be possible to compile rippled on (virtual) machines with 4GB of RAM, we recommend compiling on (virtual) machines with 8GB of RAM or more. If your build machine has more than one CPU (**\`grep '^processor' /proc/cpuinfo | wc -l\`**), you can use them to assist in the build process by compiling with the command **scons -j\[number of CPUs - 1\]**. - -The minimum supported version of Boost is v1.57.0. You **must** upgrade to this release or later to successfully compile this release of rippled. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Install_Boost) if you have not upgraded already. - -**New Features** - -- Filtering for Account Objects ([RIPD-868](https://ripplelabs.atlassian.net/browse/RIPD-868)). -- Track rippled server peers latency ([RIPD-879](https://ripplelabs.atlassian.net/browse/RIPD-879)). 
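As a rough illustration of the account-objects filtering added above (RIPD-868), the sketch below builds a JSON-RPC request body for the `account_objects` method. The `type` value and the placeholder address are assumptions for the example; check the current API documentation for the exact parameter names and accepted values.

```python
# Sketch of an account_objects request using the filtering added in
# RIPD-868. The "type" value and the address are illustrative
# assumptions; consult the account_objects documentation for the
# exact parameters before relying on them.
import json

request = {
    "method": "account_objects",
    "params": [{
        "account": "rExampleAddressXXXXXXXXXXXXXXXXXXX",  # placeholder
        "type": "offer",                # return only Offer objects
        "ledger_index": "validated",
        "limit": 10,
    }],
}

print(json.dumps(request, indent=2))
# POST this JSON body to the server's HTTP-RPC port to run the query.
```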
- -**Bug fixes** - -- Expedite zero flow handling for offers -- Fix offer crossing when funds are the limiting factor - -**Deprecated features** - -- Wallet\_accounts and generator maps ([RIPD-804](https://ripplelabs.atlassian.net/browse/RIPD-804)) - -**Improvements** - -- Control ledger query depth based on peers latency -- Improvements to ledger history fetches -- Improve RPC ledger synchronization requirements ([RIPD-27](https://ripplelabs.atlassian.net/browse/RIPD-27), [RIPD-840](https://ripplelabs.atlassian.net/browse/RIPD-840)) -- Eliminate need for ledger in delivered\_amount calculation ([RIPD-860](https://ripplelabs.atlassian.net/browse/RIPD-860)) -- Improvements to JSON parsing - -**Development-Related Updates** - -- Add historical ledger fetches per minute to get\_counts -- Compute validated ledger age from signing time -- Remove unused database table ([RIPD-755](https://ripplelabs.atlassian.net/browse/RIPD-755)) - -**Assistance** - -For assistance, please contact **integration@ripple.com** - -**More information** - -For more information or assistance, the following resources will be of use: - -- [Ripple Developer Forums](https://ripple.com/forum/viewforum.php?f=2) -- [IRC](https://webchat.freenode.net/?channels=#ripple) - - - - ------------------------------------------------------------ - -## Version 0.28.0 - -rippled 0.28.0 has been released. The commit can be found on GitHub at: - -Prior to building, please confirm you have the correct source tree with the **git log** command. The first log entry should be the change setting the version: - - commit 7efd0ab0d6ef017331a0e214a3053893c88f38a9 - Author: Vinnie Falco - Date: Fri Apr 24 18:57:36 2015 -0700 - - Set version to 0.28.0 - -This release incorporates a number of important features, bugfixes and functional improvements. Please refer to the [Git commit history](https://github.com/ripple/rippled/commits/0.28.0) for more detailed information. - -**Release Overview** - -As part of Ripple Labs’ ongoing commitment toward improving the protocol, the rippled team is excited to announce **autobridging** — a feature that allows XRP to serve as a bridge currency. Autobridging enhances utility and has the potential to expose more of the network to liquidity and improve prices. For more information please refer to the [autobridging blog post](https://ripple.com/uncategorized/introducing-offer-autobridging/). - -**Toolchain support** - -The minimum supported version of GCC used to compile rippled is v4.8. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Ubuntu_versions_older_than_13.10_:_Install_gcc_4.8) if you have not upgraded already. - -While it may be possible to compile rippled on (virtual) machines with 4GB of RAM, we recommend compiling on (virtual) machines with 8GB of RAM or more. If your build machine has more than one CPU (**\`grep '^processor' /proc/cpuinfo | wc -l\`**), you can use them to assist in the build process by compiling with the command **scons -j\[number of CPUs - 1\]**. - -The minimum supported version of Boost is v1.57.0. You **must** upgrade to this release or later to successfully compile this release of rippled. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Install_Boost) if you have not upgraded already. 
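To make the autobridging feature described in the release overview above more concrete: an IOU-to-IOU trade can be filled either from the direct order book or by combining the IOU/XRP and XRP/IOU books, whichever yields the better rate. The sketch below is a conceptual illustration with invented currencies and rates, not rippled's order-book implementation.

```python
# Conceptual illustration of autobridging (not rippled's order-book
# code): choose the better of the direct FOO/BAR rate and the
# synthetic rate through XRP. Currencies and rates are invented.

direct_bar_per_foo = 1.05        # best direct FOO -> BAR offer
xrp_per_foo = 20.0               # best FOO -> XRP offer
bar_per_xrp = 0.054              # best XRP -> BAR offer

bridged_bar_per_foo = xrp_per_foo * bar_per_xrp   # 1.08 through XRP

if bridged_bar_per_foo > direct_bar_per_foo:
    print("fill through XRP at", bridged_bar_per_foo)
else:
    print("fill directly at", direct_bar_per_foo)
```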
- -**Important rippled.cfg update** - -With rippled version 0.28, the rippled.cfg file must be changed according to these instructions: - -- Change any entries that say - -`admin` `=` `allow` to `admin` `=` - -- For most installations, 127.0.0.1 will preserve current behavior. 0.0.0.0 may be specified to indicate "any IP" but cannot be combined with other IP addresses. Use of 0.0.0.0 may introduce severe security risks and is not recommended. See docs/rippled-example.cfg for more information. - -**More Strict Requirements on MemoType** - -The requirements on the contents of the MemoType field, if present, are more strict than in the previous version. Transactions that can successfully be submitted to 0.27.4 and earlier may fail in 0.28.0. For details, please refer to the [updated memo documentation](https://ripple.com/build/transactions/#memos). Partners should check their implementation to make sure that their MemoType follows the new rules. - -**New Features** - -- Autobridging implementation ([RIPD-423](https://ripplelabs.atlassian.net/browse/RIPD-423)). **This feature will be turned on May 12, 2015**. -- Combine history\_ledger\_index and online\_delete settings in rippled.cfg ([RIPD-774](https://ripplelabs.atlassian.net/browse/RIPD-774)). -- Claim a fee when a required destination tag is not specified ([RIPD-574](https://ripplelabs.atlassian.net/browse/RIPD-574)). -- Require the master key when disabling the use of the master key or when enabling 'no freeze' ([RIPD-666](https://ripplelabs.atlassian.net/browse/RIPD-666)). -- Change the port setting admin to accept allowable admin IP addresses ([RIPD-820](https://ripplelabs.atlassian.net/browse/RIPD-820)): - - rpc\_admin\_allow has been removed. - - Comma-separated list of IP addresses that are allowed administrative privileges (subject to username & password authentication if configured). - - 127.0.0.1 is no longer a default admin IP. - - 0.0.0.0 may be specified to indicate "any IP" but cannot be combined with other IP addresses. Use of 0.0.0.0 may introduce severe security risks and is not recommended. -- Enable Amendments from config file or static data ([RIPD-746](https://ripplelabs.atlassian.net/browse/RIPD-746)). - -**Bug fixes** - -- Fix payment engine handling of offer ⇔ account ⇔ offer cases ([RIPD-639](https://ripplelabs.atlassian.net/browse/RIPD-639)). **This fix will take effect on May 12, 2015**. -- Fix specified destination issuer in pathfinding ([RIPD-812](https://ripplelabs.atlassian.net/browse/RIPD-812)). -- Only report delivered\_amount for executed payments ([RIPD-827](https://ripplelabs.atlassian.net/browse/RIPD-827)). -- Return a validated ledger if there is one ([RIPD-814](https://ripplelabs.atlassian.net/browse/RIPD-814)). -- Refund owner's ticket reserve when a ticket is canceled ([RIPD-855](https://ripplelabs.atlassian.net/browse/RIPD-855)). -- Return descriptive error from account\_currencies RPC ([RIPD-806](https://ripplelabs.atlassian.net/browse/RIPD-806)). -- Fix transaction enumeration in account\_tx API ([RIPD-734](https://ripplelabs.atlassian.net/browse/RIPD-734)). -- Fix inconsistent ledger\_current return ([RIPD-669](https://ripplelabs.atlassian.net/browse/RIPD-669)). -- Fix flags --rpc\_ip and --rpc\_port ([RIPD-679](https://ripplelabs.atlassian.net/browse/RIPD-679)).
-- Skip inefficient SQL query ([RIPD-870](https://ripplelabs.atlassian.net/browse/RIPD-870)) - -**Deprecated features** - -- Remove support for deprecated PreviousTxnID field ([RIPD-710](https://ripplelabs.atlassian.net/browse/RIPD-710)). **This will take effect on May 12, 2015**. -- Eliminate temREDUNDANT\_SEND\_MAX ([RIPD-690](https://ripplelabs.atlassian.net/browse/RIPD-690)). -- Remove WalletAdd ([RIPD-725](https://ripplelabs.atlassian.net/browse/RIPD-725)). -- Remove SMS support. - -**Improvements** - -- Improvements to peer communications. -- Reduce master lock for client requests. -- Update SQLite to 3.8.8.2. -- Require Boost 1.57. -- Improvements to Universal Port ([RIPD-687](https://ripplelabs.atlassian.net/browse/RIPD-687)). -- Constrain valid inputs for memo fields ([RIPD-712](https://ripplelabs.atlassian.net/browse/RIPD-712)). -- Binary option for ledger command ([RIPD-692](https://ripplelabs.atlassian.net/browse/RIPD-692)). -- Optimize transaction checks ([RIPD-751](https://ripplelabs.atlassian.net/browse/RIPD-751)). - -**Development-Related Updates** - -- Add RPC metrics ([RIPD-705](https://ripplelabs.atlassian.net/browse/RIPD-705)). -- Track and report peer load. -- Builds/Test.py will build and test by one or more scons targets. -- Support a --noserver command line option in tests: -- Run npm/integration tests without launching rippled, using a running instance of rippled (possibly in a debugger) instead. -- Works for npm test and mocha. -- Display human readable SSL error codes. -- Better transaction analysis ([RIPD-755](https://ripplelabs.atlassian.net/browse/RIPD-755)). - -**Assistance** - -For assistance, please contact **integration@ripple.com** - -**More information** - -For more information or assistance, the following resources will be of use: - -- [Ripple Developer Forums](https://ripple.com/forum/viewforum.php?f=2) -- [IRC](https://webchat.freenode.net/?channels=#ripple) - - - - ------------------------------------------------------------ - -## Version 0.27.4 - -rippled 0.27.4 has been released. The commit can be found on GitHub at: - -Prior to building, please confirm you have the correct source tree with the **git log** command. The first log entry should be the change setting the version: - - commit 92812fe7239ffa3ba91649b2ece1e892b866ec2a - Author: Nik Bougalis - Date: Wed Mar 11 11:26:44 2015 -0700 - - Set version to 0.27.4 - -This release includes one new feature. Please refer to the [Git commit history](https://github.com/ripple/rippled/commits/0.27.4) for more detailed information. - -**Toolchain support** - -The minimum supported version of GCC used to compile rippled is v4.8. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Ubuntu_versions_older_than_13.10_:_Install_gcc_4.8) if you have not upgraded already. - -While it may be possible to compile rippled on (virtual) machines with 4GB of RAM, we recommend compiling on (virtual) machines with 8GB of RAM or more. If your build machine has more than one CPU (**\`grep '^processor' /proc/cpuinfo | wc -l\`**), you can use them to assist in the build process by compiling with the command **scons -j\[number of CPUs - 1\]**. - -The minimum supported version of Boost is v1.57.0. You **must** upgrade to this release or later to successfully compile this release of rippled. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Install_Boost) if you have not upgraded already. 
- -**Bug Fixes** - -- Limit passes in the payment engine - -**Assistance** - -For assistance, please contact **integration@ripple.com** - -**More information** - -For more information or assistance, the following resources will be of use: - -- [Ripple Developer Forums](https://ripple.com/forum/viewforum.php?f=2) -- [IRC](https://webchat.freenode.net/?channels=#ripple) - - - - ------------------------------------------------------------ - -## Version 0.27.3-sp2 - -rippled 0.27.3-sp2 has been released. The commit can be found on GitHub at: - -Prior to building, please confirm you have the correct source tree with the **git log** command. The first log entry should be the change setting the version: - - commit f999839e599e131ed624330ad0ce85bb995f02d3 - Author: Nik Bougalis - Date: Thu Mar 12 13:37:47 2015 -0700 - - Set version to 0.27.3-sp2 - -This release includes one new feature. Please refer to the [Git commit history](https://github.com/ripple/rippled/commits/0.27.3-sp2) for more detailed information. - -**Toolchain support** - -The minimum supported version of GCC used to compile rippled is v4.8. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Ubuntu_versions_older_than_13.10_:_Install_gcc_4.8) if you have not upgraded already. - -While it may be possible to compile rippled on (virtual) machines with 4GB of RAM, we recommend compiling on (virtual) machines with 8GB of RAM or more. If your build machine has more than one CPU (**\`grep '^processor' /proc/cpuinfo | wc -l\`**), you can use them to assist in the build process by compiling with the command **scons -j\[number of CPUs - 1\]**. - -The minimum supported version of Boost is v1.57.0. You **must** upgrade to this release or later to successfully compile this release of rippled. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Install_Boost) if you have not upgraded already. - -**New Features** - -- Add noripple\_check RPC command: this command tells gateways what they need to do to set "Default Ripple" account flag and fix any trust lines created before the flag was set. - -**Assistance** - -For assistance, please contact **integration@ripple.com** - -**More information** - -For more information or assistance, the following resources will be of use: - -- [Ripple Developer Forums](https://ripple.com/forum/viewforum.php?f=2) -- [IRC](https://webchat.freenode.net/?channels=#ripple) - - - - ------------------------------------------------------------ - -## Version 0.27.3-sp1 - -rippled 0.27.3-sp1 has been released. The commit can be found on GitHub at: - -Prior to building, please confirm you have the correct source tree with the **git log** command. The first log entry should be the change setting the version: - - commit 232693419a2c9a8276a0fae991f688f6f01a3add - Author: Nik Bougalis - Date: Wed Mar 11 10:26:39 2015 -0700 - - Set version to 0.27.3-sp1 - -This release includes one new feature. Please refer to the [Git commit history](https://github.com/ripple/rippled/commits/0.27.3-sp1) for more detailed information. - -**Toolchain support** - -The minimum supported version of GCC used to compile rippled is v4.8. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Ubuntu_versions_older_than_13.10_:_Install_gcc_4.8) if you have not upgraded already. - -While it may be possible to compile rippled on (virtual) machines with 4GB of RAM, we recommend compiling on (virtual) machines with 8GB of RAM or more. 
If your build machine has more than one CPU (**\`grep '^processor' /proc/cpuinfo | wc -l\`**), you can use them to assist in the build process by compiling with the command **scons -j\[number of CPUs - 1\]**. - -The minimum supported version of Boost is v1.57.0. You **must** upgrade to this release or later to successfully compile this release of rippled. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Install_Boost) if you have not upgraded already. - -**New Features** - -- Add "Default Ripple" account flag - -**Assistance** - -For assistance, please contact **integration@ripple.com** - -**More information** - -For more information or assistance, the following resources will be of use: - -- [Ripple Developer Forums](https://ripple.com/forum/viewforum.php?f=2) -- [IRC](https://webchat.freenode.net/?channels=#ripple) - - ------------------------------------------------------------ - -## Version 0.27.3 - -rippled 0.27.3 has been released. The commit can be found on GitHub at: - -Prior to building, please confirm you have the correct source tree with the **git log** command. The first log entry should be the change setting the version: - - commit 70c2854f7c8a28801a7ebc81dd62bf0d068188f0 - Author: Nik Bougalis - Date: Tue Mar 10 14:06:33 2015 -0700 - - Set version to 0.27.3 - -This release includes one new feature. Please refer to the [Git commit history](https://github.com/ripple/rippled/commits/0.27.3) for more detailed information. - -**Toolchain support** - -The minimum supported version of GCC used to compile rippled is v4.8. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Ubuntu_versions_older_than_13.10_:_Install_gcc_4.8) if you have not upgraded already. - -While it may be possible to compile rippled on (virtual) machines with 4GB of RAM, we recommend compiling on (virtual) machines with 8GB of RAM or more. If your build machine has more than one CPU (**\`grep '^processor' /proc/cpuinfo | wc -l\`**), you can use them to assist in the build process by compiling with the command **scons -j\[number of CPUs - 1\]**. - -The minimum supported version of Boost is v1.57.0. You **must** upgrade to this release or later to successfully compile this release of rippled. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Install_Boost) if you have not upgraded already. - -**New Features** - -- Add "Default Ripple" account flag - -**Assistance** - -For assistance, please contact **integration@ripple.com** - -**More information** - -For more information or assistance, the following resources will be of use: - -- [Ripple Developer Forums](https://ripple.com/forum/viewforum.php?f=2) -- [IRC](https://webchat.freenode.net/?channels=#ripple) - - ------------------------------------------------------------ - -## Version 0.27.2 - -rippled 0.27.2 has been released. The commit can be found on GitHub at: - -Prior to building, please confirm you have the correct source tree with the **git log** command. The first log entry should be the change setting the version: - - commit 9cc8eec773e8afc9c12a6aab4982deda80495cf1 - Author: Nik Bougalis - Date: Sun Mar 1 14:56:44 2015 -0800 - - Set version to 0.27.2 - -This release incorporates a number of important bugfixes and functional improvements. Please refer to the [Git commit history](https://github.com/ripple/rippled/commits/0.27.2) for more detailed information. - -**Toolchain support** - -The minimum supported version of GCC used to compile rippled is v4.8. 
Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Ubuntu_versions_older_than_13.10_:_Install_gcc_4.8) if you have not upgraded already. - -While it may be possible to compile rippled on (virtual) machines with 4GB of RAM, we recommend compiling on (virtual) machines with 8GB of RAM or more. If your build machine has more than one CPU (**\`grep '^processor' /proc/cpuinfo | wc -l\`**), you can use them to assist in the build process by compiling with the command **scons -j\[number of CPUs - 1\]**. - -The minimum supported version of Boost is v1.57.0. You **must** upgrade to this release or later to successfully compile this release of rippled. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Install_Boost) if you have not upgraded already. - -**New Features** - -- NuDB backend option: high performance key/value database optimized for rippled (set “type=nudb” in .cfg). - - Either import RocksDB to NuDB using the import tool, or - - Start fresh with NuDB but delete SQLite databases if rippled ran previously with RocksDB: - - rm [database_path]/transaction.* [database_path]/ledger.* - -**Bug Fixes** - -- Fix offer quality bug - -**Deprecated** - -- HyperLevelDB, LevelDB, and SQLite backend options. Use RocksDB for spinning drives and NuDB for SSDs. - -**Assistance** - -For assistance, please contact **integration@ripple.com** - -**More information** - -For more information or assistance, the following resources will be of use: - -- [Ripple Developer Forums](https://ripple.com/forum/viewforum.php?f=2) -- [IRC](https://webchat.freenode.net/?channels=#ripple) - - ------------------------------------------------------------ - -## Version 0.27.1 - -rippled 0.27.1 has been released. The commit can be found on GitHub at: - -Prior to building, please confirm you have the correct source tree with the **git log** command. The first log entry should be the change setting the version: - - commit 95973ba3e8b0bd28eeaa034da8b806faaf498d8a - Author: Vinnie Falco - Date: Tue Feb 24 13:31:13 2015 -0800 - - Set version to 0.27.1 - -This release incorporates a number of important bugfixes and functional improvements. Please refer to the [Git commit history](https://github.com/ripple/rippled/commits/0.27.1) for more detailed information. - -**Toolchain support** - -The minimum supported version of GCC used to compile rippled is v4.8. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Ubuntu_versions_older_than_13.10_:_Install_gcc_4.8) if you have not upgraded already. - -While it may be possible to compile rippled on (virtual) machines with 4GB of RAM, we recommend compiling on (virtual) machines with 8GB of RAM or more. If your build machine has more than one CPU (**\`grep '^processor' /proc/cpuinfo | wc -l\`**), you can use them to assist in the build process by compiling with the command **scons -j\[number of CPUs - 1\]**. - -The minimum supported version of Boost is v1.57.0. You **must** upgrade to this release or later to successfully compile this release of rippled. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Install_Boost) if you have not upgraded already.
- -**New Features** - -- RocksDB to NuDB import tool ([RIPD-781](https://ripplelabs.atlassian.net/browse/RIPD-781), [RIPD-785](https://ripplelabs.atlassian.net/browse/RIPD-785)): custom tool specifically designed for very fast import of RocksDB nodestore databases into NuDB - -**Bug Fixes** - -- Fix streambuf bug - -**Improvements** - -- Update RocksDB backend settings -- NuDB improvements: - - Limit size of mempool ([RIPD-787](https://ripplelabs.atlassian.net/browse/RIPD-787)) - - Performance improvements ([RIPD-793](https://ripplelabs.atlassian.net/browse/RIPD-793), [RIPD-796](https://ripplelabs.atlassian.net/browse/RIPD-796)): changes in NuDB to improve speed, reduce database size, and enhance correctness. The most significant change is to store hashes rather than entire keys in the key file. The output of the hash function is reduced to 48 bits, and stored directly in buckets. - -**Experimental** - -- Add /crawl cgi request feature to peer protocol ([RIPD-729](https://ripplelabs.atlassian.net/browse/RIPD-729)): adds support for a cgi /crawl request, issued over HTTPS to the configured peer protocol port. The response to the request is a JSON object containing the node public key, type, and IP address of each directly connected neighbor. The IP address is suppressed unless the neighbor has requested its address to be revealed by adding "Crawl: public" to its HTTP headers. This field is currently set by the peer\_private option in the rippled.cfg file. - -**Assistance** - -For assistance, please contact **integration@ripple.com** - -**More information** - -For more information or assistance, the following resources will be of use: - -- [Ripple Developer Forums](https://ripple.com/forum/viewforum.php?f=2) -- [IRC](https://webchat.freenode.net/?channels=#ripple) - - ------------------------------------------------------------ - -## Version 0.27.0 - -rippled 0.27.0 has been released. The commit can be found on GitHub at: - -Prior to building, please confirm you have the correct source tree with the **git log** command. The first log entry should be the change setting the version: - - commit c6c8e5d70c6fbde02cd946135a061aa77744396f - Author: Vinnie Falco - Date: Mon Jan 26 10:56:11 2015 -0800 - - Set version to 0.27.0 - -This release incorporates a number of important bugfixes and functional improvements. Please refer to the [Git commit history](https://github.com/ripple/rippled/commits/0.27.0) for more detailed information. - -**Release Overview** - -The rippled team is proud to release rippled 0.27.0. This new version includes many exciting features that will appeal to our users. The team continues to work on stability, scalability, and performance. - -The first feature is Online Delete. This feature allows rippled to maintain its database of previous ledgers within a fixed amount of disk space. It does this while allowing rippled to stay online and maintain an administrator-specified minimum number of ledgers. This means administrators with limited disk space will no longer need to manage disk space by periodically removing the database manually. Also, with the previously existing backend databases, performance would gradually degrade as the database grew in size. In particular, rippled would perform poorly whenever the backend database performed ever-growing compaction operations. By limiting rippled to less history, compaction is less resource intensive and systems with less disk performance can now run rippled. - -Additionally, we are very excited to include Universal Port.
This feature allows rippled's listening port to handshake in multiple protocols. For example, a single listening port can be configured to receive incoming peer connections, incoming RPC commands over HTTP, and incoming RPC commands over HTTPS at the same time. Or, a single port can receive both Websockets and Secure Websockets clients at the same time. - -Finally, a new, experimental backend database, NuDB, has been added. This database was developed by Ripple Labs to take advantage of rippled’s specific data usage profile and performs much better than previous databases. Significantly, this database does not degrade in performance as the database grows. Very excitingly, this database works on OS X and Windows. This allows rippled to use these platforms for the first time. - -**Toolchain support** - -The minimum supported version of GCC used to compile rippled is v4.8. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Ubuntu_versions_older_than_13.10_:_Install_gcc_4.8) if you have not upgraded already. - -While it may be possible to compile rippled on (virtual) machines with 4GB of RAM, we recommend compiling on (virtual) machines with 8GB of RAM or more. If your build machine has more than one CPU (**\`grep '^processor' /proc/cpuinfo | wc -l\`**), you can use them to assist in the build process by compiling with the command **scons -j\[number of CPUs - 1\]**. - -The minimum supported version of Boost is v1.57.0. You **must** upgrade to this release or later to successfully compile this release of rippled. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Install_Boost) if you have not upgraded already. - -**Important rippled.cfg Update** - -**The format of the configuration file has changed. If upgrading from a previous version of rippled, please see the migration instructions below.** - -**New Features** - -- SHAMapStore Online Delete ([RIPD-415](https://ripplelabs.atlassian.net/browse/RIPD-415)): Makes rippled configurable to support deletion of all data in its key-value store (nodestore) and ledger and transaction SQLite databases based on validated ledger sequence numbers. See doc/rippled-example.cfg for configuration setup. -- [Universal Port](https://forum.ripple.com/viewtopic.php?f=2&t=8313&p=57969). See necessary config changes below. -- Config "ledger\_history\_index" option ([RIPD-559](https://ripplelabs.atlassian.net/browse/RIPD-559)) - -**Bug Fixes** - -- Fix pathfinding with multiple issuers for one currency ([RIPD-618](https://ripplelabs.atlassian.net/browse/RIPD-618)) -- Fix account\_lines, account\_offers and book\_offers result ([RIPD-682](https://ripplelabs.atlassian.net/browse/RIPD-682)) -- Fix pathfinding bugs ([RIPD-735](https://ripplelabs.atlassian.net/browse/RIPD-735)) -- Fix RPC subscribe with multiple books ([RIPD-77](https://ripplelabs.atlassian.net/browse/RIPD-77)) -- Fix account\_tx API - -**Improvements** - -- Improve the human-readable description of the tesSUCCESS code -- Add 'delivered\_amount' to Transaction JSON ([RIPD-643](https://ripplelabs.atlassian.net/browse/RIPD-643)): The synthetic field 'delivered\_amount' can be used to determine the exact amount delivered by a Payment without having to check the DeliveredAmount field, if present, or the Amount field otherwise.
- -**Development-Related Updates** - -- HTTP Handshaking for Peers on Universal Port ([RIPD-446](https://ripplelabs.atlassian.net/browse/RIPD-446)) -- Use asio signal handling in Application ([RIPD-140](https://ripplelabs.atlassian.net/browse/RIPD-140)) -- Build dependency on Boost 1.57.0 -- Support a "no\_server" flag in test config -- API for improved Unit Testing ([RIPD-432](https://ripplelabs.atlassian.net/browse/RIPD-432)) -- Option to specify rippled path on command line (--rippled=\) - -**Experimental** - -- NuDB backend option: high performance key/value database optimized for rippled (set “type=nudb” in .cfg) - -**Migration Instructions** - -With rippled version 0.27.0, the rippled.cfg file must be changed according to these instructions: - -- Add new stanza - `[server]`. This section will contain a list of port names and key/value pairs. A port name must start with a letter and contain only letters and numbers. The name is not case-sensitive. For each name in this list, rippled will look for a configuration file section with the same name and use it to create a listening port. To simplify migration, you can use port names from your previous version of rippled.cfg (see Section 1. Server for detailed explanation in doc/rippled-example.cfg). For example: - - [server] - rpc_port - peer_port - websocket_port - ssl_key = - ssl_cert = - ssl_chain = - -- For each port name in `[server]` stanza, add separate stanzas. For example: - - [rpc_port] - port = - ip = - admin = allow - protocol = https - - [peer_port] - port = - ip = - protocol = peer - - [websocket_port] - port = - ip = - admin = allow - protocol = wss - -- Remove current `[rpc_port],` `[rpc_ip],` `[rpc_allow_remote],` `[rpc_ssl_key],` `[rpc_ssl_cert],` `and` `[rpc_ssl_chain],` `[peer_port],` `[peer_ip],` `[websocket_port],` `[websocket_ip]` settings from rippled.cfg - -- If you allow untrusted websocket connections to your rippled, add `[websocket_public_port]` stanza under `[server]` section and replace websocket public settings with `[websocket_public_port]` section: - - [websocket_public_port] - port = - ip = - protocol = ws ← make sure this is ws, not wss` - -- Remove `[websocket_public_port],` `[websocket_public_ip],` `[websocket_ssl_key],` `[websocket_ssl_cert],` `[websocket_ssl_chain]` settings from rippled.cfg -- Disable `[ssl_verify]` section by setting it to 0 -- Migrate the remaining configurations without changes. To enable online delete feature, check Section 6. Database in doc/rippled-example.cfg - -**Integration Notes** - -With this release, integrators should deprecate the "DeliveredAmount" field in favor of "delivered\_amount." - -**For Transactions That Occurred Before January 20, 2014:** - -- If amount actually delivered is different than the transactions “Amount” field - - "delivered\_amount" will show as unavailable indicating a developer should use caution when processing this payment. - - Example: A partial payment transaction (tfPartialPayment). -- Otherwise - - "delivered\_amount" will show the correct destination balance change. - -**For Transactions That Occur After January 20, 2014:** - -- If amount actually delivered is different than the transactions “Amount” field - - A "delivered\_amount" field will determine the destination amount change - - Example: A partial payment transaction (tfPartialPayment). -- Otherwise - - "delivered\_amount" will show the correct destination balance change. 
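Following the integration notes above, here is a minimal sketch of how an integrator might read the delivered amount from a transaction and its metadata. The field names mirror typical `account_tx` output but are assumptions for the example; verify them against the API actually in use.

```python
# Sketch of the delivered-amount rules in the integration notes above.
# Field names ("delivered_amount", "DeliveredAmount", "Amount") follow
# typical account_tx output but should be verified against the API in
# use; this is not rippled's implementation.

def delivered_amount(tx: dict, meta: dict):
    # Prefer the synthetic field; it reads "unavailable" for old
    # (pre-2014-01-20) partial payments whose true amount is unknown.
    if "delivered_amount" in meta:
        return meta["delivered_amount"]
    # Fall back to the explicit metadata field, then to Amount, which
    # is only correct when the payment was not partial.
    return meta.get("DeliveredAmount", tx.get("Amount"))


print(delivered_amount({"Amount": "1000000"}, {"delivered_amount": "250000"}))
```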
- -**Assistance** - -For assistance, please contact **integration@ripple.com** - -**More information** - -For more information or assistance, the following resources will be of use: - -- [Ripple Developer Forums](https://ripple.com/forum/viewforum.php?f=2) -- [IRC](https://webchat.freenode.net/?channels=#ripple) - - ------------------------------------------------------------ - -## Version 0.26.4 - -rippled 0.26.4 has been released. The repository tag is *0.26.4* and can be found on GitHub at: - -Prior to building, please confirm you have the correct source tree with the **git log** command. The first log entry should be the change setting the version: - - commit 05a04aa80192452475888479c84ff4b9b54e6ae7 - Author: Vinnie Falco - Date: Mon Nov 3 16:53:37 2014 -0800 - - Set version to 0.26.4 - -This release incorporates a number of important bugfixes and functional improvements. Please refer to the [Git commit history](https://github.com/ripple/rippled/commits/0.26.4) for more detailed information. - -**Toolchain support** - -The minimum supported version of GCC used to compile rippled is v4.8. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Ubuntu_versions_older_than_13.10_:_Install_gcc_4.8) if you have not upgraded already. - -While it may be possible to compile rippled on (virtual) machines with 4GB of RAM, we recommend compiling on (virtual) machines with 8GB of RAM or more. If your build machine has more than one CPU (**\`grep '^processor' /proc/cpuinfo | wc -l\`**), you can use them to assist in the build process by compiling with the command **scons -j\[number of CPUs - 1\]**. - -The minimum supported version of Boost is v1.55.0. You **must** upgrade to this release or later to successfully compile this release of rippled. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Install_Boost) if you have not upgraded already. - -**Important JSON-RPC Update** - -With rippled version 0.26.4, the [rippled.cfg](https://github.com/ripple/rippled/blob/0.26.4/doc/rippled-example.cfg) file must set the ssl\_verify property to 0. Without this update, JSON-RPC API calls may not work. - -**New Features** - -- Rocksdb v. 3.5.1 -- SQLite v. 
3.8.7 -- Disable SSLv3 -- Add counters to track ledger read and write activities -- Use trusted validators median fee when determining transaction fee -- Add --quorum argument for server start ([RIPD-563](https://ripplelabs.atlassian.net/browse/RIPD-563)) -- Add account\_offers paging ([RIPD-344](https://ripplelabs.atlassian.net/browse/RIPD-344)) -- Add account\_lines paging ([RIPD-343](https://ripplelabs.atlassian.net/browse/RIPD-343)) -- Ability to configure network fee in rippled.cfg file ([RIPD-564](https://ripplelabs.atlassian.net/browse/RIPD-564)) - -**Bug Fixes** - -- Fix OS X version parsing/error related to OS X 10.10 update -- Fix incorrect address in connectivity check report -- Fix page sizes for ledger\_data ([RIPD-249](https://ripplelabs.atlassian.net/browse/RIPD-249)) -- Make log partitions case-insensitive in rippled.cfg - -**Improvements** - -- Performance - - Ledger performance improvements for storage and traversal ([RIPD-434](https://ripplelabs.atlassian.net/browse/RIPD-434)) - - Improve client performance for JSON responses ([RIPD-439](https://ripplelabs.atlassian.net/browse/RIPD-439)) -- Other - - Remove PROXY handshake feature - - Change to rippled.cfg to support sections containing both key/value pairs and a list of values - - Return descriptive error message for memo validation ([RIPD-591](https://ripplelabs.atlassian.net/browse/RIPD-591)) - - Changes to enforce JSON-RPC 2.0 error format - - Optimize account\_lines and account\_offers ([RIPD-587](https://ripplelabs.atlassian.net/browse/RIPD-587)) - - Improve fee setting logic ([RIPD-614](https://ripplelabs.atlassian.net/browse/RIPD-614)) - - Improve transaction security - - Config improvements - - Improve path filtering ([RIPD-561](https://ripplelabs.atlassian.net/browse/RIPD-561)) - - Logging to distinguish Byzantine failure from tx bug ([RIPD-523](https://ripplelabs.atlassian.net/browse/RIPD-523)) - -**Experimental** - -- Add "deferred" flag to transaction relay message (required for future code that will relay deferred transactions) -- Refactor STParsedJSON to parse an object or array (required for multisign implementation) ([RIPD-480](https://ripplelabs.atlassian.net/browse/RIPD-480)) - -**Development-Related Updates** - -- Changes to DatabaseReader to read ledger numbers from database -- Improvements to SConstruct - -**More information** - -For more information or assistance, the following resources will be of use: - -- [Ripple Developer Forums](https://ripple.com/forum/viewforum.php?f=2) -- [IRC](https://webchat.freenode.net/?channels=#ripple) - - ------------------------------------------------------------ - -## Version 0.26.3-sp1 - -rippled 0.26.3-sp1 has been released. The repository tag is *0.26.3-sp1* and can be found on GitHub at: - -Prior to building, please confirm you have the correct source tree with the **git log** command. The first log entry should be the change setting the version: - - commit 2ad6f0a65e248b4f614d38d199a9d5d02f5aaed8 - Author: Vinnie Falco - Date: Fri Sep 12 15:22:54 2014 -0700 - - Set version to 0.26.3-sp1 - -This release incorporates a number of important bugfixes and functional improvements. Please refer to the [Git commit history](https://github.com/ripple/rippled/commits/0.26.3-sp1) for more detailed information. - -**Toolchain support** - -The minimum supported version of GCC used to compile rippled is v4.8. 
Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Ubuntu_versions_older_than_13.10_:_Install_gcc_4.8) if you have not upgraded already. - -While it may be possible to compile rippled on (virtual) machines with 4GB of RAM, we recommend compiling on (virtual) machines with 8GB of RAM or more. If your build machine has more than one CPU (**\`grep '^processor' /proc/cpuinfo | wc -l\`**), you can use them to assist in the build process by compiling with the command **scons -j\[number of CPUs - 1\]**. - -The minimum supported version of Boost is v1.55.0. You **must** upgrade to this release or later to successfully compile this release of rippled. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Install_Boost) if you have not upgraded already. - -**New Features** - -- New command to display HTTP/S-RPC sessions metrics ([RIPD-533](https://ripplelabs.atlassian.net/browse/RIPD-533)) - -**Bug Fixes** - -- Improved handling of HTTP/S-RPC sessions ([RIPD-489](https://ripplelabs.atlassian.net/browse/RIPD-489)) -- Fix unit tests for Windows. -- Fix integer overflows in JSON parser. - -**Improvements** - -- Improve processing of trust lines during pathfinding. - -**Experimental Features** - -- Added a command line utility called LedgerTool for retrieving and processing ledger blocks from the Ripple network. - -**Development-Related Updates** - -- HTTP message and parser improvements. - - Streambuf wrapper supports rvalue move. - - Message class holds a complete HTTP message. - - Body class holds the HTTP content body. - - Headers class holds RFC-compliant HTTP headers. - - Basic\_parser provides class interface to joyent's http-parser. - - Parser class parses into a message object. - - Remove unused http get client free function. - - Unit test for parsing malformed messages. -- Add enable\_if\_lvalue. -- Updates to includes and scons. -- Additional ledger.history.mismatch insight statistic. -- Convert rvalue to an lvalue. ([RIPD-494](https://ripplelabs.atlassian.net/browse/RIPD-494)) -- Enable heap profiling with jemalloc. -- Add aged containers to Validators module. ([RIPD-349](https://ripplelabs.atlassian.net/browse/RIPD-349)) -- Account for high-ASCII characters. ([RIPD-464](https://ripplelabs.atlassian.net/browse/RIPD-464)) - -**More information** - -For more information or assistance, the following resources will be of use: - -- [Ripple Developer Forums](https://ripple.com/forum/viewforum.php?f=2) -- [IRC](https://webchat.freenode.net/?channels=#ripple) - - ------------------------------------------------------------ - -## Version 0.26.2 - -rippled 0.26.2 has been released. The repository tag is *0.26.2* and can be found on GitHub at: - -Prior to building, please confirm you have the correct source tree with the **git log** command. The first log entry should be the change setting the version: - - commit b9454e0f0ca8dbc23844a0520d49394e10d445b1 - Author: Vinnie Falco - Date: Mon Aug 11 15:25:44 2014 -0400 - - Set version to 0.26.2 - -This release incorporates a small number of important bugfixes. Please refer to the [Git commit history](https://github.com/ripple/rippled/commits/0.26.2) for more detailed information. - -**Toolchain support** - -The minimum supported version of GCC used to compile rippled is v4.8. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Ubuntu_versions_older_than_13.10_:_Install_gcc_4.8) if you have not upgraded already. 
- -While it may be possible to compile rippled on (virtual) machines with 4GB of RAM, we recommend compiling on (virtual) machines with 8GB of RAM or more. If your build machine has more than one CPU (**\`grep '^processor' /proc/cpuinfo | wc -l\`**), you can use them to assist in the build process by compiling with the command **scons -j\[number of CPUs - 1\]**. - -The minimum supported version of Boost is v1.55.0. You **must** upgrade to this release or later to successfully compile this release of rippled. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Install_Boost) if you have not upgraded already. - -**New Features** - -- Freeze enforcement: activates on September 15, 2014 ([RIPD-399](https://ripplelabs.atlassian.net/browse/RIPD-399)) -- Add pubkey\_node and hostid to server stream messages ([RIPD-407](https://ripplelabs.atlassian.net/browse/RIPD-407)) - -**Bug Fixes** - -- Fix intermittent exception when closing HTTPS connections ([RIPD-475](https://ripplelabs.atlassian.net/browse/RIPD-475)) -- Correct Pathfinder::getPaths out to handle order books ([RIPD-427](https://ripplelabs.atlassian.net/browse/RIPD-427)) -- Detect inconsistency in PeerFinder self-connects ([RIPD-411](https://ripplelabs.atlassian.net/browse/RIPD-411)) - -**Experimental Features** - -- Add owner\_funds to client subscription data ([RIPD-377](https://ripplelabs.atlassian.net/browse/RIPD-377)) - -The offer funding status feature is “experimental” in this version. Developers are able to see the field, but it is subject to change in future releases. - -**More information** - -For more information or assistance, the following resources will be of use: - -- [Ripple Developer Forums](https://ripple.com/forum/viewforum.php?f=2) -- [IRC](https://webchat.freenode.net/?channels=#ripple) - - ------------------------------------------------------------ - -## Version 0.26.1 - -rippled v0.26.1 has been released. The repository tag is **0.26.1** and can be found on GitHub at: - -Prior to building, please confirm you have the correct source tree with the **git log** command. The first log entry should be the change setting the version: - - commit 9a0e806f78300374e20070e2573755fbafdbfd03 - Author: Vinnie Falco - Date: Mon Jul 28 11:27:31 2014 -0700 - - Set version to 0.26.1 - -This release incorporates a small number of important bugfixes. Please refer to the [Git commit history](https://github.com/ripple/rippled/commits/0.26.1) for more detailed information. - -**Toolchain support** - -The minimum supported version of GCC used to compile rippled is v4.8. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Ubuntu_versions_older_than_13.10_:_Install_gcc_4.8) if you have not upgraded already. - -While it may be possible to compile rippled on (virtual) machines with 4GB of RAM, we recommend compiling on (virtual) machines with 8GB of RAM or more. If your build machine has more than one CPU (**\`grep '^processor' /proc/cpuinfo | wc -l\`**), you can use them to assist in the build process by compiling with the command **scons -j\[number of CPUs - 1\]**. - -The minimum supported version of Boost is v1.55.0. You **must** upgrade to this release or later to successfully compile this release of rippled. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Install_Boost) if you have not upgraded already. - -**Bug Fixes** - -- Enabled asynchronous handling of HTTP-RPC interactions. 
This fixes client handlers using RPC that periodically return blank responses to requests. ([RIPD-390](https://ripplelabs.atlassian.net/browse/RIPD-390)) -- Fixed auth handling during OfferCreate. This fixes a regression of [RIPD-256](https://ripplelabs.atlassian.net/browse/RIPD-256). ([RIPD-414](https://ripplelabs.atlassian.net/browse/RIPD-414)) - -**More information** - -For more information or assistance, the following resources will be of use: - -- [Ripple Developer Forums](https://ripple.com/forum/viewforum.php?f=2) -- [IRC](https://webchat.freenode.net/?channels=#ripple) - - ------------------------------------------------------------ - -## Version 0.26.0 - -rippled v0.26.0 has been released. The repository tag is **0.26.0** and can be found on GitHub at: - -Prior to building, please confirm you have the correct source tree with the **git log** command. The first log entry should be the change setting the version: - - commit 9fa5e3987260e39dba322f218d39ac228a5b361b - Author: Vinnie Falco - Date: Tue Jul 22 09:59:45 2014 -0700 - - Set version to 0.26.0 - -This release incorporates a significant number of improvements and important bugfixes. Please refer to the [Git commit history](https://github.com/ripple/rippled/commits/develop) for more detailed information. - -**Toolchain support** - -The minimum supported version of GCC used to compile rippled is v4.8. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Ubuntu_versions_older_than_13.10_:_Install_gcc_4.8) if you have not upgraded already. - -While it may be possible to compile rippled on (virtual) machines with 4GB of RAM, we recommend compiling on (virtual) machines with 8GB of RAM or more. If your build machine has more than one CPU (**\`grep '^processor' /proc/cpuinfo | wc -l\`**), you can use them to assist in the build process by compiling with the command **scons -j\[number of CPUs - 1\]**. - -The minimum supported version of Boost is v1.55.0. You **must** upgrade to this release or later to successfully compile this release of rippled. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Install_Boost) if you have not upgraded already. - -**Improvements** - -- Updated integration tests. -- Updated tests for account freeze functionality. -- Implement setting the no-freeze flag on Ripple accounts ([RIPD-394](https://ripplelabs.atlassian.net/browse/RIPD-394)). -- Improve transaction fee and execution logic ([RIPD-323](https://ripplelabs.atlassian.net/browse/RIPD-323)). -- Implemented finding of 'sabfd' paths ([RIPD-335](https://ripplelabs.atlassian.net/browse/RIPD-335)). -- Imposed a local limit on paths lengths ([RIPD-350](https://ripplelabs.atlassian.net/browse/RIPD-350)). -- Documented [ledger entries](https://github.com/ripple/rippled/blob/develop/src/ripple/module/app/ledger/README.md) ([RIPD-361](https://ripplelabs.atlassian.net/browse/RIPD-361)). -- Documented [SHAMap](https://github.com/ripple/rippled/blob/develop/src/ripple/module/app/shamap/README.md). - -**Bug Fixes** - -- Fixed the limit parameter on book\_offers ([RIPD-295](https://ripplelabs.atlassian.net/browse/RIPD-295)). -- Removed SHAMapNodeID from SHAMapTreeNode to fix "right data, wrong ID" bug in the tree node cache ([RIPD-347](https://ripplelabs.atlassian.net/browse/RIPD-347)). -- Eliminated spurious SHAMap::getFetchPack failure ([RIPD-379](https://ripplelabs.atlassian.net/browse/RIPD-379)). -- Disabled SSLv2. 
-- Implemented rate-limiting of SSL client renegotiation to mitigate [SCIR DoS vulnerability](https://www.thc.org/thc-ssl-dos/) ([RIPD-360](https://ripplelabs.atlassian.net/browse/RIPD-360)). -- Display unprintable or malformatted currency codes as hex digits. -- Fix static initializers in RippleSSLContext ([RIPD-375](https://ripplelabs.atlassian.net/browse/RIPD-375)). - -**More information** - -For more information or assistance, the following resources will be of use: - -- [Ripple Developer Forums](https://ripple.com/forum/viewforum.php?f=2) -- [IRC](https://webchat.freenode.net/?channels=#ripple) - - ------------------------------------------------------------ - -## Version 0.25.2 - -rippled v0.25.2 has been released. The repository tag is **0.25.2** and can be found on GitHub at: - -Prior to building, please confirm you have the correct source tree with the **git log** command. The first log entry should be the change setting the version: - - commit ddf68d464d74e1c76a0cfd100a08bc8e65b91fec - Author: Mark Travis - Date: Mon Jul 7 11:46:15 2014 -0700 - - Set version to 0.25.2 - -This release incorporates significant improvements which may not warrant separate entries but are incorporated into the feature changes as summary lines. Please refer to the [Git commit history](https://github.com/ripple/rippled/commits/develop) for more information. - -**Toolchain support** - -The minimum supported version of GCC used to compile rippled is v4.8. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Ubuntu_versions_older_than_13.10_:_Install_gcc_4.8) if you have not upgraded already. - -While it may be possible to compile rippled on (virtual) machines with 4GB of RAM, we recommend build machines with 8GB of RAM. - -The minimum supported version of Boost is v1.55. You **must** upgrade to this release or later to successfully compile this release. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Install_Boost) if you have not upgraded already. - -**Improvements** - -- CPU utilization for certain operations has been optimized. -- Improve serialization of public ledger blocks. -- rippled now takes much less time to compile. -- Additional pathfinding heuristic: increases liquidity in some cases. - -**Bug Fixes** - -- Unprintable currency codes will be printed as hex digits. -- Transactions with unreasonably long path lengths are rejected. The maximum is now eight (8) hops. - - ------------------------------------------------------------ - -## Version 0.25.1 - -`rippled` v0.25.1 has been released. The repository tag is `0.25.1` and can be found on GitHub at: https://github.com/ripple/rippled/tree/0.25.1 - -Prior to building, please confirm you have the correct source tree with the `git log` command. The first log entry should be the change setting the version: - - commit b677cacb8ce0d4ef21f8c60112af1db51dce5bb4 - Author: Vinnie Falco - Date: Thu May 15 08:27:20 2014 -0700 - - Set version to 0.25.1 - -This release incorporates significant improvements which may not warrant separate entries but are incorporated into the feature changes as summary lines. Please refer to the [Git commit history](https://github.com/ripple/rippled/commits/develop) for more information. - -**Toolchain support** - -The minimum supported version of GCC used to compile rippled is v4.8. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Ubuntu_versions_older_than_13.10_:_Install_gcc_4.8) if you have not upgraded already. 
- -A minimum of 4GB of RAM are required to successfully compile this release. - -The minimum supported version of Boost is v1.55. You **must** upgrade to this release or later to successfully compile this release. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Install_Boost) if you have not upgraded already. - -**Major Features** - -* Option to compress the NodeStore db. More speed, less space. See [`rippled-example.cfg`](https://github.com/ripple/rippled/blob/0.25.1/doc/rippled-example.cfg#L691) - -**Improvements** - -* Remove redundant checkAccept call -* Added I/O latency to output of ''server_info''. -* Better performance handling of Fetch Packs. -* Improved handling of modified ledger nodes. -* Improved performance of JSON document generator. -* Made strConcat operate in O(n) time for greater efficiency. -* Added some new configuration options to doc/rippled-example.cfg - -**Bug Fixes** - -* Fixed a bug in Unicode parsing of transactions. -* Fix a blocker with tfRequireAuth -* Merkle tree nodes that are retrieved as a result of client requests are cached locally. -* Use the last ledger node closed for finding old paths through the network. -* Reduced number of asynchronous fetches. - - ------------------------------------------------------------ - -## Version 0.25.0 - -rippled version 0.25.0 has been released. The repository tag is **0.25.0** and can be found on GitHub at: - -Prior to building, please confirm you have the correct source tree with the **git log** command. The first log entry should be the change setting the version: - - commit 29d1d5f06261a93c5e94b4011c7675ff42443b7f - Author: Vinnie Falco - Date: Wed May 14 09:01:44 2014 -0700 - - Set version to 0.25.0 - -This release incorporates significant improvements which may not warrant separate entries but are incorporated into the feature changes as summary lines. Please refer to the [Git commit history](https://github.com/ripple/rippled/commits/develop) for more information. - -**Toolchain support** - -The minimum supported version of GCC used to compile rippled is v4.8. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Ubuntu_versions_older_than_13.10_:_Install_gcc_4.8) if you have not upgraded already. - -A minimum of 4GB of RAM are required to successfully compile this release. - -The minimum supported version of Boost is v1.55. You **must** upgrade to this release or later to successfully compile this release. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Install_Boost) if you have not upgraded already. - -**Major Features** - -- Option to compress the NodeStore db. More speed, less space. See [`rippled-example.cfg`](https://github.com/ripple/rippled/blob/0.25.0/doc/rippled-example.cfg#L691) - -**Improvements** - -- Remove redundant checkAccept call -- Added I/O latency to output of *server\_info*. -- Better performance handling of Fetch Packs. -- Improved handling of modified ledger nodes. -- Improved performance of JSON document generator. -- Made strConcat operate in O(n) time for greater efficiency. - -**Bug Fixes** - -- Fix a blocker with tfRequireAuth -- Merkle tree nodes that are retrieved as a result of client requests are cached locally. -- Use the last ledger node closed for finding old paths through the network. -- Reduced number of asynchronous fetches. - - ------------------------------------------------------------ - -## Version 0.24.0 - -rippled version 0.24.0 has been released. 
The repository tag is **0.24.0** and can be found on GitHub at: - -Prior to building, please confirm you have the correct source tree with the **git log** command. The first log entry should be the change setting the version: - - commit 3eb1c7bd6f93e5d874192197f76571184338f702 - Author: Vinnie Falco - Date: Mon May 5 10:20:46 2014 -0700 - - Set version to 0.24.0 - -This release incorporates significant improvements which may not warrant separate entries but are incorporated into the feature changes as summary lines. Please refer to the [Git commit history](https://github.com/ripple/rippled/commits/develop) for more information. - -**Toolchain support** - -The minimum supported version of GCC used to compile rippled is v4.8. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Ubuntu_versions_older_than_13.10_:_Install_gcc_4.8) if you have not upgraded already. - -A minimum of 4GB of RAM are required to successfully compile this release. - -The minimum supported version of Boost is v1.55. You **must** upgrade to this release or later to successfully compile this release. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Install_Boost) if you have not upgraded already. - -**Improvements** - -- Implemented logic for ledger processes and features. -- Use "high threads" for background RocksDB database writes. -- Separately track locally-issued transactions to ensure they always appear in the open ledger. - -**Bug Fixes** - -- Fix AccountSet for canonical transactions. -- The RPC [sign](https://ripple.com/build/rippled-apis/#sign) command will now sign with either an account's master or regular secret key. -- Fixed out-of-order network initialization. -- Improved efficiency of pathfinding for transactions. -- Reworked timing of ledger validation and related operations to fix race condition against the network. -- Build process enforces minimum versions of OpenSSL and BOOST for operation. - - ------------------------------------------------------------ - -## Version 0.23.0 - -rippled version 0.23.0 has been released. The repository tag is **0.23.0** and can be found on GitHub at: - -Prior to building, please confirm you have the correct source tree with the **git log** command. The first log entry should be the change setting the version: - - commit 29a4f61551236f70865d46d6653da2e62de1c701 - Author: Vinnie Falco - Date: Fri Mar 14 13:01:23 2014 -0700 - - Set version to 0.23.0 - -This release incorporates significant improvements which may not warrant separate entries but are incorporated into the feature changes as summary lines. Please refer to the [Git commit history](https://github.com/ripple/rippled/commits/develop) for more information. - -**Toolchain support** - -The minimum supported version of GCC used to compile rippled is v4.8. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Ubuntu_versions_older_than_13.10_:_Install_gcc_4.8) if you have not upgraded already. - -A minimum of 4GB of RAM are required to successfully compile this release. - -The minimum supported version of Boost is v1.55. You **must** upgrade to this release or later to successfully compile this release. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Install_Boost) if you have not upgraded already. - -**Improvements** - -- Allow the word 'none' in the *.cfg* file to disable storing historical ledgers. -- Clarify the initialization of hash prefixes used in the *RadMap*. 
-- Better validation of RPC-JSON from all sources -- Reduce spurious log output from Peers -- Eliminated some I/O for certain operations in the *RadMap*. -- Client requests for full state trees now require administrative privileges. -- Added "MemoData" field for transaction memos. -- Prevent the node cache from overflowing needlessly in certain cases -- Add "ledger\_data" command for retrieving entire ledgers in chunks. -- Reduce the quantity of forwarded transactions and proposals in some cases -- Improved diagnostics when errors occur loading SSL certificates - -**Bug Fixes** - -- Fix rare crash when a race condition involving disconnecting sockets occurs -- Fix a corner case with hex conversion of strings with odd character lengths -- Fix an exception in a corner case when erroneous transactions were being logged -- Fix the treatment of expired offers when cleaning up offers -- Prevent a needless transactor from being created if the tx ID is not valid -- Fix the peer action transition from "syncing" to "full" -- Fix error reporting for unknown inner JSON fields -- Fix source file path displayed when an assertion failure is reported -- Fix typos in transaction engine error code identifiers - - ------------------------------------------------------------ - -## Version 0.22.0 - -rippled version 0.22.0 has been released. This release is currently the tip of the **develop/** branch and can be found on GitHub at: The tag is **0.22.0** and can be found on GitHub at: - -**This is a critical release affecting transaction processing. All partners should update immediately.** - -Prior to building, please confirm you have the correct source tree with the **git log** command. The first log entry should be the change setting the version: - -This release incorporates significant improvements which may not warrant separate entries but are incorporated into the feature changes as summary lines. Please refer to the [Git commit history](https://github.com/ripple/rippled/commits/develop) for more information. - -**Toolchain support** - -The minimum supported version of GCC used to compile rippled is v4.8. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Ubuntu_versions_older_than_13.10_:_Install_gcc_4.8) if you have not upgraded already. - -A minimum of 4GB of RAM are required to successfully compile this release. - -The minimum supported version of libBOOST is v1.55. You **must** upgrade to this release or later to successfully compile this release. Please follow [these instructions](https://wiki.ripple.com/Ubuntu_build_instructions#Install_Boost) if you have not upgraded already. - -**Key release features** - -- **PeerFinder** - - - Actively guides network topology. - - Scrubs listening advertisements based on connectivity checks. - - Redirection for new nodes when existing nodes are full. - -- **Memos** - - - Transactions can optionally include a short text message, which optionally can be encrypted. - -- **Database** - - - Improved management of I/O resources. - - Better performance accessing historical data. - -- **PathFinding** - - - More efficient search algorithm when computing paths - -**Major Partner Issues Fixed** - -- **Transactions** - - - Malleability: Ability to ensure that signatures are fully canonical. - -- **PathFinding** - - - Less time needed to get the first path result! - -- **Database** - - - Eliminated "meltdowns" caused when fetching historical ledger data. 
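To illustrate the Memos feature listed above: a memo is attached to a transaction as an array of `{"Memo": {...}}` wrapper objects whose `MemoType`, `MemoFormat`, and `MemoData` members are hex-encoded strings. The snippet below is a minimal, illustrative sketch using a jsoncpp-style `Json::Value`; the function name and the example values are made up for the illustration, not taken from this release.

    #include <json/json.h>

    // Build an entry of the Memos array for a transaction's JSON. Each value
    // is the hex encoding of the human-readable text shown in the comments.
    void addExampleMemo(Json::Value& tx)
    {
        Json::Value memo;
        memo["Memo"]["MemoType"] = "687474703A2F2F6578616D706C652E636F6D";  // "http://example.com"
        memo["Memo"]["MemoFormat"] = "746578742F706C61696E";                // "text/plain"
        memo["Memo"]["MemoData"] = "72656C6561736520302E32322E30";          // "release 0.22.0"
        tx["Memos"].append(memo);
    }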
- -**Significant Changes** - -- Cleaned up logic which controls when ledgers are fetched and under what conditions. -- Cleaned up file path calculation for database files. -- Changed dispatcher for WebSocket requests. -- Cleaned up multithreading mechanisms. -- Fixed custom currency code parsing. -- Optimized transaction node lookup circumstances in the node store. - - ------------------------------------------------------------ - -## Version 0.21.0 - -rippled version 0.21.0 has been released. This release is currently the tip of the **develop/** branch and can be found on GitHub at [1](https://github.com/ripple/rippled/tree/develop). The tag is **0.21.0-rc2** and can be found on GitHub at [2](https://github.com/ripple/rippled/tree/0.21.0-rc2). - -**This is a critical release. All partners should update immediately.** - -Prior to building, please confirm you have the correct source tree with the **git log** command. The first log entry should be the change setting the version: - - commit f295bb20a16d1d2999f606c1297c8930d8e33c40 - Author: JoelKatz - Date: Fri Jan 24 11:17:16 2014 -0800 - - Set version to 0.21.0.rc2 - -**Major Partner Issues Fixed** - -- Order book issues - - Ensure all crossing offers are taken - - Ensure order book is not left crossed -- Added **DeliveredAmount** field to transaction metadata - - Reports amount delivered in partial payments - -**Toolchain support** - -As with the previous release, the minimum supported version of GCC used to compile rippled is v4.8. - -**Significant Changes** - -- Pairwise no-ripple - - Permits trust lines to be protected from rippling - - Operates on protected pairs -- Performance improvements - - Improve I/O latency - - Improve fetching ledgers - - Improve pathfinding -- Features for robust transaction submission - - LastLedgerSeq for transaction expiration - - AccountTxnID for transaction chaining -- Fix some cases where an invalid transaction would stay in limbo -- Code cleanups -- Better reporting of invalid parameters - -**Release Candidates** - -RC1 fixed performance problems with order book retrieval. - -RC2 fixed a bug that caused crashes in order processing and a bug in parsing order book requests. - -**Notice** - -If you are upgrading from version 0.12 or earlier of rippled, these next sections apply to you because the format of the *rippled.cfg* file changed around that time. If you have upgraded since that time and you have applied the configuration file fixes, you can safely ignore them. - -**Validators** - -Ripple Labs is now running five validators. You can use this template for your *validators.txt* file (or place this in your config file): - - [validators] - n9KPnVLn7ewVzHvn218DcEYsnWLzKerTDwhpofhk4Ym1RUq4TeGw RIP1 - n9LFzWuhKNvXStHAuemfRKFVECLApowncMAM5chSCL9R5ECHGN4V RIP2 - n94rSdgTyBNGvYg8pZXGuNt59Y5bGAZGxbxyvjDaqD9ceRAgD85P RIP3 - n9LeQeDcLDMZKjx1TZtrXoLBLo5q1bR1sUQrWG7tEADFU6R27UBp RIP4 - n9KF6RpvktjNs2MDBkmxpJbup4BKrKeMKDXPhaXkq7cKTwLmWkFr RIP5 - -You should also raise your quorum to at least three by putting the following in your *rippled.cfg* file: - - [validation_quorum] - 3 - -If you are a validator, you should set your quorum to at least four. - -**IPs** - -A list of Ripple Labs server IP addresses can be found by resolving **r.ripple.com**. 
You can also add this to your *rippled.cfg* file to ensure you always have several peer connections to Ripple Labs servers: - - [ips] - 184.73.226.101 51235 - 23.23.201.55 51235 - 54.200.43.173 51235 - 184.73.57.84 51235 - 54.234.249.55 51235 - 54.200.86.110 51235 - -**RocksDB back end** - -RocksDB is based on LevelDB with improvements from Facebook and the community. Preliminary tests show that it stalls less often than HyperLevelDB for our use cases. - -If you are switching over from an existing back end, you have two options. You can remove your old database and let rippled recreate it as it re-syncs, or you can import your old database into the new one. - -To remove your old database, make sure the server is shut down (\`rippled stop\`). Remove the *db/ledger.db* and *db/transaction.db* files. Remove all the files in your back end store directory (*db/hashnode* by default). Then change your configuration file to use the RocksDB back end and restart. - -To import your old database, start by shutting the server down. Then modify the configuration file by renaming your *\[node\_db\]* stanza to *\[import\_db\]*. Create a new *\[node\_db\]* stanza and specify a RocksDB back end with a different directory. Start the server with the command **rippled --import**. When the import finishes gracefully stop the server (\`rippled stop\`). Please wait for rippled to stop on its own because it can take several minutes for it to shut down after an import. Remove the old database, put the new database into place, remove the *\[import\_db\]* section, change the *\[node\_db\]* section to refer to the final location, and restart the server. - -The recommended RocksDB configuration is: - - [node_db] - type=RocksDB - path=db/hashnode - open_files=1200 - filter_bits=12 - cache_mb=128 - file_size_mb=8 - file_size_mult=2 - -**Configuring your Node DB** - -You need to configure the [NodeBackEnd](https://wiki.ripple.com/NodeBackEnd) that you want the server to use. See above for an example RocksDB configuration. - -- **Note**: HyperLevelDB and RocksDB are not available on Windows platform. - - ------------------------------------------------------------ - -## Version 0.20.1 - -rippled version 0.20.1 has been released. This release is currently the tip of the [develop](https://github.com/ripple/rippled/tree/develop) branch and the tag is [0.20.1](https://github.com/ripple/rippled/tree/0.20.1). - -**This is a critical release. All partners should update immediately.** - -Prior to building, please confirm you have the correct source tree with the **git log** command. The first log entry should be the change setting the version: - - commit 95a573b755219d7e1e078d53b8e11a8f0d7cade1 - Author: Vinnie Falco - Date: Wed Jan 8 17:08:27 2014 -0800 - - Set version to 0.20.1 - -**Major Partner Issues Fixed** - -- rippled will crash randomly. - - Entries in the three parts of the order book are missing or do not match. In such a case, rippled will crash. -- Server loses sync randomly. - - This is due to rippled restarting after it crashes. That the server restarted is not obvious and appears to be something else. -- Server goes 'offline' randomly. - - This is due to rippled restarting after it crashes. That the server restarted is not obvious and appears to be something else. -- **complete\_ledgers** part of **server\_info** output says "None". - - This is due to rippled restarting and reconstructing the ledger after it crashes. 
 - - If the node back end is corrupted or has been moved without being renamed in rippled.cfg, this can cause rippled to crash and restart. - -**Toolchain support** - -Starting with this release, the minimum supported version of GCC used to compile rippled is v4.8. - -**Significant Changes** - -- Don't log StatsD messages to the console by default. -- Fixed missing jtACCEPT job limit. -- Removed dead code to clean up the codebase. -- Reset liquidity before retrying rippleCalc. -- Made improvements because items in SHAMaps are immutable. -- Multiple pathfinding bugfixes: - - Make each path request track whether it needs updating. - - Improve new request handling, reverse order for processing requests. - - Break to handle new requests immediately. - - Make mPathFindThread an integer rather than a bool. Allow two threads. - - Suspend processing requests if server is backed up. - - Multiple performance improvements and enhancements. - - Fixed locking. -- Refactored codebase to make it C++11 compliant. -- Multiple fixes to ledger acquisition, cleanup, and logging. -- Made multiple improvements to WebSockets server. -- Added Debian-style initscript (doc/rippled.init). -- Updated default config file (doc/rippled-example.cfg) to reflect best practices. -- Made changes to SHAMapTreeNode and visitLeavesInternal to conserve memory. -- Implemented new fee schedule: - - Transaction fee: 10 drops - - Base reserve: 20 XRP - - Incremental reserve: 5 XRP -- Fixed bug \#211 (getTxsAccountB in NetworkOPs). -- Fixed a store/fetch race condition in the node back end. -- Fixed multiple comparison operations. -- Removed Sophia and Lightning databases. - -**Notice** - -If you are upgrading from version 0.12 or earlier of rippled, these next sections apply to you because the format of the *rippled.cfg* file changed around that time. If you have upgraded since that time and you have applied the configuration file fixes, you can safely ignore them. - -**Validators** - -Ripple Labs is now running five validators. You can use this template for your *validators.txt* file (or place this in your config file): - - [validators] - n9KPnVLn7ewVzHvn218DcEYsnWLzKerTDwhpofhk4Ym1RUq4TeGw RIP1 - n9LFzWuhKNvXStHAuemfRKFVECLApowncMAM5chSCL9R5ECHGN4V RIP2 - n94rSdgTyBNGvYg8pZXGuNt59Y5bGAZGxbxyvjDaqD9ceRAgD85P RIP3 - n9LeQeDcLDMZKjx1TZtrXoLBLo5q1bR1sUQrWG7tEADFU6R27UBp RIP4 - n9KF6RpvktjNs2MDBkmxpJbup4BKrKeMKDXPhaXkq7cKTwLmWkFr RIP5 - -You should also raise your quorum to at least three by putting the following in your *rippled.cfg* file: - - [validation_quorum] - 3 - -If you are a validator, you should set your quorum to at least four. - -**IPs** - -A list of Ripple Labs server IP addresses can be found by resolving **r.ripple.com**. You can also add this to your *rippled.cfg* file to ensure you always have several peer connections to Ripple Labs servers: - - [ips] - 54.225.112.220 51235 - 54.225.123.13 51235 - 54.227.239.106 51235 - 107.21.251.218 51235 - 184.73.226.101 51235 - 23.23.201.55 51235 - -**New RocksDB back end** - -RocksDB is based on LevelDB with improvements from Facebook and the community. Preliminary tests show that it stalls less often than HyperLevelDB for our use cases. - -If you are switching over from an existing back end, you have two options. You can remove your old database and let rippled recreate it as it re-syncs, or you can import your old database into the new one. - -To remove your old database, make sure the server is shut down (`rippled stop`). Remove the *db/ledger.db* and *db/transaction.db* files. 
Remove all the files in your back end store directory (*db/hashnode* by default). Then change your configuration file to use the RocksDB back end and restart. - -To import your old database, start by shutting the server down. Then modify the configuration file by renaming your *\[node\_db\]* stanza to *\[import\_db\]*. Create a new *\[node\_db\]* stanza and specify a RocksDB back end with a different directory. Start the server with the command **rippled --import**. When the import finishes gracefully stop the server (`rippled stop`). Please wait for rippled to stop on its own because it can take several minutes for it to shut down after an import. Remove the old database, put the new database into place, remove the *\[import\_db\]* section, change the *\[node\_db\]* section to refer to the final location, and restart the server. - -The recommended RocksDB configuration is: - - [node_db] - type=RocksDB - path=db/hashnode - open_files=1200 - filter_bits=12 - cache_mb=256 - file_size_mb=8 - file_size_mult=2 - -**Configuring your Node DB** - -You need to configure the [NodeBackEnd](https://wiki.ripple.com/NodeBackEnd) that you want the server to use. See above for an example RocksDB configuration. - -- **Note**: HyperLevelDB and RocksDB are not available on Windows platform. - - ------------------------------------------------------------ - -## Version 0.19 - -rippled version 0.19 has now been released. This release is currently the tip of the [release](https://github.com/ripple/rippled/tree/release) branch and the tag is [0.19.0](https://github.com/ripple/rippled/tree/0.19.0). - -Prior to building, please confirm you have the correct source tree with the `git log` command. The first log entry should be the change setting the version: - - commit 26783607157a8b96e6e754f71565f4eb0134efc1 - Author: Vinnie Falco - Date: Fri Nov 22 23:36:50 2013 -0800 - - Set version to 0.19.0 - -**Significant Changes** - -- Bugfixes and improvements in path finding, path filtering, and payment execution. -- Updates to HyperLevelDB and LevelDB node storage back ends. -- Addition of RocksDB node storage back end. -- New resource manager for tracking server load. -- Fixes for a few bugs that can cause crashes or an inability to serve client requests. - -**Validators** - -Ripple Labs is now running five validators. You can use this template for your `validators.txt` file (or place this in your config file): - - [validators] - n9KPnVLn7ewVzHvn218DcEYsnWLzKerTDwhpofhk4Ym1RUq4TeGw RIP1 - n9LFzWuhKNvXStHAuemfRKFVECLApowncMAM5chSCL9R5ECHGN4V RIP2 - n94rSdgTyBNGvYg8pZXGuNt59Y5bGAZGxbxyvjDaqD9ceRAgD85P RIP3 - n9LeQeDcLDMZKjx1TZtrXoLBLo5q1bR1sUQrWG7tEADFU6R27UBp RIP4 - n9KF6RpvktjNs2MDBkmxpJbup4BKrKeMKDXPhaXkq7cKTwLmWkFr RIP5 - -You should also raise your quorum to at least three by putting the following in your `rippled.cfg` file: - - [validation_quorum] - 3 - -If you are a validator, you should set your quorum to at least four. - -**IPs** - -A list of Ripple Labs server IP addresses can be found by resolving `r.ripple.com`. You can also add this to your `rippled.cfg` file to ensure you always have several peer connections to Ripple Labs servers: - - [ips] - 54.225.112.220 51235 - 54.225.123.13 51235 - 54.227.239.106 51235 - 107.21.251.218 51235 - 184.73.226.101 51235 - 23.23.201.55 51235 - -**New RocksDB back end** - -RocksDB is based on LevelDB with improvements from Facebook and the community. Preliminary tests show that it stalls less often than HyperLevelDB. 
 - -If you are switching over from an existing back end, you have two choices. You can remove your old database or you can import it. - -To remove your old database, make sure the server is shut down. Remove the `db/ledger.db` and `db/transaction.db` files. Remove all the files in your back end store directory, `db/hashnode` by default. Then you can change your configuration file to use the RocksDB back end and restart. - -To import your old database, start by shutting the server down. Then modify the configuration file by renaming your `[node_db]` portion to `[import_db]`. Create a new `[node_db]` section and specify a RocksDB back end and a different directory. Start the server with `rippled --import`. When the import finishes, stop the server (it can take several minutes to shut down after an import), remove the old database, put the new database into place, remove the `[import_db]` section, change the `[node_db]` section to refer to the final location, and restart the server. - -The recommended RocksDB configuration is: - - [node_db] - type=RocksDB - path=db/hashnode - open_files=1200 - filter_bits=12 - cache_mb=256 - file_size_mb=8 - file_size_mult=2 - -**Configuring your Node DB** - -You need to configure the [NodeBackEnd](https://wiki.ripple.com/NodeBackEnd) that you want the server to use. See above for an example RocksDB configuration. - -- **Note:** HyperLevelDB and RocksDB are not available on Windows platform. - - ------------------------------------------------------------ - -## Version 0.16 - -rippled version 0.16 has now been released. This release is currently the tip of the [master](https://github.com/ripple/rippled/tree/master) branch and the tag is [v0.16.0](https://github.com/ripple/rippled/tree/v0.16.0). - -Prior to building, please confirm you have the correct source tree with the `git log` command. The first log entry should be the change setting the version: - - commit 15ef43505473225af21bb7b575fb0b628d5e7f73 - Author: vinniefalco - Date: Wed Oct 2 2013 - - Set version to 0.16.0 - -**Significant Changes** - -- Improved peer discovery -- Improved pathfinding -- Ledger speed improvements -- Reduced memory consumption -- Improved server stability -- rippled no longer throws an exception on exiting -- Better error reporting -- Ripple-lib tests have been ported to use the Mocha testing framework - -**Validators** - -Ripple Labs is now running five validators. You can use this template for your `validators.txt` file: - - [validators] - n9KPnVLn7ewVzHvn218DcEYsnWLzKerTDwhpofhk4Ym1RUq4TeGw RIP1 - n9LFzWuhKNvXStHAuemfRKFVECLApowncMAM5chSCL9R5ECHGN4V RIP2 - n94rSdgTyBNGvYg8pZXGuNt59Y5bGAZGxbxyvjDaqD9ceRAgD85P RIP3 - n9LeQeDcLDMZKjx1TZtrXoLBLo5q1bR1sUQrWG7tEADFU6R27UBp RIP4 - n9KF6RpvktjNs2MDBkmxpJbup4BKrKeMKDXPhaXkq7cKTwLmWkFr RIP5 - -You should also raise your quorum to at least three by putting the following in your `rippled.cfg` file: - - [validation_quorum] - 3 - -If you are a validator, you should set your quorum to at least four. - -**IPs** - -A list of Ripple Labs server IP addresses can be found by resolving `r.ripple.com`. You can also add this to your `rippled.cfg` file to ensure you always have several peer connections to Ripple Labs servers: - - [ips] - 54.225.112.220 51235 - 54.225.123.13 51235 - 54.227.239.106 51235 - 107.21.251.218 51235 - 184.73.226.101 51235 - 23.23.201.55 51235 - -**Node DB** - -You need to configure the [NodeBackEnd](https://wiki.ripple.com/NodeBackEnd) that you want the server to use. 
In most cases, that will mean adding this to your configuration file: - - [node_db] - type=HyperLevelDB - path=db/hashnode - -- NOTE HyperLevelDB is not available on Windows platforms. - -**Release Candidates** - -**Issues** - -None known - - ------------------------------------------------------------ - -## Version 0.14 - -rippled version 0.14 has now been released. This release is currently the tip of the [master](https://github.com/ripple/rippled/tree/master) branch and the tag is [v0.14.0](https://github.com/ripple/rippled/tree/v0.14.0). - -Prior to building, please confirm you have the correct source tree with the `git log` command. The first log entry should be the change setting the version: - - commit b6d11c08d0245ee9bafbb97143f5d685dd2979fc - Author: vinniefalco - Date: Wed Oct 2 2013 - - Set version to 0.14.0 - -**Significant Changes** - -- Improved peer discovery -- Improved pathfinding -- Ledger speed improvements -- Reduced memory consumption -- Improved server stability -- rippled no longer throws an exception on exiting -- Better error reporting -- Ripple-lib tests have been ported to use the Mocha testing framework - -**Validators** - -Ripple Labs is now running five validators. You can use this template for your `validators.txt` file: - - [validators] - n9KPnVLn7ewVzHvn218DcEYsnWLzKerTDwhpofhk4Ym1RUq4TeGw RIP1 - n9LFzWuhKNvXStHAuemfRKFVECLApowncMAM5chSCL9R5ECHGN4V RIP2 - n94rSdgTyBNGvYg8pZXGuNt59Y5bGAZGxbxyvjDaqD9ceRAgD85P RIP3 - n9LeQeDcLDMZKjx1TZtrXoLBLo5q1bR1sUQrWG7tEADFU6R27UBp RIP4 - n9KF6RpvktjNs2MDBkmxpJbup4BKrKeMKDXPhaXkq7cKTwLmWkFr RIP5 - -You should also raise your quorum to at least three by putting the following in your `rippled.cfg` file: - - [validation_quorum] - 3 - -If you are a validator, you should set your quorum to at least four. - -**IPs** - -A list of Ripple Labs server IP addresses can be found by resolving `r.ripple.com`. You can also add this to your `rippled.cfg` file to ensure you always have several peer connections to Ripple Labs servers: - - [ips] - 54.225.112.220 51235 - 54.225.123.13 51235 - 54.227.239.106 51235 - 107.21.251.218 51235 - 184.73.226.101 51235 - 23.23.201.55 51235 - -**Node DB** - -You need to configure the [NodeBackEnd](https://wiki.ripple.com/NodeBackEnd) that you want the server to use. In most cases, that will mean adding this to your configuration file: - - [node_db] - type=HyperLevelDB - path=db/hashnode - -- NOTE HyperLevelDB is not available on Windows platforms. - -**Release Candidates** - -**Issues** - -None known - - ------------------------------------------------------------ - -## Version 0.12 - -rippled version 0.12 has now been released. This release is currently the tip of the [master branch](https://github.com/ripple/rippled/tree/master) and can be found on GitHub. The tag is [v0.12.0](https://github.com/ripple/rippled/tree/v0.12.0). - -Prior to building, please confirm you have the correct source tree with the `git log` command. The first log entry should be the change setting the version: - - commit d0a9da6f16f4083993e4b6c5728777ffebf80f3a - Author: JoelKatz - Date: Mon Aug 26 12:08:05 2013 -0700 - - Set version to v0.12.0 - -**Major Partner Issues Fixed** - -- Server Showing "Offline" - -This issue was caused by LevelDB periodically compacting its internal data structure. While compacting, rippled's processing would stall causing the node to lose sync with the rest of the network. This issue was solved by switching from LevelDB to HyperLevelDB. rippled operators will need to change their rippled.cfg file. 
See below for configuration details. - -- Premature Validation of Transactions - -On rare occasions, a transaction would show as locally validated before the full network consensus was confirmed. This issue was resolved by changing the way transactions are saved. - -- Missing Ledgers - -Occasionally, some rippled servers would fail to fetch all ledgers. This left gaps in the local history and caused some API calls to report incomplete results. The ledger fetch code was rewritten to both prevent this and to repair any existing gaps. - -**Significant Changes** - -- The way transactions are saved has been changed. This fixes a number of ways transactions can incorrectly be reported as fully-validated. -- `doTransactionEntry` now works against open ledgers. -- `doLedgerEntry` now supports a binary option. -- A bug in `getBookPage` that caused it to skip offers is fixed. -- `getNodeFat` now returns deeper chains, reducing ledger acquire latency. -- Catching up if the (published ledger stream falls behind the network) is now more aggressive. -- I/O stalls are drastically reduced by using the HyperLevelDB node back end. -- Persistent ledger gaps should no longer occur. -- Clusters now exchange load information. - -**Validators** - -Ripple Labs is now running five validators. You can use this template for your `validators.txt` file: - - - - [validators] - n9KPnVLn7ewVzHvn218DcEYsnWLzKerTDwhpofhk4Ym1RUq4TeGw RIP1 - n9LFzWuhKNvXStHAuemfRKFVECLApowncMAM5chSCL9R5ECHGN4V RIP2 - n94rSdgTyBNGvYg8pZXGuNt59Y5bGAZGxbxyvjDaqD9ceRAgD85P RIP3 - n9LeQeDcLDMZKjx1TZtrXoLBLo5q1bR1sUQrWG7tEADFU6R27UBp RIP4 - n9KF6RpvktjNs2MDBkmxpJbup4BKrKeMKDXPhaXkq7cKTwLmWkFr RIP5 - - - -**Update April 2014** - Due to a vulnerability in OpenSSL the validator keys above have been cycled out, the five validators by RippleLabs use the following keys now: - - [validators] - n949f75evCHwgyP4fPVgaHqNHxUVN15PsJEZ3B3HnXPcPjcZAoy7 RL1 - n9MD5h24qrQqiyBC8aeqqCWvpiBiYQ3jxSr91uiDvmrkyHRdYLUj RL2 - n9L81uNCaPgtUJfaHh89gmdvXKAmSt5Gdsw2g1iPWaPkAHW5Nm4C RL3 - n9KiYM9CgngLvtRCQHZwgC2gjpdaZcCcbt3VboxiNFcKuwFVujzS RL4 - n9LdgEtkmGB9E2h3K4Vp7iGUaKuq23Zr32ehxiU8FWY7xoxbWTSA RL5 - -You should also raise your quorum to at least three by putting the following in your `rippled.cfg` file: - - [validation_quorum] - 3 - -If you are a validator, you should set your quorum to at least four. - -**IPs** - -A list of Ripple Labs server IP addresses can be found by resolving `r.ripple.com`. You can also add this to your `rippled.cfg` file to ensure you always have several peer connections to Ripple Labs servers: - - [ips] - 54.225.112.220 51235 - 54.225.123.13 51235 - 54.227.239.106 51235 - 107.21.251.218 51235 - 184.73.226.101 51235 - 23.23.201.55 51235 - -**Node DB** - -You need to configure the [NodeBackEnd](https://wiki.ripple.com/NodeBackEnd) that you want the server to use. In most cases, that will mean adding this to your configuration file: - - [node_db] - type=HyperLevelDB - path=db/hashnode - -- NOTE HyperLevelDB is not available on Windows platforms. - -**Release Candidates** - -RC1 was the first release candidate. - -RC2 fixed a bug that could cause ledger acquires to stall. - -RC3 fixed compilation under OSX. - -RC4 includes performance improvements in countAccountTx and numerous small fixes to ledger acquisition. - -RC5 changed the peer low water mark from 4 to 10 to acquire more server connections. - -RC6 fixed some possible load issues with the network state timer and cluster reporting timers. 
- -**Issues** - -Fetching of historical ledgers is slower in this build than in previous builds. This is being investigated. From e9d46f0bfc172f7fcda02d0585f043844c892729 Mon Sep 17 00:00:00 2001 From: Jingchen Date: Tue, 24 Jun 2025 19:56:58 +0100 Subject: [PATCH 063/244] Remove OwnerPaysFee as it's never fully supported (#5435) The OwnerPaysFee amendment was never fully supported, and this change removes the feature to the extent possible. --- include/xrpl/protocol/Feature.h | 5 + include/xrpl/protocol/detail/features.macro | 3 +- src/libxrpl/protocol/Feature.cpp | 4 +- src/test/app/AMMExtended_test.cpp | 273 +------------------- src/test/app/Flow_test.cpp | 167 +----------- src/test/app/Offer_test.cpp | 4 +- src/test/app/TheoreticalQuality_test.cpp | 2 +- src/test/rpc/Feature_test.cpp | 3 +- src/xrpld/app/paths/RippleCalc.cpp | 5 +- 9 files changed, 27 insertions(+), 439 deletions(-) diff --git a/include/xrpl/protocol/Feature.h b/include/xrpl/protocol/Feature.h index a2eb7d8e50..304cac5bc6 100644 --- a/include/xrpl/protocol/Feature.h +++ b/include/xrpl/protocol/Feature.h @@ -55,6 +55,11 @@ * `VoteBehavior::DefaultYes`. The communication process is beyond * the scope of these instructions. * + * 5) A feature marked as Obsolete can mean either: + * 1) It is in the ledger (marked as Supported::yes) and it is on its way to + * become Retired + * 2) The feature is not in the ledger (has always been marked as + * Supported::no) and the code to support it has been removed * * When a feature has been enabled for several years, the conditional code * may be removed, and the feature "retired". To retire a feature: diff --git a/include/xrpl/protocol/detail/features.macro b/include/xrpl/protocol/detail/features.macro index 1be0af5d01..2442abef7f 100644 --- a/include/xrpl/protocol/detail/features.macro +++ b/include/xrpl/protocol/detail/features.macro @@ -114,7 +114,6 @@ XRPL_FEATURE(DepositAuth, Supported::yes, VoteBehavior::DefaultYe XRPL_FIX (1513, Supported::yes, VoteBehavior::DefaultYes) XRPL_FEATURE(FlowCross, Supported::yes, VoteBehavior::DefaultYes) XRPL_FEATURE(Flow, Supported::yes, VoteBehavior::DefaultYes) -XRPL_FEATURE(OwnerPaysFee, Supported::no, VoteBehavior::DefaultNo) // The following amendments are obsolete, but must remain supported // because they could potentially get enabled. @@ -131,6 +130,8 @@ XRPL_FIX (NFTokenNegOffer, Supported::yes, VoteBehavior::Obsolete) XRPL_FIX (NFTokenDirV1, Supported::yes, VoteBehavior::Obsolete) XRPL_FEATURE(NonFungibleTokensV1, Supported::yes, VoteBehavior::Obsolete) XRPL_FEATURE(CryptoConditionsSuite, Supported::yes, VoteBehavior::Obsolete) +// This sits here temporarily and will be moved to another section soon +XRPL_FEATURE(OwnerPaysFee, Supported::no, VoteBehavior::Obsolete) // The following amendments have been active for at least two years. Their // pre-amendment code has been removed and the identifiers are deprecated. diff --git a/src/libxrpl/protocol/Feature.cpp b/src/libxrpl/protocol/Feature.cpp index eeeee1c185..e6442d2663 100644 --- a/src/libxrpl/protocol/Feature.cpp +++ b/src/libxrpl/protocol/Feature.cpp @@ -254,7 +254,7 @@ FeatureCollections::registerFeature( { check(!readOnly, "Attempting to register a feature after startup."); check( - support == Supported::yes || vote == VoteBehavior::DefaultNo, + support == Supported::yes || vote != VoteBehavior::DefaultYes, "Invalid feature parameters. 
Must be supported to be up-voted."); Feature const* i = getByName(name); if (!i) @@ -268,7 +268,7 @@ FeatureCollections::registerFeature( features.emplace_back(name, f); auto const getAmendmentSupport = [=]() { - if (vote == VoteBehavior::Obsolete) + if (vote == VoteBehavior::Obsolete && support == Supported::yes) return AmendmentSupport::Retired; return support == Supported::yes ? AmendmentSupport::Supported : AmendmentSupport::Unsupported; diff --git a/src/test/app/AMMExtended_test.cpp b/src/test/app/AMMExtended_test.cpp index 3d959a6a09..70b2f30e1d 100644 --- a/src/test/app/AMMExtended_test.cpp +++ b/src/test/app/AMMExtended_test.cpp @@ -1183,9 +1183,7 @@ private: using namespace jtx; - // The problem was identified when featureOwnerPaysFee was enabled, - // so make sure that gets included. - Env env{*this, features | featureOwnerPaysFee}; + Env env{*this, features}; // The fee that's charged for transactions. auto const fee = env.current()->fees().base; @@ -2217,271 +2215,6 @@ private: } } - void - testTransferRate(FeatureBitset features) - { - testcase("Transfer Rate"); - - using namespace jtx; - - { - // transfer fee on AMM - Env env(*this, features); - - fund(env, gw, {alice, bob, carol}, XRP(10'000), {USD(1'000)}); - env(rate(gw, 1.25)); - env.close(); - - AMM ammBob(env, bob, XRP(100), USD(150)); - // no transfer fee on create - BEAST_EXPECT(expectLine(env, bob, USD(1000 - 150))); - - env(pay(alice, carol, USD(50)), path(~USD), sendmax(XRP(50))); - env.close(); - - BEAST_EXPECT(expectLine(env, bob, USD(1'000 - 150))); - BEAST_EXPECT( - ammBob.expectBalances(XRP(150), USD(100), ammBob.tokens())); - BEAST_EXPECT(expectLedgerEntryRoot( - env, alice, xrpMinusFee(env, 10'000 - 50))); - BEAST_EXPECT(expectLine(env, carol, USD(1'050))); - } - - { - // Transfer fee AMM and offer - Env env(*this, features); - - fund( - env, - gw, - {alice, bob, carol}, - XRP(10'000), - {USD(1'000), EUR(1'000)}); - env(rate(gw, 1.25)); - env.close(); - - AMM ammBob(env, bob, XRP(100), USD(140)); - BEAST_EXPECT(expectLine(env, bob, USD(1'000 - 140))); - - env(offer(bob, USD(50), EUR(50))); - - // alice buys 40EUR with 40XRP - env(pay(alice, carol, EUR(40)), path(~USD, ~EUR), sendmax(XRP(40))); - - // 40XRP is swapped in for 40USD - BEAST_EXPECT( - ammBob.expectBalances(XRP(140), USD(100), ammBob.tokens())); - // 40USD buys 40EUR via bob's offer. 
40EUR delivered to carol - // and bob pays 25% on 40EUR, 40EUR*0.25=10EUR - BEAST_EXPECT(expectLine(env, bob, EUR(1'000 - 40 - 40 * 0.25))); - // bob gets 40USD back from the offer - BEAST_EXPECT(expectLine(env, bob, USD(1'000 - 140 + 40))); - BEAST_EXPECT(expectLedgerEntryRoot( - env, alice, xrpMinusFee(env, 10'000 - 40))); - BEAST_EXPECT(expectLine(env, carol, EUR(1'040))); - BEAST_EXPECT(expectOffers(env, bob, 1, {{USD(10), EUR(10)}})); - } - - { - // Transfer fee two consecutive AMM - Env env(*this, features); - - fund( - env, - gw, - {alice, bob, carol}, - XRP(10'000), - {USD(1'000), EUR(1'000)}); - env(rate(gw, 1.25)); - env.close(); - - AMM ammBobXRP_USD(env, bob, XRP(100), USD(140)); - BEAST_EXPECT(expectLine(env, bob, USD(1'000 - 140))); - - AMM ammBobUSD_EUR(env, bob, USD(100), EUR(140)); - BEAST_EXPECT(expectLine(env, bob, EUR(1'000 - 140))); - BEAST_EXPECT(expectLine(env, bob, USD(1'000 - 140 - 100))); - - // alice buys 40EUR with 40XRP - env(pay(alice, carol, EUR(40)), path(~USD, ~EUR), sendmax(XRP(40))); - - // 40XRP is swapped in for 40USD - BEAST_EXPECT(ammBobXRP_USD.expectBalances( - XRP(140), USD(100), ammBobXRP_USD.tokens())); - // 40USD is swapped in for 40EUR - BEAST_EXPECT(ammBobUSD_EUR.expectBalances( - USD(140), EUR(100), ammBobUSD_EUR.tokens())); - // no other charges on bob - BEAST_EXPECT(expectLine(env, bob, USD(1'000 - 140 - 100))); - BEAST_EXPECT(expectLine(env, bob, EUR(1'000 - 140))); - BEAST_EXPECT(expectLedgerEntryRoot( - env, alice, xrpMinusFee(env, 10'000 - 40))); - BEAST_EXPECT(expectLine(env, carol, EUR(1'040))); - } - - { - // Payment via AMM with limit quality, deliver less - // than requested - Env env(*this, features); - - fund( - env, - gw, - {alice, bob, carol}, - XRP(1'000), - {USD(1'200), GBP(1'200)}); - env(rate(gw, 1.25)); - env.close(); - - AMM amm(env, bob, GBP(1'000), USD(1'100)); - - // requested quality limit is 90USD/110GBP = 0.8181 - // trade quality is 77.2727USD/94.4444GBP = 0.8181 - env(pay(alice, carol, USD(90)), - path(~USD), - sendmax(GBP(110)), - txflags(tfNoRippleDirect | tfPartialPayment | tfLimitQuality)); - env.close(); - - if (!features[fixAMMv1_1]) - { - // alice buys 77.2727USD with 75.5555GBP and pays 25% tr fee - // on 75.5555GBP - // 1,200 - 75.55555*1.25 = 1200 - 94.4444 = 1105.55555GBP - BEAST_EXPECT(expectLine( - env, - alice, - STAmount{GBP, UINT64_C(1'105'555555555555), -12})); - // 75.5555GBP is swapped in for 77.7272USD - BEAST_EXPECT(amm.expectBalances( - STAmount{GBP, UINT64_C(1'075'555555555556), -12}, - STAmount{USD, UINT64_C(1'022'727272727272), -12}, - amm.tokens())); - } - else - { - // alice buys 77.2727USD with 75.5555GBP and pays 25% tr fee - // on 75.5555GBP - // 1,200 - 75.55555*1.25 = 1200 - 94.4444 = 1105.55555GBP - BEAST_EXPECT(expectLine( - env, - alice, - STAmount{GBP, UINT64_C(1'105'555555555554), -12})); - // 75.5555GBP is swapped in for 77.7272USD - BEAST_EXPECT(amm.expectBalances( - STAmount{GBP, UINT64_C(1'075'555555555557), -12}, - STAmount{USD, UINT64_C(1'022'727272727272), -12}, - amm.tokens())); - } - BEAST_EXPECT(expectLine( - env, carol, STAmount{USD, UINT64_C(1'277'272727272728), -12})); - } - - { - // AMM offer crossing - Env env(*this, features); - - fund(env, gw, {alice, bob}, XRP(1'000), {USD(1'200), EUR(1'200)}); - env(rate(gw, 1.25)); - env.close(); - - AMM amm(env, bob, USD(1'000), EUR(1'150)); - - env(offer(alice, EUR(100), USD(100))); - env.close(); - - if (!features[fixAMMv1_1]) - { - // 95.2380USD is swapped in for 100EUR - BEAST_EXPECT(amm.expectBalances( - STAmount{USD, 
UINT64_C(1'095'238095238095), -12}, - EUR(1'050), - amm.tokens())); - // alice pays 25% tr fee on 95.2380USD - // 1200-95.2380*1.25 = 1200 - 119.0477 = 1080.9523USD - BEAST_EXPECT(expectLine( - env, - alice, - STAmount{USD, UINT64_C(1'080'952380952381), -12}, - EUR(1'300))); - } - else - { - // 95.2380USD is swapped in for 100EUR - BEAST_EXPECT(amm.expectBalances( - STAmount{USD, UINT64_C(1'095'238095238096), -12}, - EUR(1'050), - amm.tokens())); - // alice pays 25% tr fee on 95.2380USD - // 1200-95.2380*1.25 = 1200 - 119.0477 = 1080.9523USD - BEAST_EXPECT(expectLine( - env, - alice, - STAmount{USD, UINT64_C(1'080'95238095238), -11}, - EUR(1'300))); - } - BEAST_EXPECT(expectOffers(env, alice, 0)); - } - - { - // First pass through a strand redeems, second pass issues, - // through an offer limiting step is not an endpoint - Env env(*this, features); - auto const USDA = alice["USD"]; - auto const USDB = bob["USD"]; - Account const dan("dan"); - - env.fund(XRP(10'000), bob, carol, dan, gw); - fund(env, {alice}, XRP(10'000)); - env(rate(gw, 1.25)); - env.trust(USD(2'000), alice, bob, carol, dan); - env.trust(EUR(2'000), carol, dan); - env.trust(USDA(1'000), bob); - env.trust(USDB(1'000), gw); - env(pay(gw, bob, USD(50))); - env(pay(gw, dan, EUR(1'050))); - env(pay(gw, dan, USD(1'000))); - AMM ammDan(env, dan, USD(1'000), EUR(1'050)); - - if (!features[fixAMMv1_1]) - { - // alice -> bob -> gw -> carol. $50 should have transfer fee; - // $10, no fee - env(pay(alice, carol, EUR(50)), - path(bob, gw, ~EUR), - sendmax(USDA(60)), - txflags(tfNoRippleDirect)); - BEAST_EXPECT(ammDan.expectBalances( - USD(1'050), EUR(1'000), ammDan.tokens())); - BEAST_EXPECT(expectLine(env, dan, USD(0))); - BEAST_EXPECT(expectLine(env, dan, EUR(0))); - BEAST_EXPECT(expectLine(env, bob, USD(-10))); - BEAST_EXPECT(expectLine(env, bob, USDA(60))); - BEAST_EXPECT(expectLine(env, carol, EUR(50))); - } - else - { - // alice -> bob -> gw -> carol. 
$50 should have transfer fee; - // $10, no fee - env(pay(alice, carol, EUR(50)), - path(bob, gw, ~EUR), - sendmax(USDA(60.1)), - txflags(tfNoRippleDirect)); - BEAST_EXPECT(ammDan.expectBalances( - STAmount{USD, UINT64_C(1'050'000000000001), -12}, - EUR(1'000), - ammDan.tokens())); - BEAST_EXPECT(expectLine(env, dan, USD(0))); - BEAST_EXPECT(expectLine(env, dan, EUR(0))); - BEAST_EXPECT(expectLine( - env, bob, STAmount{USD, INT64_C(-10'000000000001), -12})); - BEAST_EXPECT(expectLine( - env, bob, STAmount{USDA, UINT64_C(60'000000000001), -12})); - BEAST_EXPECT(expectLine(env, carol, EUR(50))); - } - } - } - void testTransferRateNoOwnerFee(FeatureBitset features) { @@ -4057,13 +3790,9 @@ private: { using namespace jtx; FeatureBitset const all{supported_amendments()}; - FeatureBitset const ownerPaysFee{featureOwnerPaysFee}; testFalseDry(all); testBookStep(all); - testBookStep(all | ownerPaysFee); - testTransferRate(all | ownerPaysFee); - testTransferRate((all - fixAMMv1_1 - fixAMMv1_3) | ownerPaysFee); testTransferRateNoOwnerFee(all); testTransferRateNoOwnerFee(all - fixAMMv1_1 - fixAMMv1_3); testLimitQuality(); diff --git a/src/test/app/Flow_test.cpp b/src/test/app/Flow_test.cpp index d0b8686db6..68485f4eee 100644 --- a/src/test/app/Flow_test.cpp +++ b/src/test/app/Flow_test.cpp @@ -599,158 +599,18 @@ struct Flow_test : public beast::unit_test::suite Account const bob("bob"); Account const carol("carol"); - { - // Simple payment through a gateway with a - // transfer rate - Env env(*this, features); + // Offer where the owner is also the issuer, sender pays fee + Env env(*this, features); - env.fund(XRP(10000), alice, bob, carol, gw); - env.close(); - env(rate(gw, 1.25)); - env.trust(USD(1000), alice, bob, carol); - env(pay(gw, alice, USD(50))); - env.require(balance(alice, USD(50))); - env(pay(alice, bob, USD(40)), sendmax(USD(50))); - env.require(balance(bob, USD(40)), balance(alice, USD(0))); - } - { - // transfer rate is not charged when issuer is src or dst - Env env(*this, features); - - env.fund(XRP(10000), alice, bob, carol, gw); - env.close(); - env(rate(gw, 1.25)); - env.trust(USD(1000), alice, bob, carol); - env(pay(gw, alice, USD(50))); - env.require(balance(alice, USD(50))); - env(pay(alice, gw, USD(40)), sendmax(USD(40))); - env.require(balance(alice, USD(10))); - } - { - // transfer fee on an offer - Env env(*this, features); - - env.fund(XRP(10000), alice, bob, carol, gw); - env.close(); - env(rate(gw, 1.25)); - env.trust(USD(1000), alice, bob, carol); - env(pay(gw, bob, USD(65))); - - env(offer(bob, XRP(50), USD(50))); - - env(pay(alice, carol, USD(50)), path(~USD), sendmax(XRP(50))); - env.require( - balance(alice, xrpMinusFee(env, 10000 - 50)), - balance(bob, USD(2.5)), // owner pays transfer fee - balance(carol, USD(50))); - } - - { - // Transfer fee two consecutive offers - Env env(*this, features); - - env.fund(XRP(10000), alice, bob, carol, gw); - env.close(); - env(rate(gw, 1.25)); - env.trust(USD(1000), alice, bob, carol); - env.trust(EUR(1000), alice, bob, carol); - env(pay(gw, bob, USD(50))); - env(pay(gw, bob, EUR(50))); - - env(offer(bob, XRP(50), USD(50))); - env(offer(bob, USD(50), EUR(50))); - - env(pay(alice, carol, EUR(40)), path(~USD, ~EUR), sendmax(XRP(40))); - env.require( - balance(alice, xrpMinusFee(env, 10000 - 40)), - balance(bob, USD(40)), - balance(bob, EUR(0)), - balance(carol, EUR(40))); - } - - { - // First pass through a strand redeems, second pass issues, no - // offers limiting step is not an endpoint - Env env(*this, features); - auto const 
USDA = alice["USD"]; - auto const USDB = bob["USD"]; - - env.fund(XRP(10000), alice, bob, carol, gw); - env.close(); - env(rate(gw, 1.25)); - env.trust(USD(1000), alice, bob, carol); - env.trust(USDA(1000), bob); - env.trust(USDB(1000), gw); - env(pay(gw, bob, USD(50))); - // alice -> bob -> gw -> carol. $50 should have transfer fee; $10, - // no fee - env(pay(alice, carol, USD(50)), path(bob), sendmax(USDA(60))); - env.require( - balance(bob, USD(-10)), - balance(bob, USDA(60)), - balance(carol, USD(50))); - } - { - // First pass through a strand redeems, second pass issues, through - // an offer limiting step is not an endpoint - Env env(*this, features); - auto const USDA = alice["USD"]; - auto const USDB = bob["USD"]; - Account const dan("dan"); - - env.fund(XRP(10000), alice, bob, carol, dan, gw); - env.close(); - env(rate(gw, 1.25)); - env.trust(USD(1000), alice, bob, carol, dan); - env.trust(EUR(1000), carol, dan); - env.trust(USDA(1000), bob); - env.trust(USDB(1000), gw); - env(pay(gw, bob, USD(50))); - env(pay(gw, dan, EUR(100))); - env(offer(dan, USD(100), EUR(100))); - // alice -> bob -> gw -> carol. $50 should have transfer fee; $10, - // no fee - env(pay(alice, carol, EUR(50)), - path(bob, gw, ~EUR), - sendmax(USDA(60)), - txflags(tfNoRippleDirect)); - env.require( - balance(bob, USD(-10)), - balance(bob, USDA(60)), - balance(dan, USD(50)), - balance(dan, EUR(37.5)), - balance(carol, EUR(50))); - } - - { - // Offer where the owner is also the issuer, owner pays fee - Env env(*this, features); - - env.fund(XRP(10000), alice, bob, gw); - env.close(); - env(rate(gw, 1.25)); - env.trust(USD(1000), alice, bob); - env(offer(gw, XRP(100), USD(100))); - env(pay(alice, bob, USD(100)), sendmax(XRP(100))); - env.require( - balance(alice, xrpMinusFee(env, 10000 - 100)), - balance(bob, USD(100))); - } - if (!features[featureOwnerPaysFee]) - { - // Offer where the owner is also the issuer, sender pays fee - Env env(*this, features); - - env.fund(XRP(10000), alice, bob, gw); - env.close(); - env(rate(gw, 1.25)); - env.trust(USD(1000), alice, bob); - env(offer(gw, XRP(125), USD(125))); - env(pay(alice, bob, USD(100)), sendmax(XRP(200))); - env.require( - balance(alice, xrpMinusFee(env, 10000 - 125)), - balance(bob, USD(100))); - } + env.fund(XRP(10000), alice, bob, gw); + env.close(); + env(rate(gw, 1.25)); + env.trust(USD(1000), alice, bob); + env(offer(gw, XRP(125), USD(125))); + env(pay(alice, bob, USD(100)), sendmax(XRP(200))); + env.require( + balance(alice, xrpMinusFee(env, 10000 - 125)), + balance(bob, USD(100))); } void @@ -1445,7 +1305,6 @@ struct Flow_test : public beast::unit_test::suite testWithFeats(FeatureBitset features) { using namespace jtx; - FeatureBitset const ownerPaysFee{featureOwnerPaysFee}; FeatureBitset const reducedOffersV2(fixReducedOffersV2); testLineQuality(features); @@ -1453,9 +1312,7 @@ struct Flow_test : public beast::unit_test::suite testBookStep(features - reducedOffersV2); testDirectStep(features); testBookStep(features); - testDirectStep(features | ownerPaysFee); - testBookStep(features | ownerPaysFee); - testTransferRate(features | ownerPaysFee); + testTransferRate(features); testSelfPayment1(features); testSelfPayment2(features); testSelfFundedXRPEndpoint(false, features); diff --git a/src/test/app/Offer_test.cpp b/src/test/app/Offer_test.cpp index 0891b27df8..d3481881c4 100644 --- a/src/test/app/Offer_test.cpp +++ b/src/test/app/Offer_test.cpp @@ -3643,9 +3643,7 @@ public: using namespace jtx; - // The problem was identified when featureOwnerPaysFee was 
enabled, - // so make sure that gets included. - Env env{*this, features | featureOwnerPaysFee}; + Env env{*this, features}; // The fee that's charged for transactions. auto const fee = env.current()->fees().base; diff --git a/src/test/app/TheoreticalQuality_test.cpp b/src/test/app/TheoreticalQuality_test.cpp index 1b3e6d9a82..dcffd810ed 100644 --- a/src/test/app/TheoreticalQuality_test.cpp +++ b/src/test/app/TheoreticalQuality_test.cpp @@ -264,7 +264,7 @@ class TheoreticalQuality_test : public beast::unit_test::suite sendMaxIssue, rcp.paths, /*defaultPaths*/ rcp.paths.empty(), - sb.rules().enabled(featureOwnerPaysFee), + false, OfferCrossing::no, ammContext, std::nullopt, diff --git a/src/test/rpc/Feature_test.cpp b/src/test/rpc/Feature_test.cpp index 40de395a71..06697f80c1 100644 --- a/src/test/rpc/Feature_test.cpp +++ b/src/test/rpc/Feature_test.cpp @@ -139,7 +139,8 @@ class Feature_test : public beast::unit_test::suite // Test a random sampling of the variables. If any of these get retired // or removed, swap out for any other feature. - BEAST_EXPECT(featureToName(featureOwnerPaysFee) == "OwnerPaysFee"); + BEAST_EXPECT( + featureToName(fixTrustLinesToSelf) == "fixTrustLinesToSelf"); BEAST_EXPECT(featureToName(featureFlow) == "Flow"); BEAST_EXPECT(featureToName(featureNegativeUNL) == "NegativeUNL"); BEAST_EXPECT(featureToName(fix1578) == "fix1578"); diff --git a/src/xrpld/app/paths/RippleCalc.cpp b/src/xrpld/app/paths/RippleCalc.cpp index 4e472e07c8..9c438bdfa9 100644 --- a/src/xrpld/app/paths/RippleCalc.cpp +++ b/src/xrpld/app/paths/RippleCalc.cpp @@ -95,9 +95,6 @@ RippleCalc::rippleCalculate( return std::nullopt; }(); - bool const ownerPaysTransferFee = - view.rules().enabled(featureOwnerPaysFee); - try { flowOut = flow( @@ -108,7 +105,7 @@ RippleCalc::rippleCalculate( spsPaths, defaultPaths, partialPayment, - ownerPaysTransferFee, + false, OfferCrossing::no, limitQuality, sendMax, From df6daf0d8f21a36327e0df1fcaaa16a011ad3a8c Mon Sep 17 00:00:00 2001 From: Jingchen Date: Thu, 26 Jun 2025 17:09:05 +0100 Subject: [PATCH 064/244] Add XRPL_ABANDON and use it to abandon OwnerPaysFee (#5510) --- include/xrpl/protocol/Feature.h | 17 +++++++++++++++++ include/xrpl/protocol/detail/features.macro | 10 ++++++++-- src/libxrpl/protocol/Feature.cpp | 17 +++++++++++++++++ 3 files changed, 42 insertions(+), 2 deletions(-) diff --git a/include/xrpl/protocol/Feature.h b/include/xrpl/protocol/Feature.h index 304cac5bc6..75add39af9 100644 --- a/include/xrpl/protocol/Feature.h +++ b/include/xrpl/protocol/Feature.h @@ -61,6 +61,13 @@ * 2) The feature is not in the ledger (has always been marked as * Supported::no) and the code to support it has been removed * + * If we want to discontinue a feature that we've never fully supported and + * the feature has never been enabled, we should remove all the related + * code, and mark the feature as "abandoned". To do this: + * + * 1) Open features.macro, move the feature to the abandoned section and + * change the macro to XRPL_ABANDON + * * When a feature has been enabled for several years, the conditional code * may be removed, and the feature "retired". 
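The Feature.h hunks that follow rely on an X-macro trick: features.macro is included twice, first with XRPL_FEATURE/XRPL_FIX/XRPL_RETIRE/XRPL_ABANDON defined to expand to +1 so that numFeatures can be sized, then with them redefined so that live entries declare their per-feature identifiers while retired and abandoned entries expand to nothing. The snippet below is a self-contained miniature of that pattern, with an inline list and made-up feature names standing in for features.macro; it is an illustration, not the project's code.

    #include <cstddef>
    #include <string>

    // Stand-in for features.macro: each entry names one amendment.
    #define FEATURE_LIST(X_FEATURE, X_ABANDON) \
        X_FEATURE(Flow)                        \
        X_FEATURE(DepositAuth)                 \
        X_ABANDON(OwnerPaysFee)

    // First expansion: every entry, abandoned or not, counts towards the total.
    #define COUNT_FEATURE(name) +1
    #define COUNT_ABANDON(name) +1
    constexpr std::size_t numFeatures =
        0 FEATURE_LIST(COUNT_FEATURE, COUNT_ABANDON);
    static_assert(numFeatures == 3);

    // Second expansion: only live features produce a named identifier.
    #define DECLARE_FEATURE(name) std::string const feature##name{#name};
    #define DECLARE_ABANDON(name)
    FEATURE_LIST(DECLARE_FEATURE, DECLARE_ABANDON)

    int
    main()
    {
        return featureFlow == "Flow" ? 0 : 1;
    }

The real header wraps each expansion in the push_macro/pop_macro bookkeeping visible in the hunks that follow, so the temporary definitions do not leak.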
To retire a feature: * @@ -93,10 +100,13 @@ namespace detail { #undef XRPL_FIX #pragma push_macro("XRPL_RETIRE") #undef XRPL_RETIRE +#pragma push_macro("XRPL_ABANDON") +#undef XRPL_ABANDON #define XRPL_FEATURE(name, supported, vote) +1 #define XRPL_FIX(name, supported, vote) +1 #define XRPL_RETIRE(name) +1 +#define XRPL_ABANDON(name) +1 // This value SHOULD be equal to the number of amendments registered in // Feature.cpp. Because it's only used to reserve storage, and determine how @@ -113,6 +123,8 @@ static constexpr std::size_t numFeatures = #pragma pop_macro("XRPL_FIX") #undef XRPL_FEATURE #pragma pop_macro("XRPL_FEATURE") +#undef XRPL_ABANDON +#pragma pop_macro("XRPL_ABANDON") /** Amendments that this server supports and the default voting behavior. Whether they are enabled depends on the Rules defined in the validated @@ -354,10 +366,13 @@ foreachFeature(FeatureBitset bs, F&& f) #undef XRPL_FIX #pragma push_macro("XRPL_RETIRE") #undef XRPL_RETIRE +#pragma push_macro("XRPL_ABANDON") +#undef XRPL_ABANDON #define XRPL_FEATURE(name, supported, vote) extern uint256 const feature##name; #define XRPL_FIX(name, supported, vote) extern uint256 const fix##name; #define XRPL_RETIRE(name) +#define XRPL_ABANDON(name) #include @@ -367,6 +382,8 @@ foreachFeature(FeatureBitset bs, F&& f) #pragma pop_macro("XRPL_FIX") #undef XRPL_FEATURE #pragma pop_macro("XRPL_FEATURE") +#undef XRPL_ABANDON +#pragma pop_macro("XRPL_ABANDON") } // namespace ripple diff --git a/include/xrpl/protocol/detail/features.macro b/include/xrpl/protocol/detail/features.macro index 2442abef7f..3584d8f8cf 100644 --- a/include/xrpl/protocol/detail/features.macro +++ b/include/xrpl/protocol/detail/features.macro @@ -26,6 +26,9 @@ #if !defined(XRPL_RETIRE) #error "undefined macro: XRPL_RETIRE" #endif +#if !defined(XRPL_ABANDON) +#error "undefined macro: XRPL_ABANDON" +#endif // Add new amendments to the top of this list. // Keep it sorted in reverse chronological order. @@ -130,8 +133,11 @@ XRPL_FIX (NFTokenNegOffer, Supported::yes, VoteBehavior::Obsolete) XRPL_FIX (NFTokenDirV1, Supported::yes, VoteBehavior::Obsolete) XRPL_FEATURE(NonFungibleTokensV1, Supported::yes, VoteBehavior::Obsolete) XRPL_FEATURE(CryptoConditionsSuite, Supported::yes, VoteBehavior::Obsolete) -// This sits here temporarily and will be moved to another section soon -XRPL_FEATURE(OwnerPaysFee, Supported::no, VoteBehavior::Obsolete) + +// The following amendments were never supported, never enabled, and +// we've abanded them. These features should never be in the ledger, +// and we've removed all the related code. +XRPL_ABANDON(OwnerPaysFee) // The following amendments have been active for at least two years. Their // pre-amendment code has been removed and the identifiers are deprecated. diff --git a/src/libxrpl/protocol/Feature.cpp b/src/libxrpl/protocol/Feature.cpp index e6442d2663..478b155387 100644 --- a/src/libxrpl/protocol/Feature.cpp +++ b/src/libxrpl/protocol/Feature.cpp @@ -398,6 +398,14 @@ retireFeature(std::string const& name) return registerFeature(name, Supported::yes, VoteBehavior::Obsolete); } +// Abandoned features are not in the ledger and have no code controlled by the +// feature. They were never supported, and cannot be voted on. +uint256 +abandonFeature(std::string const& name) +{ + return registerFeature(name, Supported::no, VoteBehavior::Obsolete); +} + /** Tell FeatureCollections when registration is complete. 
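Together with retireFeature and the relaxed check in registerFeature above, the new abandonFeature helper completes a small state table: a feature may only default to an up-vote if the code supports it, Obsolete plus Supported::yes is reported as Retired, and Obsolete plus Supported::no (an abandoned feature such as OwnerPaysFee) is reported as merely Unsupported. The sketch below restates that classification with standalone enums so it compiles in isolation; the enumerator names follow the diff, the free functions are paraphrases for illustration.

    #include <cassert>

    enum class Supported { no, yes };
    enum class VoteBehavior { Obsolete, DefaultNo, DefaultYes };
    enum class AmendmentSupport { Retired, Supported, Unsupported };

    constexpr bool
    validRegistration(Supported support, VoteBehavior vote)
    {
        // Only supported features may default to an up-vote.
        return support == Supported::yes || vote != VoteBehavior::DefaultYes;
    }

    constexpr AmendmentSupport
    amendmentSupport(Supported support, VoteBehavior vote)
    {
        if (vote == VoteBehavior::Obsolete && support == Supported::yes)
            return AmendmentSupport::Retired;
        return support == Supported::yes ? AmendmentSupport::Supported
                                         : AmendmentSupport::Unsupported;
    }

    int
    main()
    {
        // retireFeature(name) registers (Supported::yes, Obsolete).
        assert(
            amendmentSupport(Supported::yes, VoteBehavior::Obsolete) ==
            AmendmentSupport::Retired);
        // abandonFeature(name) registers (Supported::no, Obsolete).
        assert(validRegistration(Supported::no, VoteBehavior::Obsolete));
        assert(
            amendmentSupport(Supported::no, VoteBehavior::Obsolete) ==
            AmendmentSupport::Unsupported);
    }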
*/ bool registrationIsDone() @@ -432,6 +440,8 @@ featureToName(uint256 const& f) #undef XRPL_FIX #pragma push_macro("XRPL_RETIRE") #undef XRPL_RETIRE +#pragma push_macro("XRPL_ABANDON") +#undef XRPL_ABANDON #define XRPL_FEATURE(name, supported, vote) \ uint256 const feature##name = registerFeature(#name, supported, vote); @@ -443,6 +453,11 @@ featureToName(uint256 const& f) [[deprecated("The referenced amendment has been retired")]] \ [[maybe_unused]] \ uint256 const retired##name = retireFeature(#name); + +#define XRPL_ABANDON(name) \ + [[deprecated("The referenced amendment has been abandoned")]] \ + [[maybe_unused]] \ + uint256 const abandoned##name = abandonFeature(#name); // clang-format on #include @@ -453,6 +468,8 @@ featureToName(uint256 const& f) #pragma pop_macro("XRPL_FIX") #undef XRPL_FEATURE #pragma pop_macro("XRPL_FEATURE") +#undef XRPL_ABANDON +#pragma pop_macro("XRPL_ABANDON") // All of the features should now be registered, since variables in a cpp file // are initialized from top to bottom. From e18f27f5f7e186e4fb0d6c8ddce34c2db72f80b3 Mon Sep 17 00:00:00 2001 From: Vlad <129996061+vvysokikh1@users.noreply.github.com> Date: Thu, 26 Jun 2025 20:35:31 +0100 Subject: [PATCH 065/244] test: switch some unit tests to doctest (#5383) This change moves some tests from the current unit tests that are compiled into the rippled binary to using the doctest framework. --- .github/workflows/macos.yml | 5 +- .github/workflows/nix.yml | 10 +- .github/workflows/windows.yml | 5 +- Builds/levelization/results/ordering.txt | 1 + CMakeLists.txt | 10 + cmake/xrpl_add_test.cmake | 41 +++ conanfile.py | 1 + src/test/basics/RangeSet_test.cpp | 144 ---------- src/test/basics/Slice_test.cpp | 116 -------- src/test/basics/base64_test.cpp | 82 ------ src/test/basics/mulDiv_test.cpp | 62 ----- src/test/basics/scope_test.cpp | 193 ------------- src/test/basics/tagged_integer_test.cpp | 258 ------------------ src/tests/README.md | 4 + src/tests/libxrpl/CMakeLists.txt | 14 + src/tests/libxrpl/basics/RangeSet.cpp | 129 +++++++++ src/tests/libxrpl/basics/Slice.cpp | 105 +++++++ src/tests/libxrpl/basics/base64.cpp | 67 +++++ .../libxrpl/basics/contract.cpp} | 51 ++-- src/tests/libxrpl/basics/main.cpp | 2 + src/tests/libxrpl/basics/mulDiv.cpp | 64 +++++ src/tests/libxrpl/basics/scope.cpp | 174 ++++++++++++ src/tests/libxrpl/basics/tagged_integer.cpp | 247 +++++++++++++++++ .../libxrpl/crypto/csprng.cpp} | 48 +--- src/tests/libxrpl/crypto/main.cpp | 2 + 25 files changed, 908 insertions(+), 927 deletions(-) create mode 100644 cmake/xrpl_add_test.cmake delete mode 100644 src/test/basics/RangeSet_test.cpp delete mode 100644 src/test/basics/Slice_test.cpp delete mode 100644 src/test/basics/base64_test.cpp delete mode 100644 src/test/basics/mulDiv_test.cpp delete mode 100644 src/test/basics/scope_test.cpp delete mode 100644 src/test/basics/tagged_integer_test.cpp create mode 100644 src/tests/README.md create mode 100644 src/tests/libxrpl/CMakeLists.txt create mode 100644 src/tests/libxrpl/basics/RangeSet.cpp create mode 100644 src/tests/libxrpl/basics/Slice.cpp create mode 100644 src/tests/libxrpl/basics/base64.cpp rename src/{test/basics/contract_test.cpp => tests/libxrpl/basics/contract.cpp} (60%) create mode 100644 src/tests/libxrpl/basics/main.cpp create mode 100644 src/tests/libxrpl/basics/mulDiv.cpp create mode 100644 src/tests/libxrpl/basics/scope.cpp create mode 100644 src/tests/libxrpl/basics/tagged_integer.cpp rename src/{test/core/CryptoPRNG_test.cpp => tests/libxrpl/crypto/csprng.cpp} (58%) create mode 
100644 src/tests/libxrpl/crypto/main.cpp diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 63d54175ea..e533c6eb41 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -96,4 +96,7 @@ jobs: run: | n=$(nproc) echo "Using $n test jobs" - ${build_dir}/rippled --unittest --unittest-jobs $n + + cd ${build_dir} + ./rippled --unittest --unittest-jobs $n + ctest -j $n --output-on-failure diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml index de59e07761..0ba7b0f212 100644 --- a/.github/workflows/nix.yml +++ b/.github/workflows/nix.yml @@ -163,7 +163,9 @@ jobs: cmake-args: "-Dassert=TRUE -Dwerr=TRUE ${{ matrix.cmake-args }}" - name: test run: | - ${build_dir}/rippled --unittest --unittest-jobs $(nproc) + cd ${build_dir} + ./rippled --unittest --unittest-jobs $(nproc) + ctest -j $(nproc) --output-on-failure reference-fee-test: strategy: @@ -217,8 +219,9 @@ jobs: cmake-args: "-Dassert=TRUE -Dwerr=TRUE ${{ matrix.cmake-args }}" - name: test run: | - ${build_dir}/rippled --unittest --unittest-jobs $(nproc) - + cd ${build_dir} + ./rippled --unittest --unittest-jobs $(nproc) + ctest -j $(nproc) --output-on-failure coverage: strategy: fail-fast: false @@ -441,3 +444,4 @@ jobs: run: | cd ${BUILD_DIR} ./rippled -u --unittest-jobs $(( $(nproc)/4 )) + ctest -j $(nproc) --output-on-failure diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 1d90c2ef58..7c83e7f300 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -95,5 +95,6 @@ jobs: shell: bash if: ${{ matrix.configuration.tests }} run: | - ${build_dir}/${{ matrix.configuration.type }}/rippled --unittest \ - --unittest-jobs $(nproc) + cd ${build_dir}/${{ matrix.configuration.type }} + ./rippled --unittest --unittest-jobs $(nproc) + ctest -j $(nproc) --output-on-failure diff --git a/Builds/levelization/results/ordering.txt b/Builds/levelization/results/ordering.txt index eca7fc6dc2..ce22d8edb0 100644 --- a/Builds/levelization/results/ordering.txt +++ b/Builds/levelization/results/ordering.txt @@ -132,6 +132,7 @@ test.shamap > xrpl.protocol test.toplevel > test.csf test.toplevel > xrpl.json test.unit_test > xrpl.basics +tests.libxrpl > xrpl.basics xrpl.json > xrpl.basics xrpl.protocol > xrpl.basics xrpl.protocol > xrpl.json diff --git a/CMakeLists.txt b/CMakeLists.txt index a9f063db57..c71fb68599 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -90,6 +90,11 @@ set_target_properties(OpenSSL::SSL PROPERTIES INTERFACE_COMPILE_DEFINITIONS OPENSSL_NO_SSL2 ) set(SECP256K1_INSTALL TRUE) +set(SECP256K1_BUILD_BENCHMARK FALSE) +set(SECP256K1_BUILD_TESTS FALSE) +set(SECP256K1_BUILD_EXHAUSTIVE_TESTS FALSE) +set(SECP256K1_BUILD_CTIME_TESTS FALSE) +set(SECP256K1_BUILD_EXAMPLES FALSE) add_subdirectory(external/secp256k1) add_library(secp256k1::secp256k1 ALIAS secp256k1) add_subdirectory(external/ed25519-donna) @@ -144,3 +149,8 @@ set(PROJECT_EXPORT_SET RippleExports) include(RippledCore) include(RippledInstall) include(RippledValidatorKeys) + +if(tests) + include(CTest) + add_subdirectory(src/tests/libxrpl) +endif() diff --git a/cmake/xrpl_add_test.cmake b/cmake/xrpl_add_test.cmake new file mode 100644 index 0000000000..d61f4ece3d --- /dev/null +++ b/cmake/xrpl_add_test.cmake @@ -0,0 +1,41 @@ +include(isolate_headers) + +function(xrpl_add_test name) + set(target ${PROJECT_NAME}.test.${name}) + + file(GLOB_RECURSE sources CONFIGURE_DEPENDS + "${CMAKE_CURRENT_SOURCE_DIR}/${name}/*.cpp" + "${CMAKE_CURRENT_SOURCE_DIR}/${name}.cpp" + ) + 
add_executable(${target} EXCLUDE_FROM_ALL ${ARGN} ${sources}) + + isolate_headers( + ${target} + "${CMAKE_SOURCE_DIR}" + "${CMAKE_SOURCE_DIR}/tests/${name}" + PRIVATE + ) + + # Make sure the test isn't optimized away in unity builds + set_target_properties(${target} PROPERTIES + UNITY_BUILD_MODE GROUP + UNITY_BUILD_BATCH_SIZE 0) # Adjust as needed + + add_test(NAME ${target} COMMAND ${target}) + set_tests_properties( + ${target} PROPERTIES + FIXTURES_REQUIRED ${target}_fixture + ) + + add_test( + NAME ${target}.build + COMMAND + ${CMAKE_COMMAND} + --build ${CMAKE_BINARY_DIR} + --config $ + --target ${target} + ) + set_tests_properties(${target}.build PROPERTIES + FIXTURES_SETUP ${target}_fixture + ) +endfunction() diff --git a/conanfile.py b/conanfile.py index da8a09611d..1a9e88fc0d 100644 --- a/conanfile.py +++ b/conanfile.py @@ -24,6 +24,7 @@ class Xrpl(ConanFile): } requires = [ + 'doctest/2.4.11', 'grpc/1.50.1', 'libarchive/3.7.6', 'nudb/2.0.8', diff --git a/src/test/basics/RangeSet_test.cpp b/src/test/basics/RangeSet_test.cpp deleted file mode 100644 index e0136ab890..0000000000 --- a/src/test/basics/RangeSet_test.cpp +++ /dev/null @@ -1,144 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2013 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#include -#include - -namespace ripple { -class RangeSet_test : public beast::unit_test::suite -{ -public: - void - testPrevMissing() - { - testcase("prevMissing"); - - // Set will include: - // [ 0, 5] - // [10,15] - // [20,25] - // etc... - - RangeSet set; - for (std::uint32_t i = 0; i < 10; ++i) - set.insert(range(10 * i, 10 * i + 5)); - - for (std::uint32_t i = 1; i < 100; ++i) - { - std::optional expected; - // no prev missing in domain for i <= 6 - if (i > 6) - { - std::uint32_t const oneBelowRange = (10 * (i / 10)) - 1; - - expected = ((i % 10) > 6) ? 
(i - 1) : oneBelowRange; - } - BEAST_EXPECT(prevMissing(set, i) == expected); - } - } - - void - testToString() - { - testcase("toString"); - - RangeSet set; - BEAST_EXPECT(to_string(set) == "empty"); - - set.insert(1); - BEAST_EXPECT(to_string(set) == "1"); - - set.insert(range(4u, 6u)); - BEAST_EXPECT(to_string(set) == "1,4-6"); - - set.insert(2); - BEAST_EXPECT(to_string(set) == "1-2,4-6"); - - set.erase(range(4u, 5u)); - BEAST_EXPECT(to_string(set) == "1-2,6"); - } - - void - testFromString() - { - testcase("fromString"); - - RangeSet set; - - BEAST_EXPECT(!from_string(set, "")); - BEAST_EXPECT(boost::icl::length(set) == 0); - - BEAST_EXPECT(!from_string(set, "#")); - BEAST_EXPECT(boost::icl::length(set) == 0); - - BEAST_EXPECT(!from_string(set, ",")); - BEAST_EXPECT(boost::icl::length(set) == 0); - - BEAST_EXPECT(!from_string(set, ",-")); - BEAST_EXPECT(boost::icl::length(set) == 0); - - BEAST_EXPECT(!from_string(set, "1,,2")); - BEAST_EXPECT(boost::icl::length(set) == 0); - - BEAST_EXPECT(from_string(set, "1")); - BEAST_EXPECT(boost::icl::length(set) == 1); - BEAST_EXPECT(boost::icl::first(set) == 1); - - BEAST_EXPECT(from_string(set, "1,1")); - BEAST_EXPECT(boost::icl::length(set) == 1); - BEAST_EXPECT(boost::icl::first(set) == 1); - - BEAST_EXPECT(from_string(set, "1-1")); - BEAST_EXPECT(boost::icl::length(set) == 1); - BEAST_EXPECT(boost::icl::first(set) == 1); - - BEAST_EXPECT(from_string(set, "1,4-6")); - BEAST_EXPECT(boost::icl::length(set) == 4); - BEAST_EXPECT(boost::icl::first(set) == 1); - BEAST_EXPECT(!boost::icl::contains(set, 2)); - BEAST_EXPECT(!boost::icl::contains(set, 3)); - BEAST_EXPECT(boost::icl::contains(set, 4)); - BEAST_EXPECT(boost::icl::contains(set, 5)); - BEAST_EXPECT(boost::icl::last(set) == 6); - - BEAST_EXPECT(from_string(set, "1-2,4-6")); - BEAST_EXPECT(boost::icl::length(set) == 5); - BEAST_EXPECT(boost::icl::first(set) == 1); - BEAST_EXPECT(boost::icl::contains(set, 2)); - BEAST_EXPECT(boost::icl::contains(set, 4)); - BEAST_EXPECT(boost::icl::last(set) == 6); - - BEAST_EXPECT(from_string(set, "1-2,6")); - BEAST_EXPECT(boost::icl::length(set) == 3); - BEAST_EXPECT(boost::icl::first(set) == 1); - BEAST_EXPECT(boost::icl::contains(set, 2)); - BEAST_EXPECT(boost::icl::last(set) == 6); - } - void - run() override - { - testPrevMissing(); - testToString(); - testFromString(); - } -}; - -BEAST_DEFINE_TESTSUITE(RangeSet, ripple_basics, ripple); - -} // namespace ripple diff --git a/src/test/basics/Slice_test.cpp b/src/test/basics/Slice_test.cpp deleted file mode 100644 index 3d474def79..0000000000 --- a/src/test/basics/Slice_test.cpp +++ /dev/null @@ -1,116 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github0.com/ripple/rippled - Copyright (c) 2012-2016 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. 
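The Slice suite being deleted here is re-added under doctest as src/tests/libxrpl/basics/Slice.cpp later in this patch. For readers unfamiliar with the class, the sketch below shows the three properties those tests rely on: equality compares contents, operator[] indexes bytes, and operator+= advances the view in place without copying. The header path is assumed to be xrpl/basics/Slice.h.

    #include <xrpl/basics/Slice.h>

    #include <array>
    #include <cassert>
    #include <cstdint>

    int
    main()
    {
        std::array<std::uint8_t, 4> const a{1, 2, 3, 4};
        std::array<std::uint8_t, 4> const b{1, 2, 3, 4};

        // Slices are lightweight views; equality is by content, not identity.
        ripple::Slice s = ripple::makeSlice(a);
        assert(s == ripple::makeSlice(b));
        assert(s.size() == 4 && s[2] == 3);

        // Advancing trims the view from the front without copying any data.
        s += 2;
        assert(s.size() == 2 && s[0] == 3);
        assert(s.data() == a.data() + 2);
    }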
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#include -#include - -#include -#include - -namespace ripple { -namespace test { - -struct Slice_test : beast::unit_test::suite -{ - void - run() override - { - std::uint8_t const data[] = { - 0xa8, 0xa1, 0x38, 0x45, 0x23, 0xec, 0xe4, 0x23, 0x71, 0x6d, 0x2a, - 0x18, 0xb4, 0x70, 0xcb, 0xf5, 0xac, 0x2d, 0x89, 0x4d, 0x19, 0x9c, - 0xf0, 0x2c, 0x15, 0xd1, 0xf9, 0x9b, 0x66, 0xd2, 0x30, 0xd3}; - - { - testcase("Equality & Inequality"); - - Slice const s0{}; - - BEAST_EXPECT(s0.size() == 0); - BEAST_EXPECT(s0.data() == nullptr); - BEAST_EXPECT(s0 == s0); - - // Test slices of equal and unequal size pointing to same data: - for (std::size_t i = 0; i != sizeof(data); ++i) - { - Slice const s1{data, i}; - - BEAST_EXPECT(s1.size() == i); - BEAST_EXPECT(s1.data() != nullptr); - - if (i == 0) - BEAST_EXPECT(s1 == s0); - else - BEAST_EXPECT(s1 != s0); - - for (std::size_t j = 0; j != sizeof(data); ++j) - { - Slice const s2{data, j}; - - if (i == j) - BEAST_EXPECT(s1 == s2); - else - BEAST_EXPECT(s1 != s2); - } - } - - // Test slices of equal size but pointing to different data: - std::array a; - std::array b; - - for (std::size_t i = 0; i != sizeof(data); ++i) - a[i] = b[i] = data[i]; - - BEAST_EXPECT(makeSlice(a) == makeSlice(b)); - b[7]++; - BEAST_EXPECT(makeSlice(a) != makeSlice(b)); - a[7]++; - BEAST_EXPECT(makeSlice(a) == makeSlice(b)); - } - - { - testcase("Indexing"); - - Slice const s{data, sizeof(data)}; - - for (std::size_t i = 0; i != sizeof(data); ++i) - BEAST_EXPECT(s[i] == data[i]); - } - - { - testcase("Advancing"); - - for (std::size_t i = 0; i < sizeof(data); ++i) - { - for (std::size_t j = 0; i + j < sizeof(data); ++j) - { - Slice s(data + i, sizeof(data) - i); - s += j; - - BEAST_EXPECT(s.data() == data + i + j); - BEAST_EXPECT(s.size() == sizeof(data) - i - j); - } - } - } - } -}; - -BEAST_DEFINE_TESTSUITE(Slice, ripple_basics, ripple); - -} // namespace test -} // namespace ripple diff --git a/src/test/basics/base64_test.cpp b/src/test/basics/base64_test.cpp deleted file mode 100644 index b6d67c7c06..0000000000 --- a/src/test/basics/base64_test.cpp +++ /dev/null @@ -1,82 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012-2018 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
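The base64 suite removed here reappears below as src/tests/libxrpl/basics/base64.cpp with the same fixtures. Its subject is the pair of free functions base64_encode and base64_decode; a minimal caller-side sketch, assuming the header lives at xrpl/basics/base64.h:

    #include <xrpl/basics/base64.h>

    #include <cassert>
    #include <string>

    int
    main()
    {
        // Round trip: decode(encode(x)) == x for any byte string.
        std::string const in = "foobar";
        std::string const encoded = ripple::base64_encode(in);
        assert(encoded == "Zm9vYmFy");
        assert(ripple::base64_decode(encoded) == in);

        // Decoding stops at the first character outside the base64 alphabet,
        // which is why the tests expect "not_base64!!" and "not" to decode to
        // the same bytes.
        assert(
            ripple::base64_decode("not_base64!!") ==
            ripple::base64_decode("not"));
    }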
-*/ -//============================================================================== - -// -// Copyright (c) 2016-2017 Vinnie Falco (vinnie dot falco at gmail dot com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// -// Official repository: https://github.com/boostorg/beast -// - -#include -#include - -namespace ripple { - -class base64_test : public beast::unit_test::suite -{ -public: - void - check(std::string const& in, std::string const& out) - { - auto const encoded = base64_encode(in); - BEAST_EXPECT(encoded == out); - BEAST_EXPECT(base64_decode(encoded) == in); - } - - void - run() override - { - check("", ""); - check("f", "Zg=="); - check("fo", "Zm8="); - check("foo", "Zm9v"); - check("foob", "Zm9vYg=="); - check("fooba", "Zm9vYmE="); - check("foobar", "Zm9vYmFy"); - - check( - "Man is distinguished, not only by his reason, but by this " - "singular passion from " - "other animals, which is a lust of the mind, that by a " - "perseverance of delight " - "in the continued and indefatigable generation of knowledge, " - "exceeds the short " - "vehemence of any carnal pleasure.", - "TWFuIGlzIGRpc3Rpbmd1aXNoZWQsIG5vdCBvbmx5IGJ5IGhpcyByZWFzb24sIGJ1dC" - "BieSB0aGlz" - "IHNpbmd1bGFyIHBhc3Npb24gZnJvbSBvdGhlciBhbmltYWxzLCB3aGljaCBpcyBhIG" - "x1c3Qgb2Yg" - "dGhlIG1pbmQsIHRoYXQgYnkgYSBwZXJzZXZlcmFuY2Ugb2YgZGVsaWdodCBpbiB0aG" - "UgY29udGlu" - "dWVkIGFuZCBpbmRlZmF0aWdhYmxlIGdlbmVyYXRpb24gb2Yga25vd2xlZGdlLCBleG" - "NlZWRzIHRo" - "ZSBzaG9ydCB2ZWhlbWVuY2Ugb2YgYW55IGNhcm5hbCBwbGVhc3VyZS4="); - - std::string const notBase64 = "not_base64!!"; - std::string const truncated = "not"; - BEAST_EXPECT(base64_decode(notBase64) == base64_decode(truncated)); - } -}; - -BEAST_DEFINE_TESTSUITE(base64, ripple_basics, ripple); - -} // namespace ripple diff --git a/src/test/basics/mulDiv_test.cpp b/src/test/basics/mulDiv_test.cpp deleted file mode 100644 index 61521577d9..0000000000 --- a/src/test/basics/mulDiv_test.cpp +++ /dev/null @@ -1,62 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012-2016 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
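The mulDiv suite removed here is re-added below as src/tests/libxrpl/basics/mulDiv.cpp. The contract it pins down is that ripple::mulDiv computes value * mul / div with a wide intermediate product and returns an empty optional when the final quotient does not fit in 64 bits, leaving the policy (saturate, reject, retry) to the caller. A short usage sketch, assuming the header path xrpl/basics/mulDiv.h; the inputs are taken from the expectations in the tests themselves.

    #include <xrpl/basics/mulDiv.h>

    #include <cassert>
    #include <cstdint>
    #include <limits>

    int
    main()
    {
        auto const max = std::numeric_limits<std::uint64_t>::max();

        // The intermediate product may exceed 64 bits; only the quotient
        // has to fit.
        auto const ok = ripple::mulDiv(max, 1000, max / 1000);
        assert(ok && *ok == 1'000'000);

        // When even the quotient overflows, the caller receives an empty
        // optional and must decide how to degrade.
        auto const overflowed = ripple::mulDiv(max - 1, max - 2, 5);
        assert(!overflowed);
    }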
-*/ -//============================================================================== - -#include -#include - -namespace ripple { -namespace test { - -struct mulDiv_test : beast::unit_test::suite -{ - void - run() override - { - auto const max = std::numeric_limits::max(); - std::uint64_t const max32 = std::numeric_limits::max(); - - auto result = mulDiv(85, 20, 5); - BEAST_EXPECT(result && *result == 340); - result = mulDiv(20, 85, 5); - BEAST_EXPECT(result && *result == 340); - - result = mulDiv(0, max - 1, max - 3); - BEAST_EXPECT(result && *result == 0); - result = mulDiv(max - 1, 0, max - 3); - BEAST_EXPECT(result && *result == 0); - - result = mulDiv(max, 2, max / 2); - BEAST_EXPECT(result && *result == 4); - result = mulDiv(max, 1000, max / 1000); - BEAST_EXPECT(result && *result == 1000000); - result = mulDiv(max, 1000, max / 1001); - BEAST_EXPECT(result && *result == 1001000); - result = mulDiv(max32 + 1, max32 + 1, 5); - BEAST_EXPECT(result && *result == 3689348814741910323); - - // Overflow - result = mulDiv(max - 1, max - 2, 5); - BEAST_EXPECT(!result); - } -}; - -BEAST_DEFINE_TESTSUITE(mulDiv, ripple_basics, ripple); - -} // namespace test -} // namespace ripple diff --git a/src/test/basics/scope_test.cpp b/src/test/basics/scope_test.cpp deleted file mode 100644 index 654f7e0a11..0000000000 --- a/src/test/basics/scope_test.cpp +++ /dev/null @@ -1,193 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github0.com/ripple/rippled - Copyright (c) 2021 Ripple Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#include -#include - -namespace ripple { -namespace test { - -struct scope_test : beast::unit_test::suite -{ - void - test_scope_exit() - { - // scope_exit always executes the functor on destruction, - // unless release() is called - int i = 0; - { - scope_exit x{[&i]() { i = 1; }}; - } - BEAST_EXPECT(i == 1); - { - scope_exit x{[&i]() { i = 2; }}; - x.release(); - } - BEAST_EXPECT(i == 1); - { - scope_exit x{[&i]() { i += 2; }}; - auto x2 = std::move(x); - } - BEAST_EXPECT(i == 3); - { - scope_exit x{[&i]() { i = 4; }}; - x.release(); - auto x2 = std::move(x); - } - BEAST_EXPECT(i == 3); - { - try - { - scope_exit x{[&i]() { i = 5; }}; - throw 1; - } - catch (...) - { - } - } - BEAST_EXPECT(i == 5); - { - try - { - scope_exit x{[&i]() { i = 6; }}; - x.release(); - throw 1; - } - catch (...) 
- { - } - } - BEAST_EXPECT(i == 5); - } - - void - test_scope_fail() - { - // scope_fail executes the functor on destruction only - // if an exception is unwinding, unless release() is called - int i = 0; - { - scope_fail x{[&i]() { i = 1; }}; - } - BEAST_EXPECT(i == 0); - { - scope_fail x{[&i]() { i = 2; }}; - x.release(); - } - BEAST_EXPECT(i == 0); - { - scope_fail x{[&i]() { i = 3; }}; - auto x2 = std::move(x); - } - BEAST_EXPECT(i == 0); - { - scope_fail x{[&i]() { i = 4; }}; - x.release(); - auto x2 = std::move(x); - } - BEAST_EXPECT(i == 0); - { - try - { - scope_fail x{[&i]() { i = 5; }}; - throw 1; - } - catch (...) - { - } - } - BEAST_EXPECT(i == 5); - { - try - { - scope_fail x{[&i]() { i = 6; }}; - x.release(); - throw 1; - } - catch (...) - { - } - } - BEAST_EXPECT(i == 5); - } - - void - test_scope_success() - { - // scope_success executes the functor on destruction only - // if an exception is not unwinding, unless release() is called - int i = 0; - { - scope_success x{[&i]() { i = 1; }}; - } - BEAST_EXPECT(i == 1); - { - scope_success x{[&i]() { i = 2; }}; - x.release(); - } - BEAST_EXPECT(i == 1); - { - scope_success x{[&i]() { i += 2; }}; - auto x2 = std::move(x); - } - BEAST_EXPECT(i == 3); - { - scope_success x{[&i]() { i = 4; }}; - x.release(); - auto x2 = std::move(x); - } - BEAST_EXPECT(i == 3); - { - try - { - scope_success x{[&i]() { i = 5; }}; - throw 1; - } - catch (...) - { - } - } - BEAST_EXPECT(i == 3); - { - try - { - scope_success x{[&i]() { i = 6; }}; - x.release(); - throw 1; - } - catch (...) - { - } - } - BEAST_EXPECT(i == 3); - } - - void - run() override - { - test_scope_exit(); - test_scope_fail(); - test_scope_success(); - } -}; - -BEAST_DEFINE_TESTSUITE(scope, ripple_basics, ripple); - -} // namespace test -} // namespace ripple diff --git a/src/test/basics/tagged_integer_test.cpp b/src/test/basics/tagged_integer_test.cpp deleted file mode 100644 index cb15d246a6..0000000000 --- a/src/test/basics/tagged_integer_test.cpp +++ /dev/null @@ -1,258 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright 2014, Nikolaos D. Bougalis - - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
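The tagged_integer suite removed here documents why the type exists: two instantiations with different tag types share a representation but refuse to convert or assign across tags, so mixing up quantities becomes a compile-time error rather than a silent bug. A minimal usage sketch with invented tag names, assuming the header path xrpl/basics/tagged_integer.h:

    #include <xrpl/basics/tagged_integer.h>

    #include <cstdint>
    #include <type_traits>

    // Distinct tags create distinct, non-interchangeable integer types.
    struct LedgerSeqTag
    {
    };
    struct TxCountTag
    {
    };

    using LedgerSeq = ripple::tagged_integer<std::uint32_t, LedgerSeqTag>;
    using TxCount = ripple::tagged_integer<std::uint32_t, TxCountTag>;

    static_assert(std::is_constructible<LedgerSeq, std::uint32_t>::value);
    static_assert(!std::is_assignable<LedgerSeq&, TxCount>::value);

    int
    main()
    {
        LedgerSeq seq{10};
        ++seq;  // arithmetic stays within one tag
        // seq = TxCount{3};  // would not compile: different tag
        return seq == LedgerSeq{11} ? 0 : 1;
    }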
-*/ -//============================================================================== - -#include -#include - -#include - -namespace ripple { -namespace test { - -class tagged_integer_test : public beast::unit_test::suite -{ -private: - struct Tag1 - { - }; - struct Tag2 - { - }; - - // Static checks that types are not interoperable - - using TagUInt1 = tagged_integer; - using TagUInt2 = tagged_integer; - using TagUInt3 = tagged_integer; - - // Check construction of tagged_integers - static_assert( - std::is_constructible::value, - "TagUInt1 should be constructible using a std::uint32_t"); - - static_assert( - !std::is_constructible::value, - "TagUInt1 should not be constructible using a std::uint64_t"); - - static_assert( - std::is_constructible::value, - "TagUInt3 should be constructible using a std::uint32_t"); - - static_assert( - std::is_constructible::value, - "TagUInt3 should be constructible using a std::uint64_t"); - - // Check assignment of tagged_integers - static_assert( - !std::is_assignable::value, - "TagUInt1 should not be assignable with a std::uint32_t"); - - static_assert( - !std::is_assignable::value, - "TagUInt1 should not be assignable with a std::uint64_t"); - - static_assert( - !std::is_assignable::value, - "TagUInt3 should not be assignable with a std::uint32_t"); - - static_assert( - !std::is_assignable::value, - "TagUInt3 should not be assignable with a std::uint64_t"); - - static_assert( - std::is_assignable::value, - "TagUInt1 should be assignable with a TagUInt1"); - - static_assert( - !std::is_assignable::value, - "TagUInt1 should not be assignable with a TagUInt2"); - - static_assert( - std::is_assignable::value, - "TagUInt3 should be assignable with a TagUInt1"); - - static_assert( - !std::is_assignable::value, - "TagUInt1 should not be assignable with a TagUInt3"); - - static_assert( - !std::is_assignable::value, - "TagUInt3 should not be assignable with a TagUInt1"); - - // Check convertibility of tagged_integers - static_assert( - !std::is_convertible::value, - "std::uint32_t should not be convertible to a TagUInt1"); - - static_assert( - !std::is_convertible::value, - "std::uint32_t should not be convertible to a TagUInt3"); - - static_assert( - !std::is_convertible::value, - "std::uint64_t should not be convertible to a TagUInt3"); - - static_assert( - !std::is_convertible::value, - "std::uint64_t should not be convertible to a TagUInt2"); - - static_assert( - !std::is_convertible::value, - "TagUInt1 should not be convertible to TagUInt2"); - - static_assert( - !std::is_convertible::value, - "TagUInt1 should not be convertible to TagUInt3"); - - static_assert( - !std::is_convertible::value, - "TagUInt2 should not be convertible to a TagUInt3"); - -public: - void - run() override - { - using TagInt = tagged_integer; - - { - testcase("Comparison Operators"); - - TagInt const zero(0); - TagInt const one(1); - - BEAST_EXPECT(one == one); - BEAST_EXPECT(!(one == zero)); - - BEAST_EXPECT(one != zero); - BEAST_EXPECT(!(one != one)); - - BEAST_EXPECT(zero < one); - BEAST_EXPECT(!(one < zero)); - - BEAST_EXPECT(one > zero); - BEAST_EXPECT(!(zero > one)); - - BEAST_EXPECT(one >= one); - BEAST_EXPECT(one >= zero); - BEAST_EXPECT(!(zero >= one)); - - BEAST_EXPECT(zero <= one); - BEAST_EXPECT(zero <= zero); - BEAST_EXPECT(!(one <= zero)); - } - - { - testcase("Increment/Decrement Operators"); - TagInt const zero(0); - TagInt const one(1); - TagInt a{0}; - ++a; - BEAST_EXPECT(a == one); - --a; - BEAST_EXPECT(a == zero); - a++; - BEAST_EXPECT(a == one); - a--; - 
BEAST_EXPECT(a == zero); - } - - { - testcase("Arithmetic Operators"); - TagInt a{-2}; - BEAST_EXPECT(+a == TagInt{-2}); - BEAST_EXPECT(-a == TagInt{2}); - BEAST_EXPECT(TagInt{-3} + TagInt{4} == TagInt{1}); - BEAST_EXPECT(TagInt{-3} - TagInt{4} == TagInt{-7}); - BEAST_EXPECT(TagInt{-3} * TagInt{4} == TagInt{-12}); - BEAST_EXPECT(TagInt{8} / TagInt{4} == TagInt{2}); - BEAST_EXPECT(TagInt{7} % TagInt{4} == TagInt{3}); - - BEAST_EXPECT(~TagInt{8} == TagInt{~TagInt::value_type{8}}); - BEAST_EXPECT((TagInt{6} & TagInt{3}) == TagInt{2}); - BEAST_EXPECT((TagInt{6} | TagInt{3}) == TagInt{7}); - BEAST_EXPECT((TagInt{6} ^ TagInt{3}) == TagInt{5}); - - BEAST_EXPECT((TagInt{4} << TagInt{2}) == TagInt{16}); - BEAST_EXPECT((TagInt{16} >> TagInt{2}) == TagInt{4}); - } - { - testcase("Assignment Operators"); - TagInt a{-2}; - TagInt b{0}; - b = a; - BEAST_EXPECT(b == TagInt{-2}); - - // -3 + 4 == 1 - a = TagInt{-3}; - a += TagInt{4}; - BEAST_EXPECT(a == TagInt{1}); - - // -3 - 4 == -7 - a = TagInt{-3}; - a -= TagInt{4}; - BEAST_EXPECT(a == TagInt{-7}); - - // -3 * 4 == -12 - a = TagInt{-3}; - a *= TagInt{4}; - BEAST_EXPECT(a == TagInt{-12}); - - // 8/4 == 2 - a = TagInt{8}; - a /= TagInt{4}; - BEAST_EXPECT(a == TagInt{2}); - - // 7 % 4 == 3 - a = TagInt{7}; - a %= TagInt{4}; - BEAST_EXPECT(a == TagInt{3}); - - // 6 & 3 == 2 - a = TagInt{6}; - a /= TagInt{3}; - BEAST_EXPECT(a == TagInt{2}); - - // 6 | 3 == 7 - a = TagInt{6}; - a |= TagInt{3}; - BEAST_EXPECT(a == TagInt{7}); - - // 6 ^ 3 == 5 - a = TagInt{6}; - a ^= TagInt{3}; - BEAST_EXPECT(a == TagInt{5}); - - // 4 << 2 == 16 - a = TagInt{4}; - a <<= TagInt{2}; - BEAST_EXPECT(a == TagInt{16}); - - // 16 >> 2 == 4 - a = TagInt{16}; - a >>= TagInt{2}; - BEAST_EXPECT(a == TagInt{4}); - } - } -}; - -BEAST_DEFINE_TESTSUITE(tagged_integer, ripple_basics, ripple); - -} // namespace test -} // namespace ripple diff --git a/src/tests/README.md b/src/tests/README.md new file mode 100644 index 0000000000..8065316580 --- /dev/null +++ b/src/tests/README.md @@ -0,0 +1,4 @@ +# Unit tests +This directory contains unit tests for the project. The difference from existing `src/test` folder +is that we switch to 3rd party testing framework (doctest). We intend to gradually move existing tests +from our own framework to doctest and such tests will be moved to this new folder. diff --git a/src/tests/libxrpl/CMakeLists.txt b/src/tests/libxrpl/CMakeLists.txt new file mode 100644 index 0000000000..68c6fa6cb3 --- /dev/null +++ b/src/tests/libxrpl/CMakeLists.txt @@ -0,0 +1,14 @@ +include(xrpl_add_test) + +# Test requirements. +find_package(doctest REQUIRED) + +# Common library dependencies for the rest of the tests. +add_library(xrpl.imports.test INTERFACE) +target_link_libraries(xrpl.imports.test INTERFACE doctest::doctest xrpl.libxrpl) + +# One test for each module. +xrpl_add_test(basics) +target_link_libraries(xrpl.test.basics PRIVATE xrpl.imports.test) +xrpl_add_test(crypto) +target_link_libraries(xrpl.test.crypto PRIVATE xrpl.imports.test) diff --git a/src/tests/libxrpl/basics/RangeSet.cpp b/src/tests/libxrpl/basics/RangeSet.cpp new file mode 100644 index 0000000000..ac0e1d9551 --- /dev/null +++ b/src/tests/libxrpl/basics/RangeSet.cpp @@ -0,0 +1,129 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2012 Ripple Labs Inc. 
+ + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include + +#include + +#include +#include + +using namespace ripple; + +TEST_SUITE_BEGIN("RangeSet"); + +TEST_CASE("prevMissing") +{ + // Set will include: + // [ 0, 5] + // [10,15] + // [20,25] + // etc... + + RangeSet set; + for (std::uint32_t i = 0; i < 10; ++i) + set.insert(range(10 * i, 10 * i + 5)); + + for (std::uint32_t i = 1; i < 100; ++i) + { + std::optional expected; + // no prev missing in domain for i <= 6 + if (i > 6) + { + std::uint32_t const oneBelowRange = (10 * (i / 10)) - 1; + + expected = ((i % 10) > 6) ? (i - 1) : oneBelowRange; + } + CHECK(prevMissing(set, i) == expected); + } +} + +TEST_CASE("toString") +{ + RangeSet set; + CHECK(to_string(set) == "empty"); + + set.insert(1); + CHECK(to_string(set) == "1"); + + set.insert(range(4u, 6u)); + CHECK(to_string(set) == "1,4-6"); + + set.insert(2); + CHECK(to_string(set) == "1-2,4-6"); + + set.erase(range(4u, 5u)); + CHECK(to_string(set) == "1-2,6"); +} + +TEST_CASE("fromString") +{ + RangeSet set; + + CHECK(!from_string(set, "")); + CHECK(boost::icl::length(set) == 0); + + CHECK(!from_string(set, "#")); + CHECK(boost::icl::length(set) == 0); + + CHECK(!from_string(set, ",")); + CHECK(boost::icl::length(set) == 0); + + CHECK(!from_string(set, ",-")); + CHECK(boost::icl::length(set) == 0); + + CHECK(!from_string(set, "1,,2")); + CHECK(boost::icl::length(set) == 0); + + CHECK(from_string(set, "1")); + CHECK(boost::icl::length(set) == 1); + CHECK(boost::icl::first(set) == 1); + + CHECK(from_string(set, "1,1")); + CHECK(boost::icl::length(set) == 1); + CHECK(boost::icl::first(set) == 1); + + CHECK(from_string(set, "1-1")); + CHECK(boost::icl::length(set) == 1); + CHECK(boost::icl::first(set) == 1); + + CHECK(from_string(set, "1,4-6")); + CHECK(boost::icl::length(set) == 4); + CHECK(boost::icl::first(set) == 1); + CHECK(!boost::icl::contains(set, 2)); + CHECK(!boost::icl::contains(set, 3)); + CHECK(boost::icl::contains(set, 4)); + CHECK(boost::icl::contains(set, 5)); + CHECK(boost::icl::last(set) == 6); + + CHECK(from_string(set, "1-2,4-6")); + CHECK(boost::icl::length(set) == 5); + CHECK(boost::icl::first(set) == 1); + CHECK(boost::icl::contains(set, 2)); + CHECK(boost::icl::contains(set, 4)); + CHECK(boost::icl::last(set) == 6); + + CHECK(from_string(set, "1-2,6")); + CHECK(boost::icl::length(set) == 3); + CHECK(boost::icl::first(set) == 1); + CHECK(boost::icl::contains(set, 2)); + CHECK(boost::icl::last(set) == 6); +} + +TEST_SUITE_END(); diff --git a/src/tests/libxrpl/basics/Slice.cpp b/src/tests/libxrpl/basics/Slice.cpp new file mode 100644 index 0000000000..eabd9b7dc7 --- /dev/null +++ b/src/tests/libxrpl/basics/Slice.cpp @@ -0,0 +1,105 @@ +//------------------------------------------------------------------------------ +/* + 
This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2012 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include + +#include + +#include +#include + +using namespace ripple; + +static std::uint8_t const data[] = { + 0xa8, 0xa1, 0x38, 0x45, 0x23, 0xec, 0xe4, 0x23, 0x71, 0x6d, 0x2a, + 0x18, 0xb4, 0x70, 0xcb, 0xf5, 0xac, 0x2d, 0x89, 0x4d, 0x19, 0x9c, + 0xf0, 0x2c, 0x15, 0xd1, 0xf9, 0x9b, 0x66, 0xd2, 0x30, 0xd3}; + +TEST_SUITE_BEGIN("Slice"); + +TEST_CASE("equality & inequality") +{ + Slice const s0{}; + + CHECK(s0.size() == 0); + CHECK(s0.data() == nullptr); + CHECK(s0 == s0); + + // Test slices of equal and unequal size pointing to same data: + for (std::size_t i = 0; i != sizeof(data); ++i) + { + Slice const s1{data, i}; + + CHECK(s1.size() == i); + CHECK(s1.data() != nullptr); + + if (i == 0) + CHECK(s1 == s0); + else + CHECK(s1 != s0); + + for (std::size_t j = 0; j != sizeof(data); ++j) + { + Slice const s2{data, j}; + + if (i == j) + CHECK(s1 == s2); + else + CHECK(s1 != s2); + } + } + + // Test slices of equal size but pointing to different data: + std::array a; + std::array b; + + for (std::size_t i = 0; i != sizeof(data); ++i) + a[i] = b[i] = data[i]; + + CHECK(makeSlice(a) == makeSlice(b)); + b[7]++; + CHECK(makeSlice(a) != makeSlice(b)); + a[7]++; + CHECK(makeSlice(a) == makeSlice(b)); +} + +TEST_CASE("indexing") +{ + Slice const s{data, sizeof(data)}; + + for (std::size_t i = 0; i != sizeof(data); ++i) + CHECK(s[i] == data[i]); +} + +TEST_CASE("advancing") +{ + for (std::size_t i = 0; i < sizeof(data); ++i) + { + for (std::size_t j = 0; i + j < sizeof(data); ++j) + { + Slice s(data + i, sizeof(data) - i); + s += j; + + CHECK(s.data() == data + i + j); + CHECK(s.size() == sizeof(data) - i - j); + } + } +} + +TEST_SUITE_END(); diff --git a/src/tests/libxrpl/basics/base64.cpp b/src/tests/libxrpl/basics/base64.cpp new file mode 100644 index 0000000000..fe9b86abb1 --- /dev/null +++ b/src/tests/libxrpl/basics/base64.cpp @@ -0,0 +1,67 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2012 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include + +#include + +#include + +using namespace ripple; + +static void +check(std::string const& in, std::string const& out) +{ + auto const encoded = base64_encode(in); + CHECK(encoded == out); + CHECK(base64_decode(encoded) == in); +} + +TEST_CASE("base64") +{ + check("", ""); + check("f", "Zg=="); + check("fo", "Zm8="); + check("foo", "Zm9v"); + check("foob", "Zm9vYg=="); + check("fooba", "Zm9vYmE="); + check("foobar", "Zm9vYmFy"); + + check( + "Man is distinguished, not only by his reason, but by this " + "singular passion from " + "other animals, which is a lust of the mind, that by a " + "perseverance of delight " + "in the continued and indefatigable generation of knowledge, " + "exceeds the short " + "vehemence of any carnal pleasure.", + "TWFuIGlzIGRpc3Rpbmd1aXNoZWQsIG5vdCBvbmx5IGJ5IGhpcyByZWFzb24sIGJ1dC" + "BieSB0aGlz" + "IHNpbmd1bGFyIHBhc3Npb24gZnJvbSBvdGhlciBhbmltYWxzLCB3aGljaCBpcyBhIG" + "x1c3Qgb2Yg" + "dGhlIG1pbmQsIHRoYXQgYnkgYSBwZXJzZXZlcmFuY2Ugb2YgZGVsaWdodCBpbiB0aG" + "UgY29udGlu" + "dWVkIGFuZCBpbmRlZmF0aWdhYmxlIGdlbmVyYXRpb24gb2Yga25vd2xlZGdlLCBleG" + "NlZWRzIHRo" + "ZSBzaG9ydCB2ZWhlbWVuY2Ugb2YgYW55IGNhcm5hbCBwbGVhc3VyZS4="); + + std::string const notBase64 = "not_base64!!"; + std::string const truncated = "not"; + CHECK(base64_decode(notBase64) == base64_decode(truncated)); +} diff --git a/src/test/basics/contract_test.cpp b/src/tests/libxrpl/basics/contract.cpp similarity index 60% rename from src/test/basics/contract_test.cpp rename to src/tests/libxrpl/basics/contract.cpp index 9595dbabcc..9ddf044f17 100644 --- a/src/test/basics/contract_test.cpp +++ b/src/tests/libxrpl/basics/contract.cpp @@ -1,7 +1,7 @@ //------------------------------------------------------------------------------ /* This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2013 Ripple Labs Inc. + Copyright (c) 2012 Ripple Labs Inc. Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above @@ -18,46 +18,39 @@ //============================================================================== #include -#include +#include + +#include #include -namespace ripple { +using namespace ripple; -class contract_test : public beast::unit_test::suite +TEST_CASE("contract") { -public: - void - run() override + try { + Throw("Throw test"); + } + catch (std::runtime_error const& e1) + { + CHECK(std::string(e1.what()) == "Throw test"); + try { - Throw("Throw test"); + Rethrow(); } - catch (std::runtime_error const& e1) + catch (std::runtime_error const& e2) { - BEAST_EXPECT(std::string(e1.what()) == "Throw test"); - - try - { - Rethrow(); - } - catch (std::runtime_error const& e2) - { - BEAST_EXPECT(std::string(e2.what()) == "Throw test"); - } - catch (...) - { - BEAST_EXPECT(false); - } + CHECK(std::string(e2.what()) == "Throw test"); } catch (...) { - BEAST_EXPECT(false); + CHECK(false); } } -}; - -BEAST_DEFINE_TESTSUITE(contract, basics, ripple); - -} // namespace ripple + catch (...) 
+ { + CHECK(false); + } +} diff --git a/src/tests/libxrpl/basics/main.cpp b/src/tests/libxrpl/basics/main.cpp new file mode 100644 index 0000000000..0a3f254ea8 --- /dev/null +++ b/src/tests/libxrpl/basics/main.cpp @@ -0,0 +1,2 @@ +#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN +#include diff --git a/src/tests/libxrpl/basics/mulDiv.cpp b/src/tests/libxrpl/basics/mulDiv.cpp new file mode 100644 index 0000000000..bdbbfdc741 --- /dev/null +++ b/src/tests/libxrpl/basics/mulDiv.cpp @@ -0,0 +1,64 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2012 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include + +#include + +#include +#include + +using namespace ripple; + +TEST_CASE("mulDiv") +{ + auto const max = std::numeric_limits::max(); + std::uint64_t const max32 = std::numeric_limits::max(); + + auto result = mulDiv(85, 20, 5); + REQUIRE(result); + CHECK(*result == 340); + result = mulDiv(20, 85, 5); + REQUIRE(result); + CHECK(*result == 340); + + result = mulDiv(0, max - 1, max - 3); + REQUIRE(result); + CHECK(*result == 0); + result = mulDiv(max - 1, 0, max - 3); + REQUIRE(result); + CHECK(*result == 0); + + result = mulDiv(max, 2, max / 2); + REQUIRE(result); + CHECK(*result == 4); + result = mulDiv(max, 1000, max / 1000); + REQUIRE(result); + CHECK(*result == 1000000); + result = mulDiv(max, 1000, max / 1001); + REQUIRE(result); + CHECK(*result == 1001000); + result = mulDiv(max32 + 1, max32 + 1, 5); + REQUIRE(result); + CHECK(*result == 3689348814741910323); + + // Overflow + result = mulDiv(max - 1, max - 2, 5); + CHECK(!result); +} diff --git a/src/tests/libxrpl/basics/scope.cpp b/src/tests/libxrpl/basics/scope.cpp new file mode 100644 index 0000000000..c9cfc1e7f8 --- /dev/null +++ b/src/tests/libxrpl/basics/scope.cpp @@ -0,0 +1,174 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2021 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include + +#include + +using namespace ripple; + +TEST_CASE("scope_exit") +{ + // scope_exit always executes the functor on destruction, + // unless release() is called + int i = 0; + { + scope_exit x{[&i]() { i = 1; }}; + } + CHECK(i == 1); + { + scope_exit x{[&i]() { i = 2; }}; + x.release(); + } + CHECK(i == 1); + { + scope_exit x{[&i]() { i += 2; }}; + auto x2 = std::move(x); + } + CHECK(i == 3); + { + scope_exit x{[&i]() { i = 4; }}; + x.release(); + auto x2 = std::move(x); + } + CHECK(i == 3); + { + try + { + scope_exit x{[&i]() { i = 5; }}; + throw 1; + } + catch (...) + { + } + } + CHECK(i == 5); + { + try + { + scope_exit x{[&i]() { i = 6; }}; + x.release(); + throw 1; + } + catch (...) + { + } + } + CHECK(i == 5); +} + +TEST_CASE("scope_fail") +{ + // scope_fail executes the functor on destruction only + // if an exception is unwinding, unless release() is called + int i = 0; + { + scope_fail x{[&i]() { i = 1; }}; + } + CHECK(i == 0); + { + scope_fail x{[&i]() { i = 2; }}; + x.release(); + } + CHECK(i == 0); + { + scope_fail x{[&i]() { i = 3; }}; + auto x2 = std::move(x); + } + CHECK(i == 0); + { + scope_fail x{[&i]() { i = 4; }}; + x.release(); + auto x2 = std::move(x); + } + CHECK(i == 0); + { + try + { + scope_fail x{[&i]() { i = 5; }}; + throw 1; + } + catch (...) + { + } + } + CHECK(i == 5); + { + try + { + scope_fail x{[&i]() { i = 6; }}; + x.release(); + throw 1; + } + catch (...) + { + } + } + CHECK(i == 5); +} + +TEST_CASE("scope_success") +{ + // scope_success executes the functor on destruction only + // if an exception is not unwinding, unless release() is called + int i = 0; + { + scope_success x{[&i]() { i = 1; }}; + } + CHECK(i == 1); + { + scope_success x{[&i]() { i = 2; }}; + x.release(); + } + CHECK(i == 1); + { + scope_success x{[&i]() { i += 2; }}; + auto x2 = std::move(x); + } + CHECK(i == 3); + { + scope_success x{[&i]() { i = 4; }}; + x.release(); + auto x2 = std::move(x); + } + CHECK(i == 3); + { + try + { + scope_success x{[&i]() { i = 5; }}; + throw 1; + } + catch (...) + { + } + } + CHECK(i == 3); + { + try + { + scope_success x{[&i]() { i = 6; }}; + x.release(); + throw 1; + } + catch (...) + { + } + } + CHECK(i == 3); +} diff --git a/src/tests/libxrpl/basics/tagged_integer.cpp b/src/tests/libxrpl/basics/tagged_integer.cpp new file mode 100644 index 0000000000..d699b64a70 --- /dev/null +++ b/src/tests/libxrpl/basics/tagged_integer.cpp @@ -0,0 +1,247 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2014 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include + +#include + +#include + +using namespace ripple; + +struct Tag1 +{ +}; +struct Tag2 +{ +}; + +// Static checks that types are not interoperable + +using TagUInt1 = tagged_integer; +using TagUInt2 = tagged_integer; +using TagUInt3 = tagged_integer; + +// Check construction of tagged_integers +static_assert( + std::is_constructible::value, + "TagUInt1 should be constructible using a std::uint32_t"); + +static_assert( + !std::is_constructible::value, + "TagUInt1 should not be constructible using a std::uint64_t"); + +static_assert( + std::is_constructible::value, + "TagUInt3 should be constructible using a std::uint32_t"); + +static_assert( + std::is_constructible::value, + "TagUInt3 should be constructible using a std::uint64_t"); + +// Check assignment of tagged_integers +static_assert( + !std::is_assignable::value, + "TagUInt1 should not be assignable with a std::uint32_t"); + +static_assert( + !std::is_assignable::value, + "TagUInt1 should not be assignable with a std::uint64_t"); + +static_assert( + !std::is_assignable::value, + "TagUInt3 should not be assignable with a std::uint32_t"); + +static_assert( + !std::is_assignable::value, + "TagUInt3 should not be assignable with a std::uint64_t"); + +static_assert( + std::is_assignable::value, + "TagUInt1 should be assignable with a TagUInt1"); + +static_assert( + !std::is_assignable::value, + "TagUInt1 should not be assignable with a TagUInt2"); + +static_assert( + std::is_assignable::value, + "TagUInt3 should be assignable with a TagUInt1"); + +static_assert( + !std::is_assignable::value, + "TagUInt1 should not be assignable with a TagUInt3"); + +static_assert( + !std::is_assignable::value, + "TagUInt3 should not be assignable with a TagUInt1"); + +// Check convertibility of tagged_integers +static_assert( + !std::is_convertible::value, + "std::uint32_t should not be convertible to a TagUInt1"); + +static_assert( + !std::is_convertible::value, + "std::uint32_t should not be convertible to a TagUInt3"); + +static_assert( + !std::is_convertible::value, + "std::uint64_t should not be convertible to a TagUInt3"); + +static_assert( + !std::is_convertible::value, + "std::uint64_t should not be convertible to a TagUInt2"); + +static_assert( + !std::is_convertible::value, + "TagUInt1 should not be convertible to TagUInt2"); + +static_assert( + !std::is_convertible::value, + "TagUInt1 should not be convertible to TagUInt3"); + +static_assert( + !std::is_convertible::value, + "TagUInt2 should not be convertible to a TagUInt3"); + +TEST_SUITE_BEGIN("tagged_integer"); + +using TagInt = tagged_integer; + +TEST_CASE("comparison operators") +{ + TagInt const zero(0); + TagInt const one(1); + + CHECK(one == one); + CHECK(!(one == zero)); + + CHECK(one != zero); + CHECK(!(one != one)); + + CHECK(zero < one); + CHECK(!(one < zero)); + + CHECK(one > zero); + CHECK(!(zero > one)); + + CHECK(one >= one); + CHECK(one >= zero); + CHECK(!(zero >= one)); + + CHECK(zero <= one); + CHECK(zero <= zero); + CHECK(!(one <= zero)); +} + +TEST_CASE("increment / decrement operators") +{ + TagInt const zero(0); + TagInt const one(1); + TagInt a{0}; + 
++a; + CHECK(a == one); + --a; + CHECK(a == zero); + a++; + CHECK(a == one); + a--; + CHECK(a == zero); +} + +TEST_CASE("arithmetic operators") +{ + TagInt a{-2}; + CHECK(+a == TagInt{-2}); + CHECK(-a == TagInt{2}); + CHECK(TagInt{-3} + TagInt{4} == TagInt{1}); + CHECK(TagInt{-3} - TagInt{4} == TagInt{-7}); + CHECK(TagInt{-3} * TagInt{4} == TagInt{-12}); + CHECK(TagInt{8} / TagInt{4} == TagInt{2}); + CHECK(TagInt{7} % TagInt{4} == TagInt{3}); + + CHECK(~TagInt{8} == TagInt{~TagInt::value_type{8}}); + CHECK((TagInt{6} & TagInt{3}) == TagInt{2}); + CHECK((TagInt{6} | TagInt{3}) == TagInt{7}); + CHECK((TagInt{6} ^ TagInt{3}) == TagInt{5}); + + CHECK((TagInt{4} << TagInt{2}) == TagInt{16}); + CHECK((TagInt{16} >> TagInt{2}) == TagInt{4}); +} + +TEST_CASE("assignment operators") +{ + TagInt a{-2}; + TagInt b{0}; + b = a; + CHECK(b == TagInt{-2}); + + // -3 + 4 == 1 + a = TagInt{-3}; + a += TagInt{4}; + CHECK(a == TagInt{1}); + + // -3 - 4 == -7 + a = TagInt{-3}; + a -= TagInt{4}; + CHECK(a == TagInt{-7}); + + // -3 * 4 == -12 + a = TagInt{-3}; + a *= TagInt{4}; + CHECK(a == TagInt{-12}); + + // 8/4 == 2 + a = TagInt{8}; + a /= TagInt{4}; + CHECK(a == TagInt{2}); + + // 7 % 4 == 3 + a = TagInt{7}; + a %= TagInt{4}; + CHECK(a == TagInt{3}); + + // 6 & 3 == 2 + a = TagInt{6}; + a /= TagInt{3}; + CHECK(a == TagInt{2}); + + // 6 | 3 == 7 + a = TagInt{6}; + a |= TagInt{3}; + CHECK(a == TagInt{7}); + + // 6 ^ 3 == 5 + a = TagInt{6}; + a ^= TagInt{3}; + CHECK(a == TagInt{5}); + + // 4 << 2 == 16 + a = TagInt{4}; + a <<= TagInt{2}; + CHECK(a == TagInt{16}); + + // 16 >> 2 == 4 + a = TagInt{16}; + a >>= TagInt{2}; + CHECK(a == TagInt{4}); +} + +TEST_SUITE_END(); diff --git a/src/test/core/CryptoPRNG_test.cpp b/src/tests/libxrpl/crypto/csprng.cpp similarity index 58% rename from src/test/core/CryptoPRNG_test.cpp rename to src/tests/libxrpl/crypto/csprng.cpp index 21924e582c..a55d49b67c 100644 --- a/src/test/core/CryptoPRNG_test.cpp +++ b/src/tests/libxrpl/crypto/csprng.cpp @@ -17,44 +17,18 @@ */ //============================================================================== -#include - -#include #include -namespace ripple { +#include -class CryptoPRNG_test : public beast::unit_test::suite +using namespace ripple; + +TEST_CASE("get values") { - void - testGetValues() - { - testcase("Get Values"); - try - { - auto& engine = crypto_prng(); - auto rand_val = engine(); - BEAST_EXPECT(rand_val >= engine.min()); - BEAST_EXPECT(rand_val <= engine.max()); - - uint16_t twoByte{0}; - engine(&twoByte, sizeof(uint16_t)); - pass(); - } - catch (std::exception&) - { - fail(); - } - } - -public: - void - run() override - { - testGetValues(); - } -}; - -BEAST_DEFINE_TESTSUITE(CryptoPRNG, core, ripple); - -} // namespace ripple + auto& engine = crypto_prng(); + auto rand_val = engine(); + CHECK(rand_val >= engine.min()); + CHECK(rand_val <= engine.max()); + uint16_t twoByte{0}; + engine(&twoByte, sizeof(uint16_t)); +} diff --git a/src/tests/libxrpl/crypto/main.cpp b/src/tests/libxrpl/crypto/main.cpp new file mode 100644 index 0000000000..0a3f254ea8 --- /dev/null +++ b/src/tests/libxrpl/crypto/main.cpp @@ -0,0 +1,2 @@ +#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN +#include From c2f3e2e2637d68183458899b786588ee2b73602d Mon Sep 17 00:00:00 2001 From: Mayukha Vadari Date: Thu, 3 Jul 2025 00:40:25 +0530 Subject: [PATCH 066/244] fix: crash when trace-logging in tests (#5529) This PR fixes a crash in tests when the test `Env is run at trace/debug log level. 
This issue only affects tests, and only if logging at trace/debug level, so really only relevant during rippled development, and does not affect production servers. --- src/test/unit_test/SuiteJournal.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/test/unit_test/SuiteJournal.h b/src/test/unit_test/SuiteJournal.h index b5c59f3d29..d56c297b0a 100644 --- a/src/test/unit_test/SuiteJournal.h +++ b/src/test/unit_test/SuiteJournal.h @@ -94,6 +94,8 @@ SuiteJournalSink::writeAlways( return "FTL:"; }(); + static std::mutex log_mutex; + std::lock_guard lock(log_mutex); suite_.log << s << partition_ << text << std::endl; } From 9874d47d7fbfe81e4cd78afd5b60ec33124ee2e9 Mon Sep 17 00:00:00 2001 From: Jingchen Date: Thu, 3 Jul 2025 15:27:37 +0100 Subject: [PATCH 067/244] Decouple CredentialHelpers from xrpld/app/tx (#5487) This PR refactors `CredentialHelpers` and removes some unnecessary dependencies as a step of modularization. The ledger component is almost independent except that it references `MPTokenAuthorize` and `CredentialHelpers.h`, and the latter further references `Transactor.h`. This PR partially clears the path to modularizing the ledger component and decouples `CredentialHelpers` from xrpld. --- src/xrpld/app/misc/CredentialHelpers.cpp | 45 ++++++++++++----------- src/xrpld/app/misc/CredentialHelpers.h | 25 ++++++++++--- src/xrpld/app/tx/detail/DeleteAccount.cpp | 9 +++-- src/xrpld/app/tx/detail/Escrow.cpp | 9 +++-- src/xrpld/app/tx/detail/PayChan.cpp | 9 +++-- src/xrpld/app/tx/detail/Payment.cpp | 33 +++++++++++++---- 6 files changed, 87 insertions(+), 43 deletions(-) diff --git a/src/xrpld/app/misc/CredentialHelpers.cpp b/src/xrpld/app/misc/CredentialHelpers.cpp index 81355f1792..6d1f9f78c5 100644 --- a/src/xrpld/app/misc/CredentialHelpers.cpp +++ b/src/xrpld/app/misc/CredentialHelpers.cpp @@ -120,15 +120,15 @@ deleteSLE( } NotTEC -checkFields(PreflightContext const& ctx) +checkFields(STTx const& tx, beast::Journal j) { - if (!ctx.tx.isFieldPresent(sfCredentialIDs)) + if (!tx.isFieldPresent(sfCredentialIDs)) return tesSUCCESS; - auto const& credentials = ctx.tx.getFieldV256(sfCredentialIDs); + auto const& credentials = tx.getFieldV256(sfCredentialIDs); if (credentials.empty() || (credentials.size() > maxCredentialsArraySize)) { - JLOG(ctx.j.trace()) + JLOG(j.trace()) << "Malformed transaction: Credentials array size is invalid: " << credentials.size(); return temMALFORMED; @@ -140,7 +140,7 @@ checkFields(PreflightContext const& ctx) auto [it, ins] = duplicates.insert(cred); if (!ins) { - JLOG(ctx.j.trace()) + JLOG(j.trace()) << "Malformed transaction: duplicates in credentials."; return temMALFORMED; } @@ -150,24 +150,28 @@ checkFields(PreflightContext const& ctx) } TER -valid(PreclaimContext const& ctx, AccountID const& src) +valid( + STTx const& tx, + ReadView const& view, + AccountID const& src, + beast::Journal j) { - if (!ctx.tx.isFieldPresent(sfCredentialIDs)) + if (!tx.isFieldPresent(sfCredentialIDs)) return tesSUCCESS; - auto const& credIDs(ctx.tx.getFieldV256(sfCredentialIDs)); + auto const& credIDs(tx.getFieldV256(sfCredentialIDs)); for (auto const& h : credIDs) { - auto const sleCred = ctx.view.read(keylet::credential(h)); + auto const sleCred = view.read(keylet::credential(h)); if (!sleCred) { - JLOG(ctx.j.trace()) << "Credential doesn't exist. Cred: " << h; + JLOG(j.trace()) << "Credential doesn't exist. 
Cred: " << h; return tecBAD_CREDENTIALS; } if (sleCred->getAccountID(sfSubject) != src) { - JLOG(ctx.j.trace()) + JLOG(j.trace()) << "Credential doesn't belong to the source account. Cred: " << h; return tecBAD_CREDENTIALS; @@ -175,7 +179,7 @@ valid(PreclaimContext const& ctx, AccountID const& src) if (!(sleCred->getFlags() & lsfAccepted)) { - JLOG(ctx.j.trace()) << "Credential isn't accepted. Cred: " << h; + JLOG(j.trace()) << "Credential isn't accepted. Cred: " << h; return tecBAD_CREDENTIALS; } @@ -352,10 +356,12 @@ verifyValidDomain( TER verifyDepositPreauth( - ApplyContext& ctx, + STTx const& tx, + ApplyView& view, AccountID const& src, AccountID const& dst, - std::shared_ptr const& sleDst) + std::shared_ptr const& sleDst, + beast::Journal j) { // If depositPreauth is enabled, then an account that requires // authorization has at least two ways to get a payment in: @@ -363,24 +369,21 @@ verifyDepositPreauth( // 2. If src is deposit preauthorized by dst (either by account or by // credentials). - bool const credentialsPresent = ctx.tx.isFieldPresent(sfCredentialIDs); + bool const credentialsPresent = tx.isFieldPresent(sfCredentialIDs); if (credentialsPresent && - credentials::removeExpired( - ctx.view(), ctx.tx.getFieldV256(sfCredentialIDs), ctx.journal)) + credentials::removeExpired(view, tx.getFieldV256(sfCredentialIDs), j)) return tecEXPIRED; if (sleDst && (sleDst->getFlags() & lsfDepositAuth)) { if (src != dst) { - if (!ctx.view().exists(keylet::depositPreauth(dst, src))) + if (!view.exists(keylet::depositPreauth(dst, src))) return !credentialsPresent ? tecNO_PERMISSION : credentials::authorizedDepositPreauth( - ctx.view(), - ctx.tx.getFieldV256(sfCredentialIDs), - dst); + view, tx.getFieldV256(sfCredentialIDs), dst); } } diff --git a/src/xrpld/app/misc/CredentialHelpers.h b/src/xrpld/app/misc/CredentialHelpers.h index 162ddd6515..84938180ce 100644 --- a/src/xrpld/app/misc/CredentialHelpers.h +++ b/src/xrpld/app/misc/CredentialHelpers.h @@ -20,7 +20,16 @@ #ifndef RIPPLE_APP_MISC_CREDENTIALHELPERS_H_INCLUDED #define RIPPLE_APP_MISC_CREDENTIALHELPERS_H_INCLUDED -#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include namespace ripple { namespace credentials { @@ -48,13 +57,17 @@ deleteSLE( // Amendment and parameters checks for sfCredentialIDs field NotTEC -checkFields(PreflightContext const& ctx); +checkFields(STTx const& tx, beast::Journal j); // Accessing the ledger to check if provided credentials are valid. Do not use // in doApply (only in preclaim) since it does not remove expired credentials. // If you call it in prelaim, you also must call verifyDepositPreauth in doApply TER -valid(PreclaimContext const& ctx, AccountID const& src); +valid( + STTx const& tx, + ReadView const& view, + AccountID const& src, + beast::Journal j); // Check if subject has any credential maching the given domain. 
If you call it // in preclaim and it returns tecEXPIRED, you should call verifyValidDomain in @@ -93,10 +106,12 @@ verifyValidDomain( // Check expired credentials and for existing DepositPreauth ledger object TER verifyDepositPreauth( - ApplyContext& ctx, + STTx const& tx, + ApplyView& view, AccountID const& src, AccountID const& dst, - std::shared_ptr const& sleDst); + std::shared_ptr const& sleDst, + beast::Journal j); } // namespace ripple diff --git a/src/xrpld/app/tx/detail/DeleteAccount.cpp b/src/xrpld/app/tx/detail/DeleteAccount.cpp index 7aa47e05f3..4311aa79a8 100644 --- a/src/xrpld/app/tx/detail/DeleteAccount.cpp +++ b/src/xrpld/app/tx/detail/DeleteAccount.cpp @@ -58,7 +58,8 @@ DeleteAccount::preflight(PreflightContext const& ctx) // An account cannot be deleted and give itself the resulting XRP. return temDST_IS_SRC; - if (auto const err = credentials::checkFields(ctx); !isTesSuccess(err)) + if (auto const err = credentials::checkFields(ctx.tx, ctx.j); + !isTesSuccess(err)) return err; return preflight2(ctx); @@ -241,7 +242,8 @@ DeleteAccount::preclaim(PreclaimContext const& ctx) return tecDST_TAG_NEEDED; // If credentials are provided - check them anyway - if (auto const err = credentials::valid(ctx, account); !isTesSuccess(err)) + if (auto const err = credentials::valid(ctx.tx, ctx.view, account, ctx.j); + !isTesSuccess(err)) return err; // if credentials then postpone auth check to doApply, to check for expired @@ -376,7 +378,8 @@ DeleteAccount::doApply() if (ctx_.view().rules().enabled(featureDepositAuth) && ctx_.tx.isFieldPresent(sfCredentialIDs)) { - if (auto err = verifyDepositPreauth(ctx_, account_, dstID, dst); + if (auto err = verifyDepositPreauth( + ctx_.tx, ctx_.view(), account_, dstID, dst, ctx_.journal); !isTesSuccess(err)) return err; } diff --git a/src/xrpld/app/tx/detail/Escrow.cpp b/src/xrpld/app/tx/detail/Escrow.cpp index 75080da9a5..8f7005d55c 100644 --- a/src/xrpld/app/tx/detail/Escrow.cpp +++ b/src/xrpld/app/tx/detail/Escrow.cpp @@ -672,7 +672,8 @@ EscrowFinish::preflight(PreflightContext const& ctx) } } - if (auto const err = credentials::checkFields(ctx); !isTesSuccess(err)) + if (auto const err = credentials::checkFields(ctx.tx, ctx.j); + !isTesSuccess(err)) return err; return tesSUCCESS; @@ -761,7 +762,8 @@ EscrowFinish::preclaim(PreclaimContext const& ctx) { if (ctx.view.rules().enabled(featureCredentials)) { - if (auto const err = credentials::valid(ctx, ctx.tx[sfAccount]); + if (auto const err = + credentials::valid(ctx.tx, ctx.view, ctx.tx[sfAccount], ctx.j); !isTesSuccess(err)) return err; } @@ -1107,7 +1109,8 @@ EscrowFinish::doApply() if (ctx_.view().rules().enabled(featureDepositAuth)) { - if (auto err = verifyDepositPreauth(ctx_, account_, destID, sled); + if (auto err = verifyDepositPreauth( + ctx_.tx, ctx_.view(), account_, destID, sled, ctx_.journal); !isTesSuccess(err)) return err; } diff --git a/src/xrpld/app/tx/detail/PayChan.cpp b/src/xrpld/app/tx/detail/PayChan.cpp index a42902f6ac..d9e53ac75c 100644 --- a/src/xrpld/app/tx/detail/PayChan.cpp +++ b/src/xrpld/app/tx/detail/PayChan.cpp @@ -473,7 +473,8 @@ PayChanClaim::preflight(PreflightContext const& ctx) return temBAD_SIGNATURE; } - if (auto const err = credentials::checkFields(ctx); !isTesSuccess(err)) + if (auto const err = credentials::checkFields(ctx.tx, ctx.j); + !isTesSuccess(err)) return err; return preflight2(ctx); @@ -485,7 +486,8 @@ PayChanClaim::preclaim(PreclaimContext const& ctx) if (!ctx.view.rules().enabled(featureCredentials)) return Transactor::preclaim(ctx); - if (auto 
const err = credentials::valid(ctx, ctx.tx[sfAccount]); + if (auto const err = + credentials::valid(ctx.tx, ctx.view, ctx.tx[sfAccount], ctx.j); !isTesSuccess(err)) return err; @@ -554,7 +556,8 @@ PayChanClaim::doApply() if (depositAuth) { - if (auto err = verifyDepositPreauth(ctx_, txAccount, dst, sled); + if (auto err = verifyDepositPreauth( + ctx_.tx, ctx_.view(), txAccount, dst, sled, ctx_.journal); !isTesSuccess(err)) return err; } diff --git a/src/xrpld/app/tx/detail/Payment.cpp b/src/xrpld/app/tx/detail/Payment.cpp index f36e1bfe3d..692e03109e 100644 --- a/src/xrpld/app/tx/detail/Payment.cpp +++ b/src/xrpld/app/tx/detail/Payment.cpp @@ -238,7 +238,8 @@ Payment::preflight(PreflightContext const& ctx) } } - if (auto const err = credentials::checkFields(ctx); !isTesSuccess(err)) + if (auto const err = credentials::checkFields(ctx.tx, ctx.j); + !isTesSuccess(err)) return err; return preflight2(ctx); @@ -358,7 +359,8 @@ Payment::preclaim(PreclaimContext const& ctx) } } - if (auto const err = credentials::valid(ctx, ctx.tx[sfAccount]); + if (auto const err = + credentials::valid(ctx.tx, ctx.view, ctx.tx[sfAccount], ctx.j); !isTesSuccess(err)) return err; @@ -450,8 +452,13 @@ Payment::doApply() // 1. If Account == Destination, or // 2. If Account is deposit preauthorized by destination. - if (auto err = - verifyDepositPreauth(ctx_, account_, dstAccountID, sleDst); + if (auto err = verifyDepositPreauth( + ctx_.tx, + ctx_.view(), + account_, + dstAccountID, + sleDst, + ctx_.journal); !isTesSuccess(err)) return err; } @@ -521,8 +528,13 @@ Payment::doApply() ter != tesSUCCESS) return ter; - if (auto err = - verifyDepositPreauth(ctx_, account_, dstAccountID, sleDst); + if (auto err = verifyDepositPreauth( + ctx_.tx, + ctx_.view(), + account_, + dstAccountID, + sleDst, + ctx_.journal); !isTesSuccess(err)) return err; @@ -644,8 +656,13 @@ Payment::doApply() if (dstAmount > dstReserve || sleDst->getFieldAmount(sfBalance) > dstReserve) { - if (auto err = - verifyDepositPreauth(ctx_, account_, dstAccountID, sleDst); + if (auto err = verifyDepositPreauth( + ctx_.tx, + ctx_.view(), + account_, + dstAccountID, + sleDst, + ctx_.journal); !isTesSuccess(err)) return err; } From a7eea9546f8a7200dec56111588169b2a304fb84 Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Wed, 9 Jul 2025 13:43:11 +0100 Subject: [PATCH 068/244] test: Remove circular jtx.h dependencies (#5544) Circular includes in header files can yield unpredictable results. 
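A minimal sketch of the kind of cycle being removed (header names here are an assumption, for illustration only): an individual jtx helper header used to pull in the umbrella header, which in turn includes every helper, so what the preprocessor saw depended on inclusion order.

    // src/test/jtx/mpt.h -- before: includes the umbrella, which includes mpt.h back
    #include <test/jtx.h>

    // src/test/jtx/mpt.h -- after: name only the direct dependencies and let
    // src/test/jtx.h remain the one place that aggregates everything
    #include <test/jtx/Account.h>
    #include <test/jtx/Env.h>

The diff below applies that pattern to the mpt, permissioned_dex and permissioned_domains headers, and trims Vault_test.cpp down to the includes it actually needs.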
--- src/test/app/Vault_test.cpp | 12 +----------- src/test/jtx.h | 1 + src/test/jtx/impl/mpt.cpp | 2 +- src/test/jtx/impl/permissioned_dex.cpp | 2 +- src/test/jtx/impl/permissioned_domains.cpp | 2 +- src/test/jtx/mpt.h | 3 ++- src/test/jtx/permissioned_dex.h | 4 +++- src/test/jtx/permissioned_domains.h | 3 ++- 8 files changed, 12 insertions(+), 17 deletions(-) diff --git a/src/test/app/Vault_test.cpp b/src/test/app/Vault_test.cpp index ccac0e2819..25e486f1a2 100644 --- a/src/test/app/Vault_test.cpp +++ b/src/test/app/Vault_test.cpp @@ -17,18 +17,8 @@ */ //============================================================================== -#include +#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include diff --git a/src/test/jtx.h b/src/test/jtx.h index 4188910085..6347b9dcf9 100644 --- a/src/test/jtx.h +++ b/src/test/jtx.h @@ -22,6 +22,7 @@ // Convenience header that includes everything +#include #include #include #include diff --git a/src/test/jtx/impl/mpt.cpp b/src/test/jtx/impl/mpt.cpp index c8ff167221..d33432d316 100644 --- a/src/test/jtx/impl/mpt.cpp +++ b/src/test/jtx/impl/mpt.cpp @@ -17,7 +17,7 @@ */ //============================================================================== -#include +#include #include diff --git a/src/test/jtx/impl/permissioned_dex.cpp b/src/test/jtx/impl/permissioned_dex.cpp index 04497ebbdc..4b09a11880 100644 --- a/src/test/jtx/impl/permissioned_dex.cpp +++ b/src/test/jtx/impl/permissioned_dex.cpp @@ -17,7 +17,7 @@ */ //============================================================================== -#include +#include #include #include diff --git a/src/test/jtx/impl/permissioned_domains.cpp b/src/test/jtx/impl/permissioned_domains.cpp index 866ca3bb7e..441ee325c8 100644 --- a/src/test/jtx/impl/permissioned_domains.cpp +++ b/src/test/jtx/impl/permissioned_domains.cpp @@ -17,7 +17,7 @@ */ //============================================================================== -#include +#include namespace ripple { namespace test { diff --git a/src/test/jtx/mpt.h b/src/test/jtx/mpt.h index 52ade92323..64eaa452f5 100644 --- a/src/test/jtx/mpt.h +++ b/src/test/jtx/mpt.h @@ -20,7 +20,8 @@ #ifndef RIPPLE_TEST_JTX_MPT_H_INCLUDED #define RIPPLE_TEST_JTX_MPT_H_INCLUDED -#include +#include +#include #include #include diff --git a/src/test/jtx/permissioned_dex.h b/src/test/jtx/permissioned_dex.h index fb32e1c1be..b95574d94d 100644 --- a/src/test/jtx/permissioned_dex.h +++ b/src/test/jtx/permissioned_dex.h @@ -19,7 +19,9 @@ #pragma once -#include +#include +#include + namespace ripple { namespace test { namespace jtx { diff --git a/src/test/jtx/permissioned_domains.h b/src/test/jtx/permissioned_domains.h index ee80c6a69f..ed086e366d 100644 --- a/src/test/jtx/permissioned_domains.h +++ b/src/test/jtx/permissioned_domains.h @@ -20,7 +20,8 @@ #ifndef RIPPLE_TEST_JTX_PERMISSIONED_DOMAINS_H_INCLUDED #define RIPPLE_TEST_JTX_PERMISSIONED_DOMAINS_H_INCLUDED -#include +#include +#include #include namespace ripple { From f47e2f4e82bd36f5819043708442602be977973e Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Wed, 9 Jul 2025 18:47:34 +0100 Subject: [PATCH 069/244] chore: Fix compilation error with clang-20 and cleanup (#5543) Removes clutter for old compilers, defaults to non-unity builds in cmake to match conanfile.py, and workaround for clang-20 compilation errors. 
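The clang-20 part of the change is the standard push/ignore/pop pattern, scoped to the include that emits the deprecation diagnostic (the wrapped header varies per file; <algorithm> is the libstdc++ culprit cited in the hash_append.h comment):

    #if defined(__clang__)
    #pragma clang diagnostic push
    #pragma clang diagnostic ignored "-Wdeprecated"
    #pragma clang diagnostic ignored "-Wdeprecated-declarations"
    #endif

    // libstdc++'s <algorithm> drags in the deprecated get_temporary_buffer
    // machinery that clang 20 turns into an error under -Werror
    #include <algorithm>

    #if defined(__clang__)
    #pragma clang diagnostic pop
    #endif

With unity builds now defaulting to OFF in CMake (matching conanfile.py), a unity build has to be requested explicitly, e.g. `cmake -Dunity=ON ...`.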
--- cmake/RippledSettings.cmake | 2 +- include/xrpl/basics/Expected.h | 10 +++++++++ include/xrpl/beast/hash/hash_append.h | 29 +++++++++++++++++++++++++-- src/test/basics/Buffer_test.cpp | 6 ++---- 4 files changed, 40 insertions(+), 7 deletions(-) diff --git a/cmake/RippledSettings.cmake b/cmake/RippledSettings.cmake index b2d7b0d9a5..9dc8609f58 100644 --- a/cmake/RippledSettings.cmake +++ b/cmake/RippledSettings.cmake @@ -18,7 +18,7 @@ if(tests) endif() endif() -option(unity "Creates a build using UNITY support in cmake. This is the default" ON) +option(unity "Creates a build using UNITY support in cmake." OFF) if(unity) if(NOT is_ci) set(CMAKE_UNITY_BUILD_BATCH_SIZE 15 CACHE STRING "") diff --git a/include/xrpl/basics/Expected.h b/include/xrpl/basics/Expected.h index 9afb160d9d..d2440f63ab 100644 --- a/include/xrpl/basics/Expected.h +++ b/include/xrpl/basics/Expected.h @@ -22,8 +22,18 @@ #include +#if defined(__clang__) +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated" +#pragma clang diagnostic ignored "-Wdeprecated-declarations" +#endif + #include +#if defined(__clang__) +#pragma clang diagnostic pop +#endif + #include namespace ripple { diff --git a/include/xrpl/beast/hash/hash_append.h b/include/xrpl/beast/hash/hash_append.h index 6b11fe1eb3..825555320a 100644 --- a/include/xrpl/beast/hash/hash_append.h +++ b/include/xrpl/beast/hash/hash_append.h @@ -24,13 +24,38 @@ #include #include +/* + +Workaround for overzealous clang warning, which trips on libstdc++ headers + + In file included from + /usr/lib/gcc/x86_64-linux-gnu/12/../../../../include/c++/12/bits/stl_algo.h:61: + /usr/lib/gcc/x86_64-linux-gnu/12/../../../../include/c++/12/bits/stl_tempbuf.h:263:8: + error: 'get_temporary_buffer> *>>' is deprecated + [-Werror,-Wdeprecated-declarations] 263 | + std::get_temporary_buffer(_M_original_len)); + ^ +*/ + +#if defined(__clang__) +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated" +#pragma clang diagnostic ignored "-Wdeprecated-declarations" +#endif + +#include +#include + +#if defined(__clang__) +#pragma clang diagnostic pop +#endif + #include #include #include #include -#include #include -#include #include #include #include diff --git a/src/test/basics/Buffer_test.cpp b/src/test/basics/Buffer_test.cpp index 43ca048d7f..c59805f569 100644 --- a/src/test/basics/Buffer_test.cpp +++ b/src/test/basics/Buffer_test.cpp @@ -98,8 +98,7 @@ struct Buffer_test : beast::unit_test::suite x = b0; BEAST_EXPECT(x == b0); BEAST_EXPECT(sane(x)); -#if defined(__clang__) && (!defined(__APPLE__) && (__clang_major__ >= 7)) || \ - (defined(__APPLE__) && (__apple_build_version__ >= 10010043)) +#if defined(__clang__) #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wself-assign-overloaded" #endif @@ -111,8 +110,7 @@ struct Buffer_test : beast::unit_test::suite BEAST_EXPECT(y == b3); BEAST_EXPECT(sane(y)); -#if defined(__clang__) && (!defined(__APPLE__) && (__clang_major__ >= 7)) || \ - (defined(__APPLE__) && (__apple_build_version__ >= 10010043)) +#if defined(__clang__) #pragma clang diagnostic pop #endif } From 358b7f50a7457b2d543632bbdb8f3ce1a7c22812 Mon Sep 17 00:00:00 2001 From: Ayaz Salikhov Date: Thu, 10 Jul 2025 11:14:27 +0100 Subject: [PATCH 070/244] fix: Link with boost libraries explicitly (#5546) Having `boost::boost` in `self.requires` makes clio link with all boost libraries. There are additionally several Boost stacktrace backends that are both linked with, which violate ODR. This change fixes the problem. 
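For a consumer, the effect is roughly the difference sketched below: instead of the catch-all `Boost::boost` target, which pulls in every Boost library (including more than one stacktrace backend), only the components libxrpl actually uses are linked. The component list and target name here are abbreviated and illustrative; the full list now declared by the Conan recipe is in the conanfile.py hunk below.

    # find only what is needed and link the explicit component targets
    find_package(Boost REQUIRED COMPONENTS filesystem json program_options thread)
    target_link_libraries(consumer_app PRIVATE
      Boost::headers          # header-only Boost, replacing the old Boost::boost
      Boost::filesystem
      Boost::json
      Boost::program_options
      Boost::thread)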
--- cmake/deps/Boost.cmake | 3 +-- conanfile.py | 12 +++++++++++- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/cmake/deps/Boost.cmake b/cmake/deps/Boost.cmake index 041c2380e1..031202f4d2 100644 --- a/cmake/deps/Boost.cmake +++ b/cmake/deps/Boost.cmake @@ -2,7 +2,6 @@ find_package(Boost 1.82 REQUIRED COMPONENTS chrono container - context coroutine date_time filesystem @@ -24,7 +23,7 @@ endif() target_link_libraries(ripple_boost INTERFACE - Boost::boost + Boost::headers Boost::chrono Boost::container Boost::coroutine diff --git a/conanfile.py b/conanfile.py index 1a9e88fc0d..2df08544cd 100644 --- a/conanfile.py +++ b/conanfile.py @@ -164,7 +164,17 @@ class Xrpl(ConanFile): # `include/`, not `include/ripple/proto/`. libxrpl.includedirs = ['include', 'include/ripple/proto'] libxrpl.requires = [ - 'boost::boost', + 'boost::headers', + 'boost::chrono', + 'boost::container', + 'boost::coroutine', + 'boost::date_time', + 'boost::filesystem', + 'boost::json', + 'boost::program_options', + 'boost::regex', + 'boost::system', + 'boost::thread', 'date::date', 'grpc::grpc++', 'libarchive::libarchive', From b113190563f56c5741ae3599e8880daf5458b919 Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Thu, 10 Jul 2025 16:46:02 +0100 Subject: [PATCH 071/244] Downgrade required CMake version for Antithesis SDK (#5548) The current version was copied from `antithesis-sdk-cpp` but there is no logical reason to require this specific version of CMake. This change downgrades the version to make the project build with older CMake versions. --- external/antithesis-sdk/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/external/antithesis-sdk/CMakeLists.txt b/external/antithesis-sdk/CMakeLists.txt index d2c1f536af..46c7b4bf7a 100644 --- a/external/antithesis-sdk/CMakeLists.txt +++ b/external/antithesis-sdk/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.25) +cmake_minimum_required(VERSION 3.18) # Note, version set explicitly by rippled project project(antithesis-sdk-cpp VERSION 0.4.4 LANGUAGES CXX) From 8e94ea3154384fe03104a751c2c7fbb351303b11 Mon Sep 17 00:00:00 2001 From: Denis Angell Date: Thu, 10 Jul 2025 18:29:51 +0200 Subject: [PATCH 072/244] fix: add allowTrustLineLocking flag for account_info (#5525) * Update the `account_info` API so that the `allowTrustLineLocking` flag is included in the response. * The proposed `TokenEscrow` amendment added an `allowTrustLineLocking` flag in the `AccountRoot` object. * In the API response, under `account_flags`, there is now an `allowTrustLineLocking` field with a boolean (`true` or `false`) value. 
* For reference, the XLS-85 Token-Enabled Escrows implementation can be found in https://github.com/XRPLF/rippled/pull/5185 --- src/test/rpc/AccountInfo_test.cpp | 27 ++++++++++++++++++++++++++ src/xrpld/rpc/handlers/AccountInfo.cpp | 8 ++++++++ 2 files changed, 35 insertions(+) diff --git a/src/test/rpc/AccountInfo_test.cpp b/src/test/rpc/AccountInfo_test.cpp index 238b739611..0b41da2ded 100644 --- a/src/test/rpc/AccountInfo_test.cpp +++ b/src/test/rpc/AccountInfo_test.cpp @@ -675,6 +675,30 @@ public: BEAST_EXPECT( !getAccountFlag(allowTrustLineClawbackFlag.first, bob)); } + + static constexpr std::pair + allowTrustLineLockingFlag{ + "allowTrustLineLocking", asfAllowTrustLineLocking}; + + if (features[featureTokenEscrow]) + { + auto const f1 = + getAccountFlag(allowTrustLineLockingFlag.first, bob); + BEAST_EXPECT(f1.has_value()); + BEAST_EXPECT(!f1.value()); + + // Set allowTrustLineLocking + env(fset(bob, allowTrustLineLockingFlag.second)); + env.close(); + auto const f2 = + getAccountFlag(allowTrustLineLockingFlag.first, bob); + BEAST_EXPECT(f2.has_value()); + BEAST_EXPECT(f2.value()); + } + else + { + BEAST_EXPECT(!getAccountFlag(allowTrustLineLockingFlag.first, bob)); + } } void @@ -691,6 +715,9 @@ public: testAccountFlags(allFeatures - featureDisallowIncoming); testAccountFlags( allFeatures - featureDisallowIncoming - featureClawback); + testAccountFlags( + allFeatures - featureDisallowIncoming - featureClawback - + featureTokenEscrow); } }; diff --git a/src/xrpld/rpc/handlers/AccountInfo.cpp b/src/xrpld/rpc/handlers/AccountInfo.cpp index 6416309e2e..3432021690 100644 --- a/src/xrpld/rpc/handlers/AccountInfo.cpp +++ b/src/xrpld/rpc/handlers/AccountInfo.cpp @@ -108,6 +108,10 @@ doAccountInfo(RPC::JsonContext& context) allowTrustLineClawbackFlag{ "allowTrustLineClawback", lsfAllowTrustLineClawback}; + static constexpr std::pair + allowTrustLineLockingFlag{ + "allowTrustLineLocking", lsfAllowTrustLineLocking}; + auto const sleAccepted = ledger->read(keylet::account(accountID)); if (sleAccepted) { @@ -140,6 +144,10 @@ doAccountInfo(RPC::JsonContext& context) acctFlags[allowTrustLineClawbackFlag.first.data()] = sleAccepted->isFlag(allowTrustLineClawbackFlag.second); + if (ledger->rules().enabled(featureTokenEscrow)) + acctFlags[allowTrustLineLockingFlag.first.data()] = + sleAccepted->isFlag(allowTrustLineLockingFlag.second); + result[jss::account_flags] = std::move(acctFlags); // The document[https://xrpl.org/account_info.html#account_info] states From 6534757d85879517cb825c8205b65b5d87943b65 Mon Sep 17 00:00:00 2001 From: Vlad <129996061+vvysokikh1@users.noreply.github.com> Date: Thu, 10 Jul 2025 19:15:42 +0100 Subject: [PATCH 073/244] chore: Remove unused headers (#5526) --- include/xrpl/basics/Buffer.h | 1 + include/xrpl/basics/StringUtilities.h | 1 - include/xrpl/basics/algorithm.h | 1 - include/xrpl/basics/hardened_hash.h | 4 ---- include/xrpl/basics/mulDiv.h | 1 - include/xrpl/basics/tagged_integer.h | 2 -- include/xrpl/beast/clock/abstract_clock.h | 3 --- include/xrpl/beast/clock/manual_clock.h | 2 ++ include/xrpl/beast/container/aged_container_utility.h | 1 + .../beast/container/detail/aged_associative_container.h | 2 -- .../xrpl/beast/container/detail/aged_ordered_container.h | 1 - include/xrpl/beast/core/LexicalCast.h | 2 -- include/xrpl/beast/hash/hash_append.h | 5 +---- include/xrpl/beast/net/IPAddress.h | 4 ---- include/xrpl/beast/net/IPAddressV4.h | 6 ------ include/xrpl/beast/net/IPAddressV6.h | 6 ------ include/xrpl/beast/net/IPEndpoint.h | 1 - include/xrpl/beast/rfc2616.h 
| 2 -- include/xrpl/beast/test/yield_to.h | 1 - include/xrpl/beast/unit_test/reporter.h | 1 - include/xrpl/beast/unit_test/runner.h | 1 - include/xrpl/json/json_value.h | 1 - include/xrpl/protocol/AccountID.h | 1 - include/xrpl/protocol/ApiVersion.h | 1 - include/xrpl/protocol/Feature.h | 1 - include/xrpl/protocol/FeeUnits.h | 5 ----- include/xrpl/protocol/Indexes.h | 1 + include/xrpl/protocol/Issue.h | 3 --- include/xrpl/protocol/MultiApiJson.h | 1 - include/xrpl/protocol/Permissions.h | 1 - include/xrpl/protocol/PublicKey.h | 1 - include/xrpl/protocol/SOTemplate.h | 1 - include/xrpl/protocol/STBase.h | 1 - include/xrpl/protocol/STBlob.h | 1 - include/xrpl/protocol/STValidation.h | 2 -- include/xrpl/protocol/Serializer.h | 1 - include/xrpl/protocol/Sign.h | 2 -- include/xrpl/protocol/XChainAttestations.h | 2 -- include/xrpl/protocol/detail/b58_utils.h | 1 - include/xrpl/protocol/digest.h | 1 - include/xrpl/protocol/json_get_or_throw.h | 1 - include/xrpl/resource/Charge.h | 1 - include/xrpl/resource/Gossip.h | 2 ++ include/xrpl/server/detail/BaseHTTPPeer.h | 1 - include/xrpl/server/detail/Door.h | 2 -- include/xrpl/server/detail/io_list.h | 1 - src/xrpld/app/misc/HashRouter.h | 1 + src/xrpld/core/JobQueue.h | 2 ++ src/xrpld/ledger/PaymentSandbox.h | 1 - src/xrpld/shamap/SHAMap.h | 1 + 50 files changed, 12 insertions(+), 77 deletions(-) diff --git a/include/xrpl/basics/Buffer.h b/include/xrpl/basics/Buffer.h index b2f1163452..3379a923f0 100644 --- a/include/xrpl/basics/Buffer.h +++ b/include/xrpl/basics/Buffer.h @@ -25,6 +25,7 @@ #include #include +#include namespace ripple { diff --git a/include/xrpl/basics/StringUtilities.h b/include/xrpl/basics/StringUtilities.h index 23d60e2db4..5f905638cb 100644 --- a/include/xrpl/basics/StringUtilities.h +++ b/include/xrpl/basics/StringUtilities.h @@ -29,7 +29,6 @@ #include #include #include -#include #include namespace ripple { diff --git a/include/xrpl/basics/algorithm.h b/include/xrpl/basics/algorithm.h index ed6e8080d9..673d5e955b 100644 --- a/include/xrpl/basics/algorithm.h +++ b/include/xrpl/basics/algorithm.h @@ -20,7 +20,6 @@ #ifndef RIPPLE_ALGORITHM_H_INCLUDED #define RIPPLE_ALGORITHM_H_INCLUDED -#include #include namespace ripple { diff --git a/include/xrpl/basics/hardened_hash.h b/include/xrpl/basics/hardened_hash.h index 0b77b0a07a..aae6c55dff 100644 --- a/include/xrpl/basics/hardened_hash.h +++ b/include/xrpl/basics/hardened_hash.h @@ -24,12 +24,8 @@ #include #include -#include #include #include -#include -#include -#include #include namespace ripple { diff --git a/include/xrpl/basics/mulDiv.h b/include/xrpl/basics/mulDiv.h index e338f87c81..96d466f6c7 100644 --- a/include/xrpl/basics/mulDiv.h +++ b/include/xrpl/basics/mulDiv.h @@ -23,7 +23,6 @@ #include #include #include -#include namespace ripple { auto constexpr muldiv_max = std::numeric_limits::max(); diff --git a/include/xrpl/basics/tagged_integer.h b/include/xrpl/basics/tagged_integer.h index 471fa8eb1e..ed30b6f120 100644 --- a/include/xrpl/basics/tagged_integer.h +++ b/include/xrpl/basics/tagged_integer.h @@ -24,10 +24,8 @@ #include -#include #include #include -#include namespace ripple { diff --git a/include/xrpl/beast/clock/abstract_clock.h b/include/xrpl/beast/clock/abstract_clock.h index 128ab82b4b..7b0f04225f 100644 --- a/include/xrpl/beast/clock/abstract_clock.h +++ b/include/xrpl/beast/clock/abstract_clock.h @@ -20,9 +20,6 @@ #ifndef BEAST_CHRONO_ABSTRACT_CLOCK_H_INCLUDED #define BEAST_CHRONO_ABSTRACT_CLOCK_H_INCLUDED -#include -#include - namespace beast { /** Abstract 
interface to a clock. diff --git a/include/xrpl/beast/clock/manual_clock.h b/include/xrpl/beast/clock/manual_clock.h index 32ff76bb07..a0e82b7014 100644 --- a/include/xrpl/beast/clock/manual_clock.h +++ b/include/xrpl/beast/clock/manual_clock.h @@ -23,6 +23,8 @@ #include #include +#include + namespace beast { /** Manual clock implementation. diff --git a/include/xrpl/beast/container/aged_container_utility.h b/include/xrpl/beast/container/aged_container_utility.h index b64cefbf5a..d315f05346 100644 --- a/include/xrpl/beast/container/aged_container_utility.h +++ b/include/xrpl/beast/container/aged_container_utility.h @@ -22,6 +22,7 @@ #include +#include #include namespace beast { diff --git a/include/xrpl/beast/container/detail/aged_associative_container.h b/include/xrpl/beast/container/detail/aged_associative_container.h index 5ff7901552..678fbe4e17 100644 --- a/include/xrpl/beast/container/detail/aged_associative_container.h +++ b/include/xrpl/beast/container/detail/aged_associative_container.h @@ -20,8 +20,6 @@ #ifndef BEAST_CONTAINER_DETAIL_AGED_ASSOCIATIVE_CONTAINER_H_INCLUDED #define BEAST_CONTAINER_DETAIL_AGED_ASSOCIATIVE_CONTAINER_H_INCLUDED -#include - namespace beast { namespace detail { diff --git a/include/xrpl/beast/container/detail/aged_ordered_container.h b/include/xrpl/beast/container/detail/aged_ordered_container.h index 8c978d0517..ef3e1b5ea1 100644 --- a/include/xrpl/beast/container/detail/aged_ordered_container.h +++ b/include/xrpl/beast/container/detail/aged_ordered_container.h @@ -33,7 +33,6 @@ #include #include #include -#include #include #include #include diff --git a/include/xrpl/beast/core/LexicalCast.h b/include/xrpl/beast/core/LexicalCast.h index aa67bcad50..5551e1f2dc 100644 --- a/include/xrpl/beast/core/LexicalCast.h +++ b/include/xrpl/beast/core/LexicalCast.h @@ -29,11 +29,9 @@ #include #include #include -#include #include #include #include -#include namespace beast { diff --git a/include/xrpl/beast/hash/hash_append.h b/include/xrpl/beast/hash/hash_append.h index 825555320a..e113567ab1 100644 --- a/include/xrpl/beast/hash/hash_append.h +++ b/include/xrpl/beast/hash/hash_append.h @@ -45,7 +45,6 @@ Workaround for overzealous clang warning, which trips on libstdc++ headers #endif #include -#include #if defined(__clang__) #pragma clang diagnostic pop @@ -53,10 +52,8 @@ Workaround for overzealous clang warning, which trips on libstdc++ headers #include #include -#include #include -#include -#include +#include #include #include #include diff --git a/include/xrpl/beast/net/IPAddress.h b/include/xrpl/beast/net/IPAddress.h index 62469cfda1..fb5dac90ec 100644 --- a/include/xrpl/beast/net/IPAddress.h +++ b/include/xrpl/beast/net/IPAddress.h @@ -29,11 +29,7 @@ #include #include -#include -#include -#include #include -#include //------------------------------------------------------------------------------ diff --git a/include/xrpl/beast/net/IPAddressV4.h b/include/xrpl/beast/net/IPAddressV4.h index 98a92dba20..c65adae05b 100644 --- a/include/xrpl/beast/net/IPAddressV4.h +++ b/include/xrpl/beast/net/IPAddressV4.h @@ -24,12 +24,6 @@ #include -#include -#include -#include -#include -#include - namespace beast { namespace IP { diff --git a/include/xrpl/beast/net/IPAddressV6.h b/include/xrpl/beast/net/IPAddressV6.h index 4a4ef73b86..9e24b228e5 100644 --- a/include/xrpl/beast/net/IPAddressV6.h +++ b/include/xrpl/beast/net/IPAddressV6.h @@ -24,12 +24,6 @@ #include -#include -#include -#include -#include -#include - namespace beast { namespace IP { diff --git 
a/include/xrpl/beast/net/IPEndpoint.h b/include/xrpl/beast/net/IPEndpoint.h index 345ba4b8da..8d43eb0ba9 100644 --- a/include/xrpl/beast/net/IPEndpoint.h +++ b/include/xrpl/beast/net/IPEndpoint.h @@ -25,7 +25,6 @@ #include #include -#include #include #include diff --git a/include/xrpl/beast/rfc2616.h b/include/xrpl/beast/rfc2616.h index 648fbc22e2..d6b3fa3cda 100644 --- a/include/xrpl/beast/rfc2616.h +++ b/include/xrpl/beast/rfc2616.h @@ -28,10 +28,8 @@ #include #include -#include #include #include -#include #include namespace beast { diff --git a/include/xrpl/beast/test/yield_to.h b/include/xrpl/beast/test/yield_to.h index 9e9f83b897..27a3a2db20 100644 --- a/include/xrpl/beast/test/yield_to.h +++ b/include/xrpl/beast/test/yield_to.h @@ -13,7 +13,6 @@ #include #include -#include #include #include #include diff --git a/include/xrpl/beast/unit_test/reporter.h b/include/xrpl/beast/unit_test/reporter.h index e7a7d4b3ad..0054daab98 100644 --- a/include/xrpl/beast/unit_test/reporter.h +++ b/include/xrpl/beast/unit_test/reporter.h @@ -16,7 +16,6 @@ #include #include -#include #include #include #include diff --git a/include/xrpl/beast/unit_test/runner.h b/include/xrpl/beast/unit_test/runner.h index 283f7c8723..977cc45035 100644 --- a/include/xrpl/beast/unit_test/runner.h +++ b/include/xrpl/beast/unit_test/runner.h @@ -13,7 +13,6 @@ #include #include -#include #include namespace beast { diff --git a/include/xrpl/json/json_value.h b/include/xrpl/json/json_value.h index 2e815b79f2..272d12d680 100644 --- a/include/xrpl/json/json_value.h +++ b/include/xrpl/json/json_value.h @@ -26,7 +26,6 @@ #include #include #include -#include #include /** \brief JSON (JavaScript Object Notation). diff --git a/include/xrpl/protocol/AccountID.h b/include/xrpl/protocol/AccountID.h index 295cf41e4f..d546346bb4 100644 --- a/include/xrpl/protocol/AccountID.h +++ b/include/xrpl/protocol/AccountID.h @@ -29,7 +29,6 @@ #include #include -#include #include #include diff --git a/include/xrpl/protocol/ApiVersion.h b/include/xrpl/protocol/ApiVersion.h index dd09cf6bd1..deafafa513 100644 --- a/include/xrpl/protocol/ApiVersion.h +++ b/include/xrpl/protocol/ApiVersion.h @@ -20,7 +20,6 @@ #ifndef RIPPLE_PROTOCOL_APIVERSION_H_INCLUDED #define RIPPLE_PROTOCOL_APIVERSION_H_INCLUDED -#include #include #include diff --git a/include/xrpl/protocol/Feature.h b/include/xrpl/protocol/Feature.h index 75add39af9..c55776a5ce 100644 --- a/include/xrpl/protocol/Feature.h +++ b/include/xrpl/protocol/Feature.h @@ -24,7 +24,6 @@ #include -#include #include #include #include diff --git a/include/xrpl/protocol/FeeUnits.h b/include/xrpl/protocol/FeeUnits.h index c6949a434c..31a1886b7f 100644 --- a/include/xrpl/protocol/FeeUnits.h +++ b/include/xrpl/protocol/FeeUnits.h @@ -27,14 +27,9 @@ #include #include -#include -#include #include #include #include -#include -#include -#include namespace ripple { diff --git a/include/xrpl/protocol/Indexes.h b/include/xrpl/protocol/Indexes.h index 57c8727ae6..3e3f2843c1 100644 --- a/include/xrpl/protocol/Indexes.h +++ b/include/xrpl/protocol/Indexes.h @@ -32,6 +32,7 @@ #include #include +#include namespace ripple { diff --git a/include/xrpl/protocol/Issue.h b/include/xrpl/protocol/Issue.h index 83ef337c35..eb4861f59b 100644 --- a/include/xrpl/protocol/Issue.h +++ b/include/xrpl/protocol/Issue.h @@ -24,9 +24,6 @@ #include #include -#include -#include - namespace ripple { /** A currency issued by an account. 
diff --git a/include/xrpl/protocol/MultiApiJson.h b/include/xrpl/protocol/MultiApiJson.h index 1e35bdbda2..4a3d0115de 100644 --- a/include/xrpl/protocol/MultiApiJson.h +++ b/include/xrpl/protocol/MultiApiJson.h @@ -28,7 +28,6 @@ #include #include #include -#include #include #include diff --git a/include/xrpl/protocol/Permissions.h b/include/xrpl/protocol/Permissions.h index 8ba53d94d7..67f3eea8d7 100644 --- a/include/xrpl/protocol/Permissions.h +++ b/include/xrpl/protocol/Permissions.h @@ -25,7 +25,6 @@ #include #include #include -#include namespace ripple { /** diff --git a/include/xrpl/protocol/PublicKey.h b/include/xrpl/protocol/PublicKey.h index c68656877c..9bf01e5cda 100644 --- a/include/xrpl/protocol/PublicKey.h +++ b/include/xrpl/protocol/PublicKey.h @@ -32,7 +32,6 @@ #include #include #include -#include namespace ripple { diff --git a/include/xrpl/protocol/SOTemplate.h b/include/xrpl/protocol/SOTemplate.h index 9fd4cbf19d..14497b4222 100644 --- a/include/xrpl/protocol/SOTemplate.h +++ b/include/xrpl/protocol/SOTemplate.h @@ -25,7 +25,6 @@ #include #include -#include #include namespace ripple { diff --git a/include/xrpl/protocol/STBase.h b/include/xrpl/protocol/STBase.h index eec9a97987..3f5a3b57ab 100644 --- a/include/xrpl/protocol/STBase.h +++ b/include/xrpl/protocol/STBase.h @@ -24,7 +24,6 @@ #include #include -#include #include #include #include diff --git a/include/xrpl/protocol/STBlob.h b/include/xrpl/protocol/STBlob.h index 80832b2688..374abd2a7c 100644 --- a/include/xrpl/protocol/STBlob.h +++ b/include/xrpl/protocol/STBlob.h @@ -27,7 +27,6 @@ #include #include -#include namespace ripple { diff --git a/include/xrpl/protocol/STValidation.h b/include/xrpl/protocol/STValidation.h index 11ec733c01..2aa74203a2 100644 --- a/include/xrpl/protocol/STValidation.h +++ b/include/xrpl/protocol/STValidation.h @@ -28,8 +28,6 @@ #include #include -#include -#include #include #include diff --git a/include/xrpl/protocol/Serializer.h b/include/xrpl/protocol/Serializer.h index 9c77aa4111..5ea4d3ca96 100644 --- a/include/xrpl/protocol/Serializer.h +++ b/include/xrpl/protocol/Serializer.h @@ -33,7 +33,6 @@ #include #include -#include #include namespace ripple { diff --git a/include/xrpl/protocol/Sign.h b/include/xrpl/protocol/Sign.h index 7e1156ceda..5aa9fabddc 100644 --- a/include/xrpl/protocol/Sign.h +++ b/include/xrpl/protocol/Sign.h @@ -25,8 +25,6 @@ #include #include -#include - namespace ripple { /** Sign an STObject diff --git a/include/xrpl/protocol/XChainAttestations.h b/include/xrpl/protocol/XChainAttestations.h index 721950ca9c..92fd04731d 100644 --- a/include/xrpl/protocol/XChainAttestations.h +++ b/include/xrpl/protocol/XChainAttestations.h @@ -35,8 +35,6 @@ #include #include -#include -#include #include namespace ripple { diff --git a/include/xrpl/protocol/detail/b58_utils.h b/include/xrpl/protocol/detail/b58_utils.h index 8fc85f390b..ecd301524f 100644 --- a/include/xrpl/protocol/detail/b58_utils.h +++ b/include/xrpl/protocol/detail/b58_utils.h @@ -27,7 +27,6 @@ #include #include -#include #include #include #include diff --git a/include/xrpl/protocol/digest.h b/include/xrpl/protocol/digest.h index efec616a0c..303fbafe4f 100644 --- a/include/xrpl/protocol/digest.h +++ b/include/xrpl/protocol/digest.h @@ -25,7 +25,6 @@ #include -#include #include namespace ripple { diff --git a/include/xrpl/protocol/json_get_or_throw.h b/include/xrpl/protocol/json_get_or_throw.h index c59b5a71a3..74d1779339 100644 --- a/include/xrpl/protocol/json_get_or_throw.h +++ 
b/include/xrpl/protocol/json_get_or_throw.h @@ -10,7 +10,6 @@ #include #include #include -#include namespace Json { struct JsonMissingKeyError : std::exception diff --git a/include/xrpl/resource/Charge.h b/include/xrpl/resource/Charge.h index a75ad32624..ead46ca31f 100644 --- a/include/xrpl/resource/Charge.h +++ b/include/xrpl/resource/Charge.h @@ -20,7 +20,6 @@ #ifndef RIPPLE_RESOURCE_CHARGE_H_INCLUDED #define RIPPLE_RESOURCE_CHARGE_H_INCLUDED -#include #include namespace ripple { diff --git a/include/xrpl/resource/Gossip.h b/include/xrpl/resource/Gossip.h index 6e2a86ecd7..3495de5b95 100644 --- a/include/xrpl/resource/Gossip.h +++ b/include/xrpl/resource/Gossip.h @@ -22,6 +22,8 @@ #include +#include + namespace ripple { namespace Resource { diff --git a/include/xrpl/server/detail/BaseHTTPPeer.h b/include/xrpl/server/detail/BaseHTTPPeer.h index 51ac866e1e..b065a97cf0 100644 --- a/include/xrpl/server/detail/BaseHTTPPeer.h +++ b/include/xrpl/server/detail/BaseHTTPPeer.h @@ -41,7 +41,6 @@ #include #include #include -#include #include namespace ripple { diff --git a/include/xrpl/server/detail/Door.h b/include/xrpl/server/detail/Door.h index 90de885579..88e19db8cd 100644 --- a/include/xrpl/server/detail/Door.h +++ b/include/xrpl/server/detail/Door.h @@ -37,10 +37,8 @@ #include #include -#include #include #include -#include namespace ripple { diff --git a/include/xrpl/server/detail/io_list.h b/include/xrpl/server/detail/io_list.h index fbf60c9a7f..fba8b28f87 100644 --- a/include/xrpl/server/detail/io_list.h +++ b/include/xrpl/server/detail/io_list.h @@ -26,7 +26,6 @@ #include #include #include -#include #include #include diff --git a/src/xrpld/app/misc/HashRouter.h b/src/xrpld/app/misc/HashRouter.h index a13bcb9f8f..d1d69623c1 100644 --- a/src/xrpld/app/misc/HashRouter.h +++ b/src/xrpld/app/misc/HashRouter.h @@ -27,6 +27,7 @@ #include #include +#include namespace ripple { diff --git a/src/xrpld/core/JobQueue.h b/src/xrpld/core/JobQueue.h index 051c298251..eda956c019 100644 --- a/src/xrpld/core/JobQueue.h +++ b/src/xrpld/core/JobQueue.h @@ -30,6 +30,8 @@ #include +#include + namespace ripple { namespace perf { diff --git a/src/xrpld/ledger/PaymentSandbox.h b/src/xrpld/ledger/PaymentSandbox.h index a41a0211a2..2cd31ea490 100644 --- a/src/xrpld/ledger/PaymentSandbox.h +++ b/src/xrpld/ledger/PaymentSandbox.h @@ -27,7 +27,6 @@ #include #include -#include namespace ripple { diff --git a/src/xrpld/shamap/SHAMap.h b/src/xrpld/shamap/SHAMap.h index 33c42c2d23..738cf96ecc 100644 --- a/src/xrpld/shamap/SHAMap.h +++ b/src/xrpld/shamap/SHAMap.h @@ -36,6 +36,7 @@ #include #include +#include #include #include From b8626ea3c66b05ed754f62ae89e46f3a0d91dcb3 Mon Sep 17 00:00:00 2001 From: Shawn Xie <35279399+shawnxie999@users.noreply.github.com> Date: Fri, 11 Jul 2025 13:50:03 -0400 Subject: [PATCH 074/244] Add MPT related txns into issuer's account history (#5530) Currently there is no easy way to track MPT related transactions for the issuer. This change allows MPT transactions to show up on issuer's AccountTx RPC (to align with how IOUs work). 
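The underlying observation, which the TxMeta change below relies on, is that an MPTokenIssuanceID already encodes the issuer: the 192-bit ID is built from the creating transaction's sequence number plus the issuer's AccountID, so any metadata field carrying the ID is enough to add the transaction to the issuer's history. A minimal sketch of that extraction (include paths and the MPTID alias assumed from the libxrpl headers):

    #include <xrpl/protocol/AccountID.h>
    #include <xrpl/protocol/MPTIssue.h>
    #include <xrpl/protocol/UintTypes.h>

    using namespace ripple;

    // Recover the issuer embedded in a 192-bit MPTokenIssuanceID -- the same
    // expression TxMeta::getAffectedAccounts now uses for sfMPTokenIssuanceID.
    AccountID
    issuerOfIssuance(MPTID const& issuanceID)
    {
        return MPTIssue(issuanceID).getIssuer();
    }

The new AccountTx test exercises this end to end: after each MPTokenIssuanceCreate, MPTokenAuthorize and Payment, it queries `account_tx` for the issuer and checks the latest entry's TransactionType and hash.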
--- src/libxrpl/protocol/TxMeta.cpp | 12 +++++ src/test/app/TxQ_test.cpp | 34 -------------- src/test/jtx/envconfig.h | 5 ++ src/test/jtx/impl/envconfig.cpp | 33 +++++++++++++ src/test/rpc/AccountTx_test.cpp | 83 +++++++++++++++++++++++++++++++++ 5 files changed, 133 insertions(+), 34 deletions(-) diff --git a/src/libxrpl/protocol/TxMeta.cpp b/src/libxrpl/protocol/TxMeta.cpp index 2083fc8eaf..2343a6a794 100644 --- a/src/libxrpl/protocol/TxMeta.cpp +++ b/src/libxrpl/protocol/TxMeta.cpp @@ -185,6 +185,18 @@ TxMeta::getAffectedAccounts() const { auto issuer = lim->getIssuer(); + if (issuer.isNonZero()) + list.insert(issuer); + } + } + else if (field.getFName() == sfMPTokenIssuanceID) + { + auto mptID = + dynamic_cast const*>(&field); + if (mptID != nullptr) + { + auto issuer = MPTIssue(mptID->value()).getIssuer(); + if (issuer.isNonZero()) list.insert(issuer); } diff --git a/src/test/app/TxQ_test.cpp b/src/test/app/TxQ_test.cpp index 947640495d..d0965cc8ff 100644 --- a/src/test/app/TxQ_test.cpp +++ b/src/test/app/TxQ_test.cpp @@ -99,40 +99,6 @@ class TxQPosNegFlows_test : public beast::unit_test::suite return calcMedFeeLevel(feeLevel, feeLevel); } - static std::unique_ptr - makeConfig( - std::map extraTxQ = {}, - std::map extraVoting = {}) - { - auto p = test::jtx::envconfig(); - auto& section = p->section("transaction_queue"); - section.set("ledgers_in_queue", "2"); - section.set("minimum_queue_size", "2"); - section.set("min_ledgers_to_compute_size_limit", "3"); - section.set("max_ledger_counts_to_store", "100"); - section.set("retry_sequence_percent", "25"); - section.set("normal_consensus_increase_percent", "0"); - - for (auto const& [k, v] : extraTxQ) - section.set(k, v); - - // Some tests specify different fee settings that are enabled by - // a FeeVote - if (!extraVoting.empty()) - { - auto& votingSection = p->section("voting"); - for (auto const& [k, v] : extraVoting) - { - votingSection.set(k, v); - } - - // In order for the vote to occur, we must run as a validator - p->section("validation_seed") - .legacy("shUwVw52ofnCUX5m7kPTKzJdr4HEH"); - } - return p; - } - std::size_t initFee( jtx::Env& env, diff --git a/src/test/jtx/envconfig.h b/src/test/jtx/envconfig.h index f22c5743e7..432ef28ff6 100644 --- a/src/test/jtx/envconfig.h +++ b/src/test/jtx/envconfig.h @@ -127,6 +127,11 @@ addGrpcConfigWithSecureGateway( std::unique_ptr, std::string const& secureGateway); +std::unique_ptr +makeConfig( + std::map extraTxQ = {}, + std::map extraVoting = {}); + } // namespace jtx } // namespace test } // namespace ripple diff --git a/src/test/jtx/impl/envconfig.cpp b/src/test/jtx/impl/envconfig.cpp index dd9c735465..624036196d 100644 --- a/src/test/jtx/impl/envconfig.cpp +++ b/src/test/jtx/impl/envconfig.cpp @@ -140,6 +140,39 @@ addGrpcConfigWithSecureGateway( return cfg; } +std::unique_ptr +makeConfig( + std::map extraTxQ, + std::map extraVoting) +{ + auto p = test::jtx::envconfig(); + auto& section = p->section("transaction_queue"); + section.set("ledgers_in_queue", "2"); + section.set("minimum_queue_size", "2"); + section.set("min_ledgers_to_compute_size_limit", "3"); + section.set("max_ledger_counts_to_store", "100"); + section.set("retry_sequence_percent", "25"); + section.set("normal_consensus_increase_percent", "0"); + + for (auto const& [k, v] : extraTxQ) + section.set(k, v); + + // Some tests specify different fee settings that are enabled by + // a FeeVote + if (!extraVoting.empty()) + { + auto& votingSection = p->section("voting"); + for (auto const& [k, v] : extraVoting) + { + 
votingSection.set(k, v); + } + + // In order for the vote to occur, we must run as a validator + p->section("validation_seed").legacy("shUwVw52ofnCUX5m7kPTKzJdr4HEH"); + } + return p; +} + } // namespace jtx } // namespace test } // namespace ripple diff --git a/src/test/rpc/AccountTx_test.cpp b/src/test/rpc/AccountTx_test.cpp index 6e25c26e58..82809b5c5b 100644 --- a/src/test/rpc/AccountTx_test.cpp +++ b/src/test/rpc/AccountTx_test.cpp @@ -18,9 +18,12 @@ //============================================================================== #include +#include #include +#include #include +#include #include #include @@ -753,6 +756,85 @@ class AccountTx_test : public beast::unit_test::suite } } + void + testMPT() + { + testcase("MPT"); + + using namespace test::jtx; + using namespace std::chrono_literals; + + auto cfg = makeConfig(); + cfg->FEES.reference_fee = 10; + Env env(*this, std::move(cfg)); + + Account const alice{"alice"}; + Account const bob{"bob"}; + Account const carol{"carol"}; + + MPTTester mptAlice(env, alice, {.holders = {bob, carol}}); + + // check the latest mpt-related txn is in alice's account history + auto const checkAliceAcctTx = [&](size_t size, + Json::StaticString txType) { + Json::Value params; + params[jss::account] = alice.human(); + params[jss::limit] = 100; + auto const jv = + env.rpc("json", "account_tx", to_string(params))[jss::result]; + + BEAST_EXPECT(jv[jss::transactions].size() == size); + auto const& tx0(jv[jss::transactions][0u][jss::tx]); + BEAST_EXPECT(tx0[jss::TransactionType] == txType); + + std::string const txHash{ + env.tx()->getJson(JsonOptions::none)[jss::hash].asString()}; + BEAST_EXPECT(tx0[jss::hash] == txHash); + }; + + // alice creates issuance + mptAlice.create( + {.ownerCount = 1, + .holderCount = 0, + .flags = tfMPTCanClawback | tfMPTRequireAuth | tfMPTCanTransfer}); + + checkAliceAcctTx(3, jss::MPTokenIssuanceCreate); + + // bob creates a MPToken; + mptAlice.authorize({.account = bob}); + checkAliceAcctTx(4, jss::MPTokenAuthorize); + env.close(); + + // TODO: windows pipeline fails validation for the hardcoded ledger hash + // due to having different test config, it can be uncommented after + // figuring out what happened + // + // ledger hash should be fixed regardless any change to account history + // BEAST_EXPECT( + // to_string(env.closed()->info().hash) == + // "0BD507BB87D3C0E73B462485E6E381798A8C82FC49BF17FE39C60E08A1AF035D"); + + // alice authorizes bob + mptAlice.authorize({.account = alice, .holder = bob}); + checkAliceAcctTx(5, jss::MPTokenAuthorize); + + // carol creates a MPToken; + mptAlice.authorize({.account = carol}); + checkAliceAcctTx(6, jss::MPTokenAuthorize); + + // alice authorizes carol + mptAlice.authorize({.account = alice, .holder = carol}); + checkAliceAcctTx(7, jss::MPTokenAuthorize); + + // alice pays bob 100 tokens + mptAlice.pay(alice, bob, 100); + checkAliceAcctTx(8, jss::Payment); + + // bob pays carol 10 tokens + mptAlice.pay(bob, carol, 10); + checkAliceAcctTx(9, jss::Payment); + } + public: void run() override @@ -761,6 +843,7 @@ public: std::bind_front(&AccountTx_test::testParameters, this)); testContents(); testAccountDelete(); + testMPT(); } }; BEAST_DEFINE_TESTSUITE(AccountTx, rpc, ripple); From 258ba71363317742aa346fd516da5aabad01d51a Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Fri, 11 Jul 2025 19:57:09 +0100 Subject: [PATCH 075/244] chore: Add gcc-12 workaround (#5554) This change silences a dummy warning, which is breaking builds with GCC 12 (but not newer versions of GCC) in release mode 
only.
---
 include/xrpl/beast/utility/rngfill.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/include/xrpl/beast/utility/rngfill.h b/include/xrpl/beast/utility/rngfill.h
index 0188e5c529..2b5a9ba040 100644
--- a/include/xrpl/beast/utility/rngfill.h
+++ b/include/xrpl/beast/utility/rngfill.h
@@ -48,8 +48,10 @@ rngfill(void* buffer, std::size_t bytes, Generator& g)
 #ifdef __GNUC__
     // gcc 11.1 (falsely) warns about an array-bounds overflow in release mode.
+    // gcc 12.1 (also falsely) warns about a string overflow in release mode.
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Warray-bounds"
+#pragma GCC diagnostic ignored "-Wstringop-overflow"
 #endif
     if (bytes > 0)

From 8aa94ea09af1eef8df54b0b2b3a0373400a09b34 Mon Sep 17 00:00:00 2001
From: yinyiqian1
Date: Fri, 11 Jul 2025 16:03:28 -0400
Subject: [PATCH 076/244] fixAMMClawbackRounding: adjust last holder's LPToken balance (#5513)

Due to rounding, the LPTokenBalance of the last LP might not match the LP's trustline balance. This was fixed for `AMMWithdraw` in `fixAMMv1_1` by adjusting the LPTokenBalance to be the same as the trustline balance. Since `AMMClawback` also performs a withdrawal, we need to adjust the LPTokenBalance in `AMMClawback` as well.

This change includes:
1. Refactored the `verifyAndAdjustLPTokenBalance` function in `AMMUtils`, which both `AMMWithdraw` and `AMMClawback` call to adjust the LPTokenBalance.
2. Added the unit test `testLastHolderLPTokenBalance` to cover the scenario.
3. Modified the existing unit tests for `fixAMMClawbackRounding`.
---
 include/xrpl/protocol/detail/features.macro | 1 +
 src/test/app/AMMClawback_test.cpp | 787 +++++++++++++++-----
 src/xrpld/app/misc/AMMUtils.h | 11 +
 src/xrpld/app/misc/detail/AMMUtils.cpp | 29 +
 src/xrpld/app/tx/detail/AMMClawback.cpp | 53 +-
 src/xrpld/app/tx/detail/AMMWithdraw.cpp | 17 +-
 6 files changed, 693 insertions(+), 205 deletions(-)

diff --git a/include/xrpl/protocol/detail/features.macro b/include/xrpl/protocol/detail/features.macro
index 3584d8f8cf..93b4dedae3 100644
--- a/include/xrpl/protocol/detail/features.macro
+++ b/include/xrpl/protocol/detail/features.macro
@@ -35,6 +35,7 @@
 // If you add an amendment here, then do not forget to increment `numFeatures`
 // in include/xrpl/protocol/Feature.h.
+XRPL_FIX (AMMClawbackRounding, Supported::no, VoteBehavior::DefaultNo)
 XRPL_FEATURE(TokenEscrow, Supported::yes, VoteBehavior::DefaultNo)
 XRPL_FIX (EnforceNFTokenTrustlineV2, Supported::yes, VoteBehavior::DefaultNo)
 XRPL_FIX (AMMv1_3, Supported::yes, VoteBehavior::DefaultNo)
diff --git a/src/test/app/AMMClawback_test.cpp b/src/test/app/AMMClawback_test.cpp
index 77e908d5fe..83257f0755 100644
--- a/src/test/app/AMMClawback_test.cpp
+++ b/src/test/app/AMMClawback_test.cpp
@@ -17,26 +17,25 @@
 #include
 #include
-#include
-#include
+#include
+
+#include
 #include
-#include
-
 namespace ripple {
 namespace test {
-class AMMClawback_test : public jtx::AMMTest
+class AMMClawback_test : public beast::unit_test::suite
 {
     void
-    testInvalidRequest(FeatureBitset features)
+    testInvalidRequest()
     {
         testcase("test invalid request");
         using namespace jtx;
         // Test if holder does not exist.
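        // Aside on the rounding tolerance described in the commit message of
        // this patch: a rough sketch using plain doubles rather than the
        // library's Number/STAmount types. The helper name below is
        // illustrative, not rippled's withinRelativeDistance, and the 1e-3
        // bound simply mirrors the Number{1, -3} passed in
        // verifyAndAdjustLPTokenBalance later in this diff. For the balances
        // observed in testLastHolderLPTokenBalance (a trustline balance of
        // 1414.21356237366 LPTokens against a recorded LPTokenBalance of
        // 1414.213562374), the relative distance is roughly 2e-13, far inside
        // the tolerance, so the AMM's recorded balance is snapped to the
        // holder's balance before the clawback withdrawal proceeds.
        auto const withinRelativeToleranceSketch = [](double a, double b) {
            double const tol = 1e-3;  // same order as Number{1, -3}
            double const larger = std::fabs(a) > std::fabs(b)
                ? std::fabs(a)
                : std::fabs(b);  // std::fabs comes from <cmath>
            return larger == 0.0 || std::fabs(a - b) / larger <= tol;
        };
        // e.g. withinRelativeToleranceSketch(1414.21356237366, 1414.213562374)
        // evaluates to true.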
{ - Env env(*this, features); + Env env(*this); Account gw{"gateway"}; Account alice{"alice"}; env.fund(XRP(100000), gw, alice); @@ -47,8 +46,9 @@ class AMMClawback_test : public jtx::AMMTest env.close(); env.require(flags(gw, asfAllowTrustLineClawback)); + auto const USD = gw["USD"]; env.trust(USD(10000), alice); - env(pay(gw, alice, gw["USD"](100))); + env(pay(gw, alice, USD(100))); AMM amm(env, alice, XRP(100), USD(100)); env.close(); @@ -61,7 +61,7 @@ class AMMClawback_test : public jtx::AMMTest // Test if asset pair provided does not exist. This should // return terNO_AMM error. { - Env env(*this, features); + Env env(*this); Account gw{"gateway"}; Account alice{"alice"}; env.fund(XRP(100000), gw, alice); @@ -87,14 +87,14 @@ class AMMClawback_test : public jtx::AMMTest // The AMM account does not exist at all now. // It should return terNO_AMM error. - env(amm::ammClawback(gw, alice, USD, EUR, std::nullopt), + env(amm::ammClawback(gw, alice, USD, gw["EUR"], std::nullopt), ter(terNO_AMM)); } // Test if the issuer field and holder field is the same. This should // return temMALFORMED error. { - Env env(*this, features); + Env env(*this); Account gw{"gateway"}; Account alice{"alice"}; env.fund(XRP(10000), gw, alice); @@ -124,7 +124,7 @@ class AMMClawback_test : public jtx::AMMTest // Test if the Asset field matches the Account field. { - Env env(*this, features); + Env env(*this); Account gw{"gateway"}; Account alice{"alice"}; env.fund(XRP(10000), gw, alice); @@ -156,7 +156,7 @@ class AMMClawback_test : public jtx::AMMTest // Test if the Amount field matches the Asset field. { - Env env(*this, features); + Env env(*this); Account gw{"gateway"}; Account alice{"alice"}; env.fund(XRP(10000), gw, alice); @@ -189,7 +189,7 @@ class AMMClawback_test : public jtx::AMMTest // Test if the Amount is invalid, which is less than zero. { - Env env(*this, features); + Env env(*this); Account gw{"gateway"}; Account alice{"alice"}; env.fund(XRP(10000), gw, alice); @@ -230,7 +230,7 @@ class AMMClawback_test : public jtx::AMMTest // Test if the issuer did not set asfAllowTrustLineClawback, AMMClawback // transaction is prohibited. { - Env env(*this, features); + Env env(*this); Account gw{"gateway"}; Account alice{"alice"}; env.fund(XRP(10000), gw, alice); @@ -241,7 +241,7 @@ class AMMClawback_test : public jtx::AMMTest env.trust(USD(1000), alice); env(pay(gw, alice, USD(100))); env.close(); - env.require(balance(alice, gw["USD"](100))); + env.require(balance(alice, USD(100))); env.require(balance(gw, alice["USD"](-100))); // gw creates AMM pool of XRP/USD. @@ -255,7 +255,7 @@ class AMMClawback_test : public jtx::AMMTest // Test invalid flag. { - Env env(*this, features); + Env env(*this); Account gw{"gateway"}; Account alice{"alice"}; env.fund(XRP(10000), gw, alice); @@ -283,7 +283,7 @@ class AMMClawback_test : public jtx::AMMTest // Test if tfClawTwoAssets is set when the two assets in the AMM pool // are not issued by the same issuer. { - Env env(*this, features); + Env env(*this); Account gw{"gateway"}; Account alice{"alice"}; env.fund(XRP(10000), gw, alice); @@ -314,7 +314,7 @@ class AMMClawback_test : public jtx::AMMTest // Test clawing back XRP is being prohibited. 
{ - Env env(*this, features); + Env env(*this); Account gw{"gateway"}; Account alice{"alice"}; env.fund(XRP(1000000), gw, alice); @@ -400,7 +400,7 @@ class AMMClawback_test : public jtx::AMMTest env(pay(gw, alice, USD(3000))); env.close(); env.require(balance(gw, alice["USD"](-3000))); - env.require(balance(alice, gw["USD"](3000))); + env.require(balance(alice, USD(3000))); // gw2 issues 3000 EUR to Alice. auto const EUR = gw2["EUR"]; @@ -408,7 +408,7 @@ class AMMClawback_test : public jtx::AMMTest env(pay(gw2, alice, EUR(3000))); env.close(); env.require(balance(gw2, alice["EUR"](-3000))); - env.require(balance(alice, gw2["EUR"](3000))); + env.require(balance(alice, EUR(3000))); // Alice creates AMM pool of EUR/USD. AMM amm(env, alice, EUR(1000), USD(2000), ter(tesSUCCESS)); @@ -426,13 +426,13 @@ class AMMClawback_test : public jtx::AMMTest // USD into the pool, then she has 1000 USD. And 1000 USD was clawed // back from the AMM pool, so she still has 1000 USD. env.require(balance(gw, alice["USD"](-1000))); - env.require(balance(alice, gw["USD"](1000))); + env.require(balance(alice, USD(1000))); // Alice's initial balance for EUR is 3000 EUR. Alice deposited 1000 // EUR into the pool, 500 EUR was withdrawn proportionally. So she // has 2500 EUR now. env.require(balance(gw2, alice["EUR"](-2500))); - env.require(balance(alice, gw2["EUR"](2500))); + env.require(balance(alice, EUR(2500))); // 1000 USD and 500 EUR was withdrawn from the AMM pool, so the // current balance is 1000 USD and 500 EUR. @@ -452,12 +452,12 @@ class AMMClawback_test : public jtx::AMMTest // Alice should still has 1000 USD because gw clawed back from the // AMM pool. env.require(balance(gw, alice["USD"](-1000))); - env.require(balance(alice, gw["USD"](1000))); + env.require(balance(alice, USD(1000))); // Alice should has 3000 EUR now because another 500 EUR was // withdrawn. env.require(balance(gw2, alice["EUR"](-3000))); - env.require(balance(alice, gw2["EUR"](3000))); + env.require(balance(alice, EUR(3000))); // amm is automatically deleted. BEAST_EXPECT(!amm.ammExists()); @@ -483,7 +483,7 @@ class AMMClawback_test : public jtx::AMMTest env(pay(gw, alice, USD(3000))); env.close(); env.require(balance(gw, alice["USD"](-3000))); - env.require(balance(alice, gw["USD"](3000))); + env.require(balance(alice, USD(3000))); // Alice creates AMM pool of XRP/USD. AMM amm(env, alice, XRP(1000), USD(2000), ter(tesSUCCESS)); @@ -503,11 +503,12 @@ class AMMClawback_test : public jtx::AMMTest // USD into the pool, then she has 1000 USD. And 1000 USD was clawed // back from the AMM pool, so she still has 1000 USD. env.require(balance(gw, alice["USD"](-1000))); - env.require(balance(alice, gw["USD"](1000))); + env.require(balance(alice, USD(1000))); // Alice will get 500 XRP back. BEAST_EXPECT( expectLedgerEntryRoot(env, alice, aliceXrpBalance + XRP(500))); + aliceXrpBalance = env.balance(alice, XRP); // 1000 USD and 500 XRP was withdrawn from the AMM pool, so the // current balance is 1000 USD and 500 XRP. @@ -527,11 +528,11 @@ class AMMClawback_test : public jtx::AMMTest // Alice should still has 1000 USD because gw clawed back from the // AMM pool. env.require(balance(gw, alice["USD"](-1000))); - env.require(balance(alice, gw["USD"](1000))); + env.require(balance(alice, USD(1000))); - // Alice will get another 1000 XRP back. + // Alice will get another 500 XRP back. 
BEAST_EXPECT( - expectLedgerEntryRoot(env, alice, aliceXrpBalance + XRP(1000))); + expectLedgerEntryRoot(env, alice, aliceXrpBalance + XRP(500))); // amm is automatically deleted. BEAST_EXPECT(!amm.ammExists()); @@ -568,14 +569,14 @@ class AMMClawback_test : public jtx::AMMTest env.trust(USD(100000), alice); env(pay(gw, alice, USD(6000))); env.close(); - env.require(balance(alice, gw["USD"](6000))); + env.require(balance(alice, USD(6000))); // gw2 issues 6000 EUR to Alice. auto const EUR = gw2["EUR"]; env.trust(EUR(100000), alice); env(pay(gw2, alice, EUR(6000))); env.close(); - env.require(balance(alice, gw2["EUR"](6000))); + env.require(balance(alice, EUR(6000))); // Alice creates AMM pool of EUR/USD AMM amm(env, alice, EUR(5000), USD(4000), ter(tesSUCCESS)); @@ -596,12 +597,12 @@ class AMMClawback_test : public jtx::AMMTest // Alice's initial balance for USD is 6000 USD. Alice deposited 4000 // USD into the pool, then she has 2000 USD. And 1000 USD was clawed // back from the AMM pool, so she still has 2000 USD. - env.require(balance(alice, gw["USD"](2000))); + env.require(balance(alice, USD(2000))); // Alice's initial balance for EUR is 6000 EUR. Alice deposited 5000 // EUR into the pool, 1250 EUR was withdrawn proportionally. So she // has 2500 EUR now. - env.require(balance(alice, gw2["EUR"](2250))); + env.require(balance(alice, EUR(2250))); // 1000 USD and 1250 EUR was withdrawn from the AMM pool, so the // current balance is 3000 USD and 3750 EUR. @@ -627,7 +628,7 @@ class AMMClawback_test : public jtx::AMMTest // Alice should still has 2000 USD because gw clawed back from the // AMM pool. - env.require(balance(alice, gw["USD"](2000))); + env.require(balance(alice, USD(2000))); if (!features[fixAMMv1_3]) BEAST_EXPECT(amm.expectBalances( @@ -650,23 +651,32 @@ class AMMClawback_test : public jtx::AMMTest env.close(); // Another 1 USD / 1.25 EUR was withdrawn. - env.require(balance(alice, gw["USD"](2000))); + env.require(balance(alice, USD(2000))); - if (!features[fixAMMv1_3]) + if (!features[fixAMMv1_3] && !features[fixAMMClawbackRounding]) BEAST_EXPECT(amm.expectBalances( STAmount{USD, UINT64_C(2499000000000002), -12}, STAmount{EUR, UINT64_C(3123750000000002), -12}, IOUAmount{2793966937885989, -12})); - else + else if (!features[fixAMMClawbackRounding]) BEAST_EXPECT(amm.expectBalances( USD(2499), EUR(3123.75), IOUAmount{2793966937885987, -12})); + else if (features[fixAMMClawbackRounding] && features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectBalances( + STAmount{USD, UINT64_C(2499000000000001), -12}, + STAmount{EUR, UINT64_C(3123750000000001), -12}, + IOUAmount{2793966937885988, -12})); - if (!features[fixAMMv1_3]) + if (!features[fixAMMv1_3] && !features[fixAMMClawbackRounding]) BEAST_EXPECT( env.balance(alice, EUR) == - STAmount(EUR, UINT64_C(2'876'249999999998), -12)); - else + STAmount(EUR, UINT64_C(2876'249999999998), -12)); + else if (!features[fixAMMClawbackRounding]) BEAST_EXPECT(env.balance(alice, EUR) == EUR(2876.25)); + else if (features[fixAMMClawbackRounding] && features[fixAMMv1_3]) + BEAST_EXPECT( + env.balance(alice, EUR) == + STAmount(EUR, UINT64_C(2876'249999999999), -12)); // gw clawback 4000 USD, exceeding the current balance. We // will clawback all. @@ -674,7 +684,7 @@ class AMMClawback_test : public jtx::AMMTest ter(tesSUCCESS)); env.close(); - env.require(balance(alice, gw["USD"](2000))); + env.require(balance(alice, USD(2000))); // All alice's EUR in the pool goes back to alice. 
BEAST_EXPECT( @@ -745,6 +755,7 @@ class AMMClawback_test : public jtx::AMMTest else BEAST_EXPECT(amm2.expectBalances( EUR(1000), XRP(3000), IOUAmount{1732050807568877, -9})); + amm2.deposit(alice, EUR(1000), XRP(3000)); if (!features[fixAMMv1_3]) BEAST_EXPECT(amm2.expectBalances( @@ -752,6 +763,7 @@ class AMMClawback_test : public jtx::AMMTest else BEAST_EXPECT(amm2.expectBalances( EUR(2000), XRP(6000), IOUAmount{3464101615137754, -9})); + amm2.deposit(bob, EUR(1000), XRP(3000)); if (!features[fixAMMv1_3]) BEAST_EXPECT(amm2.expectBalances( @@ -772,27 +784,42 @@ class AMMClawback_test : public jtx::AMMTest // Alice's initial balance for USD is 6000 USD. Alice deposited 1000 // USD into the pool, then she has 5000 USD. And 500 USD was clawed // back from the AMM pool, so she still has 5000 USD. - env.require(balance(alice, gw["USD"](5000))); + env.require(balance(alice, USD(5000))); // Bob's balance is not changed. - env.require(balance(bob, gw["USD"](4000))); + env.require(balance(bob, USD(4000))); // Alice gets 1000 XRP back. - BEAST_EXPECT( - expectLedgerEntryRoot(env, alice, aliceXrpBalance + XRP(1000))); + if (features[fixAMMClawbackRounding] && features[fixAMMv1_3]) + BEAST_EXPECT(expectLedgerEntryRoot( + env, alice, aliceXrpBalance + XRP(1000) - XRPAmount(1))); + else + BEAST_EXPECT(expectLedgerEntryRoot( + env, alice, aliceXrpBalance + XRP(1000))); + aliceXrpBalance = env.balance(alice, XRP); - if (!features[fixAMMv1_3]) + if (!features[fixAMMv1_3] && !features[fixAMMClawbackRounding]) BEAST_EXPECT(amm.expectBalances( USD(2500), XRP(5000), IOUAmount{3535533905932738, -9})); - else + else if (!features[fixAMMClawbackRounding]) BEAST_EXPECT(amm.expectBalances( USD(2500), XRP(5000), IOUAmount{3535533905932737, -9})); - if (!features[fixAMMv1_3]) + else if (features[fixAMMClawbackRounding] && features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectBalances( + USD(2500), + XRPAmount(5000000001), + IOUAmount{3'535'533'905932738, -9})); + + if (!features[fixAMMv1_3] && !features[fixAMMClawbackRounding]) BEAST_EXPECT(amm.expectLPTokens( alice, IOUAmount{7071067811865480, -10})); - else + else if (!features[fixAMMClawbackRounding]) BEAST_EXPECT(amm.expectLPTokens( alice, IOUAmount{7071067811865474, -10})); + else if (features[fixAMMClawbackRounding] && features[fixAMMv1_3]) + BEAST_EXPECT( + amm.expectLPTokens(alice, IOUAmount{707106781186548, -9})); + BEAST_EXPECT( amm.expectLPTokens(bob, IOUAmount{1414213562373095, -9})); @@ -800,50 +827,79 @@ class AMMClawback_test : public jtx::AMMTest env(amm::ammClawback(gw, bob, USD, XRP, USD(10)), ter(tesSUCCESS)); env.close(); - env.require(balance(alice, gw["USD"](5000))); - env.require(balance(bob, gw["USD"](4000))); + env.require(balance(alice, USD(5000))); + env.require(balance(bob, USD(4000))); // Bob gets 20 XRP back. 
BEAST_EXPECT( expectLedgerEntryRoot(env, bob, bobXrpBalance + XRP(20))); - if (!features[fixAMMv1_3]) + bobXrpBalance = env.balance(bob, XRP); + + if (!features[fixAMMv1_3] && !features[fixAMMClawbackRounding]) BEAST_EXPECT(amm.expectBalances( STAmount{USD, UINT64_C(2490000000000001), -12}, XRP(4980), IOUAmount{3521391770309008, -9})); - else + else if (!features[fixAMMClawbackRounding]) BEAST_EXPECT(amm.expectBalances( USD(2'490), XRP(4980), IOUAmount{3521391770309006, -9})); - if (!features[fixAMMv1_3]) + else if (features[fixAMMClawbackRounding] && features[fixAMMv1_3]) + BEAST_EXPECT(amm.expectBalances( + STAmount{USD, UINT64_C(2490000000000001), -12}, + XRPAmount(4980000001), + IOUAmount{3521391'770309008, -9})); + + if (!features[fixAMMv1_3] && !features[fixAMMClawbackRounding]) BEAST_EXPECT(amm.expectLPTokens( alice, IOUAmount{7071067811865480, -10})); - else + else if (!features[fixAMMClawbackRounding]) BEAST_EXPECT(amm.expectLPTokens( alice, IOUAmount{7071067811865474, -10})); - if (!features[fixAMMv1_3]) + else if (features[fixAMMClawbackRounding] && features[fixAMMv1_3]) + BEAST_EXPECT( + amm.expectLPTokens(alice, IOUAmount{707106781186548, -9})); + + if (!features[fixAMMv1_3] && !features[fixAMMClawbackRounding]) BEAST_EXPECT( amm.expectLPTokens(bob, IOUAmount{1400071426749365, -9})); - else + else if (!features[fixAMMClawbackRounding]) BEAST_EXPECT( amm.expectLPTokens(bob, IOUAmount{1400071426749364, -9})); + else if (features[fixAMMClawbackRounding] && features[fixAMMv1_3]) + BEAST_EXPECT( + amm.expectLPTokens(bob, IOUAmount{1400071426749365, -9})); // gw2 clawback 200 EUR from amm2. env(amm::ammClawback(gw2, alice, EUR, XRP, EUR(200)), ter(tesSUCCESS)); env.close(); - env.require(balance(alice, gw2["EUR"](4000))); - env.require(balance(bob, gw2["EUR"](3000))); + env.require(balance(alice, EUR(4000))); + env.require(balance(bob, EUR(3000))); - // Alice gets 600 XRP back. - BEAST_EXPECT(expectLedgerEntryRoot( - env, alice, aliceXrpBalance + XRP(1000) + XRP(600))); - if (!features[fixAMMv1_3]) + if (!features[fixAMMv1_3] && !features[fixAMMClawbackRounding]) + BEAST_EXPECT(expectLedgerEntryRoot( + env, alice, aliceXrpBalance + XRP(600))); + else if (!features[fixAMMClawbackRounding]) + BEAST_EXPECT(expectLedgerEntryRoot( + env, alice, aliceXrpBalance + XRP(600))); + else if (features[fixAMMClawbackRounding] && features[fixAMMv1_3]) + BEAST_EXPECT(expectLedgerEntryRoot( + env, alice, aliceXrpBalance + XRP(600) - XRPAmount{1})); + aliceXrpBalance = env.balance(alice, XRP); + + if (!features[fixAMMv1_3] && !features[fixAMMClawbackRounding]) BEAST_EXPECT(amm2.expectBalances( EUR(2800), XRP(8400), IOUAmount{4849742261192859, -9})); - else + else if (!features[fixAMMClawbackRounding]) BEAST_EXPECT(amm2.expectBalances( EUR(2800), XRP(8400), IOUAmount{4849742261192856, -9})); + else if (features[fixAMMv1_3] && features[fixAMMClawbackRounding]) + BEAST_EXPECT(amm2.expectBalances( + EUR(2800), + XRPAmount(8400000001), + IOUAmount{4849742261192856, -9})); + if (!features[fixAMMv1_3]) BEAST_EXPECT(amm2.expectLPTokens( alice, IOUAmount{1385640646055103, -9})); @@ -864,38 +920,47 @@ class AMMClawback_test : public jtx::AMMTest ter(tesSUCCESS)); env.close(); - env.require(balance(alice, gw["USD"](5000))); - env.require(balance(bob, gw["USD"](4000))); + env.require(balance(alice, USD(5000))); + env.require(balance(bob, USD(4000))); // Alice gets 1000 XRP back. 
- if (!features[fixAMMv1_3]) + if (!features[fixAMMv1_3] && !features[fixAMMClawbackRounding]) BEAST_EXPECT(expectLedgerEntryRoot( - env, - alice, - aliceXrpBalance + XRP(1000) + XRP(600) + XRP(1000))); - else + env, alice, aliceXrpBalance + XRP(1000))); + else if (!features[fixAMMClawbackRounding]) BEAST_EXPECT(expectLedgerEntryRoot( - env, - alice, - aliceXrpBalance + XRP(1000) + XRP(600) + XRP(1000) - - XRPAmount{1})); + env, alice, aliceXrpBalance + XRP(1000) - XRPAmount{1})); + else if (features[fixAMMv1_3] && features[fixAMMClawbackRounding]) + BEAST_EXPECT(expectLedgerEntryRoot( + env, alice, aliceXrpBalance + XRP(1000))); + aliceXrpBalance = env.balance(alice, XRP); + BEAST_EXPECT(amm.expectLPTokens(alice, IOUAmount(0))); - if (!features[fixAMMv1_3]) + if (!features[fixAMMv1_3] && !features[fixAMMClawbackRounding]) BEAST_EXPECT( amm.expectLPTokens(bob, IOUAmount{1400071426749365, -9})); - else + else if (!features[fixAMMClawbackRounding]) BEAST_EXPECT( amm.expectLPTokens(bob, IOUAmount{1400071426749364, -9})); - if (!features[fixAMMv1_3]) + else if (features[fixAMMClawbackRounding] && features[fixAMMv1_3]) + BEAST_EXPECT( + amm.expectLPTokens(bob, IOUAmount{1400071426749365, -9})); + + if (!features[fixAMMv1_3] && !features[fixAMMClawbackRounding]) BEAST_EXPECT(amm.expectBalances( STAmount{USD, UINT64_C(1990000000000001), -12}, XRP(3980), IOUAmount{2814284989122460, -9})); - else + else if (!features[fixAMMClawbackRounding]) BEAST_EXPECT(amm.expectBalances( USD(1'990), XRPAmount{3'980'000'001}, IOUAmount{2814284989122459, -9})); + else if (features[fixAMMv1_3] && features[fixAMMClawbackRounding]) + BEAST_EXPECT(amm.expectBalances( + STAmount{USD, UINT64_C(1990000000000001), -12}, + XRPAmount{3'980'000'001}, + IOUAmount{2814284989122460, -9})); // gw clawback 1000 USD from bob in amm, which also exceeds bob's // balance in amm. All bob's lptoken in amm will be consumed, which @@ -904,22 +969,14 @@ class AMMClawback_test : public jtx::AMMTest ter(tesSUCCESS)); env.close(); - env.require(balance(alice, gw["USD"](5000))); - env.require(balance(bob, gw["USD"](4000))); + env.require(balance(alice, USD(5000))); + env.require(balance(bob, USD(4000))); - if (!features[fixAMMv1_3]) - BEAST_EXPECT(expectLedgerEntryRoot( - env, - alice, - aliceXrpBalance + XRP(1000) + XRP(600) + XRP(1000))); - else - BEAST_EXPECT(expectLedgerEntryRoot( - env, - alice, - aliceXrpBalance + XRP(1000) + XRP(600) + XRP(1000) - - XRPAmount{1})); - BEAST_EXPECT(expectLedgerEntryRoot( - env, bob, bobXrpBalance + XRP(20) + XRP(1980))); + BEAST_EXPECT(expectLedgerEntryRoot(env, alice, aliceXrpBalance)); + + BEAST_EXPECT( + expectLedgerEntryRoot(env, bob, bobXrpBalance + XRP(1980))); + bobXrpBalance = env.balance(bob, XRP); // Now neither alice nor bob has any lptoken in amm. BEAST_EXPECT(amm.expectLPTokens(alice, IOUAmount(0))); @@ -932,35 +989,31 @@ class AMMClawback_test : public jtx::AMMTest ter(tesSUCCESS)); env.close(); - env.require(balance(alice, gw2["EUR"](4000))); - env.require(balance(bob, gw2["EUR"](3000))); + env.require(balance(alice, EUR(4000))); + env.require(balance(bob, EUR(3000))); // Alice gets another 2400 XRP back, bob's XRP balance remains the // same. 
- if (!features[fixAMMv1_3]) - BEAST_EXPECT(expectLedgerEntryRoot( - env, - alice, - aliceXrpBalance + XRP(1000) + XRP(600) + XRP(1000) + - XRP(2400))); - else - BEAST_EXPECT(expectLedgerEntryRoot( - env, - alice, - aliceXrpBalance + XRP(1000) + XRP(600) + XRP(1000) + - XRP(2400) - XRPAmount{1})); - BEAST_EXPECT(expectLedgerEntryRoot( - env, bob, bobXrpBalance + XRP(20) + XRP(1980))); + BEAST_EXPECT( + expectLedgerEntryRoot(env, alice, aliceXrpBalance + XRP(2400))); + + BEAST_EXPECT(expectLedgerEntryRoot(env, bob, bobXrpBalance)); + aliceXrpBalance = env.balance(alice, XRP); // Alice now does not have any lptoken in amm2 BEAST_EXPECT(amm2.expectLPTokens(alice, IOUAmount(0))); - if (!features[fixAMMv1_3]) + if (!features[fixAMMv1_3] && !features[fixAMMClawbackRounding]) BEAST_EXPECT(amm2.expectBalances( EUR(2000), XRP(6000), IOUAmount{3464101615137756, -9})); - else + else if (!features[fixAMMClawbackRounding]) BEAST_EXPECT(amm2.expectBalances( EUR(2000), XRP(6000), IOUAmount{3464101615137754, -9})); + else if (features[fixAMMv1_3] && features[fixAMMClawbackRounding]) + BEAST_EXPECT(amm2.expectBalances( + EUR(2000), + XRPAmount(6000000001), + IOUAmount{3464101615137754, -9})); // gw2 claw back 2000 EUR from bob in amm2, which exceeds bob's // balance. All bob's lptokens will be consumed, which corresponds @@ -969,36 +1022,32 @@ class AMMClawback_test : public jtx::AMMTest ter(tesSUCCESS)); env.close(); - env.require(balance(alice, gw2["EUR"](4000))); - env.require(balance(bob, gw2["EUR"](3000))); + env.require(balance(alice, EUR(4000))); + env.require(balance(bob, EUR(3000))); // Bob gets another 3000 XRP back. Alice's XRP balance remains the // same. - if (!features[fixAMMv1_3]) - BEAST_EXPECT(expectLedgerEntryRoot( - env, - alice, - aliceXrpBalance + XRP(1000) + XRP(600) + XRP(1000) + - XRP(2400))); - else - BEAST_EXPECT(expectLedgerEntryRoot( - env, - alice, - aliceXrpBalance + XRP(1000) + XRP(600) + XRP(1000) + - XRP(2400) - XRPAmount{1})); - BEAST_EXPECT(expectLedgerEntryRoot( - env, bob, bobXrpBalance + XRP(20) + XRP(1980) + XRP(3000))); + BEAST_EXPECT(expectLedgerEntryRoot(env, alice, aliceXrpBalance)); + + BEAST_EXPECT( + expectLedgerEntryRoot(env, bob, bobXrpBalance + XRP(3000))); + bobXrpBalance = env.balance(bob, XRP); // Neither alice nor bob has any lptoken in amm2 BEAST_EXPECT(amm2.expectLPTokens(alice, IOUAmount(0))); BEAST_EXPECT(amm2.expectLPTokens(bob, IOUAmount(0))); - if (!features[fixAMMv1_3]) + if (!features[fixAMMv1_3] && !features[fixAMMClawbackRounding]) BEAST_EXPECT(amm2.expectBalances( EUR(1000), XRP(3000), IOUAmount{1732050807568878, -9})); - else + else if (!features[fixAMMClawbackRounding]) BEAST_EXPECT(amm2.expectBalances( EUR(1000), XRP(3000), IOUAmount{1732050807568877, -9})); + else if (features[fixAMMv1_3] && features[fixAMMClawbackRounding]) + BEAST_EXPECT(amm2.expectBalances( + EUR(1000), + XRPAmount(3000000001), + IOUAmount{1732050807568877, -9})); } } @@ -1096,12 +1145,12 @@ class AMMClawback_test : public jtx::AMMTest BEAST_EXPECT(amm.expectLPTokens( carol, IOUAmount{1118033988749894, -12})); - env.require(balance(alice, gw["USD"](2000))); - env.require(balance(alice, gw2["EUR"](1000))); - env.require(balance(bob, gw["USD"](3000))); - env.require(balance(bob, gw2["EUR"](2500))); - env.require(balance(carol, gw["USD"](3000))); - env.require(balance(carol, gw2["EUR"](2750))); + env.require(balance(alice, USD(2000))); + env.require(balance(alice, EUR(1000))); + env.require(balance(bob, USD(3000))); + env.require(balance(bob, EUR(2500))); + 
env.require(balance(carol, USD(3000))); + env.require(balance(carol, EUR(2750))); // gw clawback all the bob's USD in amm. (2000 USD / 2500 EUR) env(amm::ammClawback(gw, bob, USD, EUR, std::nullopt), @@ -1134,8 +1183,8 @@ class AMMClawback_test : public jtx::AMMTest carol, IOUAmount{1118033988749894, -12})); // Bob will get 2500 EUR back. - env.require(balance(alice, gw["USD"](2000))); - env.require(balance(alice, gw2["EUR"](1000))); + env.require(balance(alice, USD(2000))); + env.require(balance(alice, EUR(1000))); BEAST_EXPECT( env.balance(bob, USD) == STAmount(USD, UINT64_C(3000000000000000), -12)); @@ -1148,8 +1197,8 @@ class AMMClawback_test : public jtx::AMMTest BEAST_EXPECT( env.balance(bob, EUR) == STAmount(EUR, UINT64_C(4999999999999999), -12)); - env.require(balance(carol, gw["USD"](3000))); - env.require(balance(carol, gw2["EUR"](2750))); + env.require(balance(carol, USD(3000))); + env.require(balance(carol, EUR(2750))); // gw2 clawback all carol's EUR in amm. (1000 USD / 1250 EUR) env(amm::ammClawback(gw2, carol, EUR, USD, std::nullopt), @@ -1180,8 +1229,8 @@ class AMMClawback_test : public jtx::AMMTest ter(tesSUCCESS)); env.close(); - env.require(balance(carol, gw2["EUR"](2750))); - env.require(balance(carol, gw["USD"](4000))); + env.require(balance(carol, EUR(2750))); + env.require(balance(carol, USD(4000))); BEAST_EXPECT(!amm.ammExists()); } @@ -1564,11 +1613,20 @@ class AMMClawback_test : public jtx::AMMTest // gw claws back 1000 USD from gw2. env(amm::ammClawback(gw, gw2, USD, EUR, USD(1000)), ter(tesSUCCESS)); env.close(); - BEAST_EXPECT(amm.expectBalances( - USD(5000), EUR(10000), IOUAmount{7071067811865475, -12})); + if (!features[fixAMMv1_3] || !features[fixAMMClawbackRounding]) + BEAST_EXPECT(amm.expectBalances( + USD(5000), EUR(10000), IOUAmount{7071067811865475, -12})); + else + BEAST_EXPECT(amm.expectBalances( + USD(5000), EUR(10000), IOUAmount{7071067811865474, -12})); BEAST_EXPECT(amm.expectLPTokens(gw, IOUAmount{1414213562373095, -12})); - BEAST_EXPECT(amm.expectLPTokens(gw2, IOUAmount{1414213562373095, -12})); + if (!features[fixAMMv1_3] || !features[fixAMMClawbackRounding]) + BEAST_EXPECT( + amm.expectLPTokens(gw2, IOUAmount{1414213562373095, -12})); + else + BEAST_EXPECT( + amm.expectLPTokens(gw2, IOUAmount{1414213562373094, -12})); BEAST_EXPECT( amm.expectLPTokens(alice, IOUAmount{4242640687119285, -12})); @@ -1580,22 +1638,37 @@ class AMMClawback_test : public jtx::AMMTest // gw2 claws back 1000 EUR from gw. 
env(amm::ammClawback(gw2, gw, EUR, USD, EUR(1000)), ter(tesSUCCESS)); env.close(); - if (!features[fixAMMv1_3]) + if (!features[fixAMMv1_3] && !features[fixAMMClawbackRounding]) BEAST_EXPECT(amm.expectBalances( USD(4500), STAmount(EUR, UINT64_C(9000000000000001), -12), IOUAmount{6363961030678928, -12})); - else + else if (!features[fixAMMClawbackRounding]) BEAST_EXPECT(amm.expectBalances( USD(4500), EUR(9000), IOUAmount{6363961030678928, -12})); + else if (features[fixAMMv1_3] && features[fixAMMClawbackRounding]) + BEAST_EXPECT(amm.expectBalances( + USD(4500), + STAmount(EUR, UINT64_C(9000000000000001), -12), + IOUAmount{6363961030678927, -12})); - if (!features[fixAMMv1_3]) + if (!features[fixAMMv1_3] && !features[fixAMMClawbackRounding]) BEAST_EXPECT( amm.expectLPTokens(gw, IOUAmount{7071067811865480, -13})); - else + else if (!features[fixAMMClawbackRounding]) BEAST_EXPECT( amm.expectLPTokens(gw, IOUAmount{7071067811865475, -13})); - BEAST_EXPECT(amm.expectLPTokens(gw2, IOUAmount{1414213562373095, -12})); + else if (features[fixAMMv1_3] && features[fixAMMClawbackRounding]) + BEAST_EXPECT( + amm.expectLPTokens(gw, IOUAmount{7071067811865480, -13})); + + if (!features[fixAMMv1_3] || !features[fixAMMClawbackRounding]) + BEAST_EXPECT( + amm.expectLPTokens(gw2, IOUAmount{1414213562373095, -12})); + else + BEAST_EXPECT( + amm.expectLPTokens(gw2, IOUAmount{1414213562373094, -12})); + BEAST_EXPECT( amm.expectLPTokens(alice, IOUAmount{4242640687119285, -12})); @@ -1607,22 +1680,36 @@ class AMMClawback_test : public jtx::AMMTest // gw2 claws back 4000 EUR from alice. env(amm::ammClawback(gw2, alice, EUR, USD, EUR(4000)), ter(tesSUCCESS)); env.close(); - if (!features[fixAMMv1_3]) + if (!features[fixAMMv1_3] && !features[fixAMMClawbackRounding]) BEAST_EXPECT(amm.expectBalances( USD(2500), STAmount(EUR, UINT64_C(5000000000000001), -12), IOUAmount{3535533905932738, -12})); - else + else if (!features[fixAMMClawbackRounding]) BEAST_EXPECT(amm.expectBalances( USD(2500), EUR(5000), IOUAmount{3535533905932738, -12})); + else if (features[fixAMMv1_3] && features[fixAMMClawbackRounding]) + BEAST_EXPECT(amm.expectBalances( + USD(2500), + STAmount(EUR, UINT64_C(5000000000000001), -12), + IOUAmount{3535533905932737, -12})); - if (!features[fixAMMv1_3]) + if (!features[fixAMMv1_3] && !features[fixAMMClawbackRounding]) BEAST_EXPECT( amm.expectLPTokens(gw, IOUAmount{7071067811865480, -13})); - else + else if (!features[fixAMMClawbackRounding]) BEAST_EXPECT( amm.expectLPTokens(gw, IOUAmount{7071067811865475, -13})); - BEAST_EXPECT(amm.expectLPTokens(gw2, IOUAmount{1414213562373095, -12})); + else if (features[fixAMMv1_3] && features[fixAMMClawbackRounding]) + BEAST_EXPECT( + amm.expectLPTokens(gw, IOUAmount{7071067811865480, -13})); + + if (!features[fixAMMv1_3] || !features[fixAMMClawbackRounding]) + BEAST_EXPECT( + amm.expectLPTokens(gw2, IOUAmount{1414213562373095, -12})); + else + BEAST_EXPECT( + amm.expectLPTokens(gw2, IOUAmount{1414213562373094, -12})); BEAST_EXPECT( amm.expectLPTokens(alice, IOUAmount{1414213562373095, -12})); @@ -1689,14 +1776,14 @@ class AMMClawback_test : public jtx::AMMTest env.trust(USD(100000), alice); env(pay(gw, alice, USD(3000))); env.close(); - env.require(balance(alice, gw["USD"](3000))); + env.require(balance(alice, USD(3000))); // gw2 issues 3000 EUR to Alice. 
auto const EUR = gw2["EUR"]; env.trust(EUR(100000), alice); env(pay(gw2, alice, EUR(3000))); env.close(); - env.require(balance(alice, gw2["EUR"](3000))); + env.require(balance(alice, EUR(3000))); // Alice creates AMM pool of EUR/USD. AMM amm(env, alice, EUR(1000), USD(2000), ter(tesSUCCESS)); @@ -1714,8 +1801,8 @@ class AMMClawback_test : public jtx::AMMTest ter(tesSUCCESS)); env.close(); - env.require(balance(alice, gw["USD"](1000))); - env.require(balance(alice, gw2["EUR"](2500))); + env.require(balance(alice, USD(1000))); + env.require(balance(alice, EUR(2500))); BEAST_EXPECT(amm.expectBalances( USD(1000), EUR(500), IOUAmount{7071067811865475, -13})); @@ -1731,8 +1818,8 @@ class AMMClawback_test : public jtx::AMMTest // Alice should still has 1000 USD because gw clawed back from the // AMM pool. - env.require(balance(alice, gw["USD"](1000))); - env.require(balance(alice, gw2["EUR"](3000))); + env.require(balance(alice, USD(1000))); + env.require(balance(alice, EUR(3000))); // amm is automatically deleted. BEAST_EXPECT(!amm.ammExists()); @@ -1757,14 +1844,14 @@ class AMMClawback_test : public jtx::AMMTest env.trust(USD(100000), alice); env(pay(gw, alice, USD(3000))); env.close(); - env.require(balance(alice, gw["USD"](3000))); + env.require(balance(alice, USD(3000))); // gw2 issues 3000 EUR to Alice. auto const EUR = gw2["EUR"]; env.trust(EUR(100000), alice); env(pay(gw2, alice, EUR(3000))); env.close(); - env.require(balance(alice, gw2["EUR"](3000))); + env.require(balance(alice, EUR(3000))); // Alice creates AMM pool of EUR/USD. AMM amm(env, alice, EUR(1000), USD(2000), ter(tesSUCCESS)); @@ -1783,8 +1870,8 @@ class AMMClawback_test : public jtx::AMMTest ter(tesSUCCESS)); env.close(); - env.require(balance(alice, gw["USD"](1000))); - env.require(balance(alice, gw2["EUR"](2500))); + env.require(balance(alice, USD(1000))); + env.require(balance(alice, EUR(2500))); BEAST_EXPECT(amm.expectBalances( USD(1000), EUR(500), IOUAmount{7071067811865475, -13})); BEAST_EXPECT( @@ -1810,14 +1897,14 @@ class AMMClawback_test : public jtx::AMMTest env.trust(USD(100000), alice); env(pay(gw, alice, USD(3000))); env.close(); - env.require(balance(alice, gw["USD"](3000))); + env.require(balance(alice, USD(3000))); // gw2 issues 3000 EUR to Alice. auto const EUR = gw2["EUR"]; env.trust(EUR(100000), alice); env(pay(gw2, alice, EUR(3000))); env.close(); - env.require(balance(alice, gw2["EUR"](3000))); + env.require(balance(alice, EUR(3000))); // Alice creates AMM pool of EUR/USD. AMM amm(env, alice, EUR(1000), USD(2000), ter(tesSUCCESS)); @@ -1835,8 +1922,8 @@ class AMMClawback_test : public jtx::AMMTest ter(tesSUCCESS)); env.close(); - env.require(balance(alice, gw["USD"](1000))); - env.require(balance(alice, gw2["EUR"](2500))); + env.require(balance(alice, USD(1000))); + env.require(balance(alice, EUR(2500))); BEAST_EXPECT(amm.expectBalances( USD(1000), EUR(500), IOUAmount{7071067811865475, -13})); BEAST_EXPECT( @@ -1975,10 +2062,11 @@ class AMMClawback_test : public jtx::AMMTest { testcase("test single depoit and clawback"); using namespace jtx; + std::string logs; // Test AMMClawback for USD/XRP pool. Claw back USD, and XRP goes back // to the holder. 
- Env env(*this, features); + Env env(*this, features, std::make_unique(&logs)); Account gw{"gateway"}; Account alice{"alice"}; env.fund(XRP(1000000000), gw, alice); @@ -1994,7 +2082,7 @@ class AMMClawback_test : public jtx::AMMTest env.trust(USD(100000), alice); env(pay(gw, alice, USD(1000))); env.close(); - env.require(balance(alice, gw["USD"](1000))); + env.require(balance(alice, USD(1000))); // gw creates AMM pool of XRP/USD. AMM amm(env, gw, XRP(100), USD(400), ter(tesSUCCESS)); @@ -2032,26 +2120,349 @@ class AMMClawback_test : public jtx::AMMTest env, alice, aliceXrpBalance + XRP(29.289321))); } + void + testLastHolderLPTokenBalance(FeatureBitset features) + { + testcase( + "test last holder's lptoken balance not equal to AMM's lptoken " + "balance before clawback"); + using namespace jtx; + std::string logs; + + auto setupAccounts = + [&](Env& env, Account& gw, Account& alice, Account& bob) { + env.fund(XRP(100000), gw, alice, bob); + env.close(); + env(fset(gw, asfAllowTrustLineClawback)); + env.close(); + + auto const USD = gw["USD"]; + env.trust(USD(100000), alice); + env(pay(gw, alice, USD(50000))); + env.trust(USD(100000), bob); + env(pay(gw, bob, USD(40000))); + env.close(); + + return USD; + }; + + auto getLPTokenBalances = + [&](auto& env, + auto const& amm, + auto const& account) -> std::pair { + auto const lpToken = + getAccountLines( + env, account, amm.lptIssue())[jss::lines][0u][jss::balance] + .asString(); + auto const lpTokenBalance = + amm.ammRpcInfo()[jss::amm][jss::lp_token][jss::value] + .asString(); + return {lpToken, lpTokenBalance}; + }; + + // IOU/XRP pool. AMMClawback almost last holder's USD balance + { + Env env(*this, features, std::make_unique(&logs)); + Account gw{"gateway"}, alice{"alice"}, bob{"bob"}; + auto const USD = setupAccounts(env, gw, alice, bob); + + AMM amm(env, alice, XRP(2), USD(1)); + amm.deposit(alice, IOUAmount{1'876123487565916, -15}); + amm.deposit(bob, IOUAmount{1'000'000}); + amm.withdraw(alice, IOUAmount{1'876123487565916, -15}); + amm.withdrawAll(bob); + + auto [lpToken, lpTokenBalance] = + getLPTokenBalances(env, amm, alice); + BEAST_EXPECT( + lpToken == "1414.21356237366" && + lpTokenBalance == "1414.213562374"); + + auto res = + isOnlyLiquidityProvider(*env.current(), amm.lptIssue(), alice); + BEAST_EXPECT(res && res.value()); + + if (!features[fixAMMClawbackRounding] || !features[fixAMMv1_3]) + { + env(amm::ammClawback(gw, alice, USD, XRP, USD(1)), + ter(tecAMM_BALANCE)); + BEAST_EXPECT(amm.ammExists()); + } + else + { + auto const lpBalance = IOUAmount{989, -12}; + env(amm::ammClawback(gw, alice, USD, XRP, USD(1))); + BEAST_EXPECT(amm.expectBalances( + STAmount(USD, UINT64_C(7000000000000000), -28), + XRPAmount(1), + lpBalance)); + BEAST_EXPECT(amm.expectLPTokens(alice, lpBalance)); + } + } + + // IOU/XRP pool. 
AMMClawback part of last holder's USD balance + { + Env env(*this, features, std::make_unique(&logs)); + Account gw{"gateway"}, alice{"alice"}, bob{"bob"}; + auto const USD = setupAccounts(env, gw, alice, bob); + + AMM amm(env, alice, XRP(2), USD(1)); + amm.deposit(alice, IOUAmount{1'876123487565916, -15}); + amm.deposit(bob, IOUAmount{1'000'000}); + amm.withdrawAll(bob); + + auto [lpToken, lpTokenBalance] = + getLPTokenBalances(env, amm, alice); + BEAST_EXPECT( + lpToken == "1416.08968586066" && + lpTokenBalance == "1416.089685861"); + + auto res = + isOnlyLiquidityProvider(*env.current(), amm.lptIssue(), alice); + BEAST_EXPECT(res && res.value()); + + env(amm::ammClawback(gw, alice, USD, XRP, USD(0.5))); + + if (!features[fixAMMv1_3] && !features[fixAMMClawbackRounding]) + { + BEAST_EXPECT(amm.expectBalances( + STAmount(USD, UINT64_C(5013266196406), -13), + XRPAmount(1002653), + IOUAmount{708'9829046744236, -13})); + } + else if (!features[fixAMMClawbackRounding]) + { + BEAST_EXPECT(amm.expectBalances( + STAmount(USD, UINT64_C(5013266196407), -13), + XRPAmount(1002654), + IOUAmount{708'9829046744941, -13})); + } + else if (features[fixAMMv1_3] && features[fixAMMClawbackRounding]) + { + auto const lpBalance = IOUAmount{708'9829046743238, -13}; + BEAST_EXPECT(amm.expectBalances( + STAmount(USD, UINT64_C(5013266196406999), -16), + XRPAmount(1002655), + lpBalance)); + BEAST_EXPECT(amm.expectLPTokens(alice, lpBalance)); + } + } + + // IOU/XRP pool. AMMClawback all of last holder's USD balance + { + Env env(*this, features, std::make_unique(&logs)); + Account gw{"gateway"}, alice{"alice"}, bob{"bob"}; + auto const USD = setupAccounts(env, gw, alice, bob); + + AMM amm(env, alice, XRP(2), USD(1)); + amm.deposit(alice, IOUAmount{1'876123487565916, -15}); + amm.deposit(bob, IOUAmount{1'000'000}); + amm.withdraw(alice, IOUAmount{1'876123487565916, -15}); + amm.withdrawAll(bob); + + auto [lpToken, lpTokenBalance] = + getLPTokenBalances(env, amm, alice); + BEAST_EXPECT( + lpToken == "1414.21356237366" && + lpTokenBalance == "1414.213562374"); + + auto res = + isOnlyLiquidityProvider(*env.current(), amm.lptIssue(), alice); + BEAST_EXPECT(res && res.value()); + + if (!features[fixAMMClawbackRounding] && !features[fixAMMv1_3]) + { + env(amm::ammClawback(gw, alice, USD, XRP, std::nullopt), + ter(tecAMM_BALANCE)); + } + else if (!features[fixAMMClawbackRounding]) + { + env(amm::ammClawback(gw, alice, USD, XRP, std::nullopt)); + BEAST_EXPECT(amm.expectBalances( + STAmount(USD, UINT64_C(2410000000000000), -28), + XRPAmount(1), + IOUAmount{34, -11})); + } + else if (features[fixAMMv1_3] && features[fixAMMClawbackRounding]) + { + env(amm::ammClawback(gw, alice, USD, XRP, std::nullopt)); + BEAST_EXPECT(!amm.ammExists()); + } + } + + // IOU/IOU pool, different issuers + { + Env env(*this, features, std::make_unique(&logs)); + Account gw{"gateway"}, alice{"alice"}, bob{"bob"}; + auto const USD = setupAccounts(env, gw, alice, bob); + + Account gw2{"gateway2"}; + env.fund(XRP(100000), gw2); + env.close(); + auto const EUR = gw2["EUR"]; + env.trust(EUR(100000), alice); + env(pay(gw2, alice, EUR(50000))); + env.trust(EUR(100000), bob); + env(pay(gw2, bob, EUR(50000))); + env.close(); + + AMM amm(env, alice, USD(2), EUR(1)); + amm.deposit(alice, IOUAmount{1'576123487565916, -15}); + amm.deposit(bob, IOUAmount{1'000}); + amm.withdraw(alice, IOUAmount{1'576123487565916, -15}); + amm.withdrawAll(bob); + + auto [lpToken, lpTokenBalance] = + getLPTokenBalances(env, amm, alice); + BEAST_EXPECT( + lpToken == 
"1.414213562374011" && + lpTokenBalance == "1.414213562374"); + + auto res = + isOnlyLiquidityProvider(*env.current(), amm.lptIssue(), alice); + BEAST_EXPECT(res && res.value()); + + if (features[fixAMMv1_3] && features[fixAMMClawbackRounding]) + { + env(amm::ammClawback(gw, alice, USD, EUR, std::nullopt)); + BEAST_EXPECT(!amm.ammExists()); + } + else + { + env(amm::ammClawback(gw, alice, USD, EUR, std::nullopt), + ter(tecINTERNAL)); + BEAST_EXPECT(amm.ammExists()); + } + } + + // IOU/IOU pool, same issuer + { + Env env(*this, features, std::make_unique(&logs)); + Account gw{"gateway"}, alice{"alice"}, bob{"bob"}; + auto const USD = setupAccounts(env, gw, alice, bob); + + auto const EUR = gw["EUR"]; + env.trust(EUR(100000), alice); + env(pay(gw, alice, EUR(50000))); + env.trust(EUR(100000), bob); + env(pay(gw, bob, EUR(50000))); + env.close(); + + AMM amm(env, alice, USD(1), EUR(2)); + amm.deposit(alice, IOUAmount{1'076123487565916, -15}); + amm.deposit(bob, IOUAmount{1'000}); + amm.withdraw(alice, IOUAmount{1'076123487565916, -15}); + amm.withdrawAll(bob); + + auto [lpToken, lpTokenBalance] = + getLPTokenBalances(env, amm, alice); + BEAST_EXPECT( + lpToken == "1.414213562374011" && + lpTokenBalance == "1.414213562374"); + + auto res = + isOnlyLiquidityProvider(*env.current(), amm.lptIssue(), alice); + BEAST_EXPECT(res && res.value()); + + if (features[fixAMMClawbackRounding]) + { + env(amm::ammClawback(gw, alice, USD, EUR, std::nullopt), + txflags(tfClawTwoAssets)); + BEAST_EXPECT(!amm.ammExists()); + } + else + { + env(amm::ammClawback(gw, alice, USD, EUR, std::nullopt), + txflags(tfClawTwoAssets), + ter(tecINTERNAL)); + BEAST_EXPECT(amm.ammExists()); + } + } + + // IOU/IOU pool, larger asset ratio + { + Env env(*this, features, std::make_unique(&logs)); + Account gw{"gateway"}, alice{"alice"}, bob{"bob"}; + auto const USD = setupAccounts(env, gw, alice, bob); + + auto const EUR = gw["EUR"]; + env.trust(EUR(1000000000), alice); + env(pay(gw, alice, EUR(500000000))); + env.trust(EUR(1000000000), bob); + env(pay(gw, bob, EUR(500000000))); + env.close(); + + AMM amm(env, alice, USD(1), EUR(2000000)); + amm.deposit(alice, IOUAmount{1'076123487565916, -12}); + amm.deposit(bob, IOUAmount{10000}); + amm.withdraw(alice, IOUAmount{1'076123487565916, -12}); + amm.withdrawAll(bob); + + auto [lpToken, lpTokenBalance] = + getLPTokenBalances(env, amm, alice); + + BEAST_EXPECT( + lpToken == "1414.213562373101" && + lpTokenBalance == "1414.2135623731"); + + auto res = + isOnlyLiquidityProvider(*env.current(), amm.lptIssue(), alice); + BEAST_EXPECT(res && res.value()); + + if (!features[fixAMMClawbackRounding] && !features[fixAMMv1_3]) + { + env(amm::ammClawback(gw, alice, USD, EUR, USD(1))); + BEAST_EXPECT(amm.expectBalances( + STAmount(USD, UINT64_C(4), -15), + STAmount(EUR, UINT64_C(8), -9), + IOUAmount{6, -12})); + } + else if (!features[fixAMMClawbackRounding]) + { + // sqrt(amount * amount2) >= LPTokens and exceeds the allowed + // tolerance + env(amm::ammClawback(gw, alice, USD, EUR, USD(1)), + ter(tecINVARIANT_FAILED)); + BEAST_EXPECT(amm.ammExists()); + } + else if (features[fixAMMv1_3] && features[fixAMMClawbackRounding]) + { + env(amm::ammClawback(gw, alice, USD, EUR, USD(1)), + txflags(tfClawTwoAssets)); + auto const lpBalance = IOUAmount{5, -12}; + BEAST_EXPECT(amm.expectBalances( + STAmount(USD, UINT64_C(4), -15), + STAmount(EUR, UINT64_C(8), -9), + lpBalance)); + BEAST_EXPECT(amm.expectLPTokens(alice, lpBalance)); + } + } + } + void run() override { - FeatureBitset const 
all{jtx::supported_amendments()}; - testInvalidRequest(all); + FeatureBitset const all{ + jtx::supported_amendments() | fixAMMClawbackRounding}; + + testInvalidRequest(); testFeatureDisabled(all - featureAMMClawback); - testAMMClawbackSpecificAmount(all); - testAMMClawbackExceedBalance(all); - testAMMClawbackExceedBalance(all - fixAMMv1_3); - testAMMClawbackAll(all); - testAMMClawbackAll(all - fixAMMv1_3); - testAMMClawbackSameIssuerAssets(all); - testAMMClawbackSameIssuerAssets(all - fixAMMv1_3); - testAMMClawbackSameCurrency(all); - testAMMClawbackIssuesEachOther(all); - testNotHoldingLptoken(all); - testAssetFrozen(all); - testAssetFrozen(all - fixAMMv1_3); - testSingleDepositAndClawback(all); - testSingleDepositAndClawback(all - fixAMMv1_3); + for (auto const& features : + {all - fixAMMv1_3 - fixAMMClawbackRounding, + all - fixAMMClawbackRounding, + all}) + { + testAMMClawbackSpecificAmount(features); + testAMMClawbackExceedBalance(features); + testAMMClawbackAll(features); + testAMMClawbackSameIssuerAssets(features); + testAMMClawbackSameCurrency(features); + testAMMClawbackIssuesEachOther(features); + testNotHoldingLptoken(features); + testAssetFrozen(features); + testSingleDepositAndClawback(features); + testLastHolderLPTokenBalance(features); + } } }; BEAST_DEFINE_TESTSUITE(AMMClawback, app, ripple); diff --git a/src/xrpld/app/misc/AMMUtils.h b/src/xrpld/app/misc/AMMUtils.h index b2c0007dc7..2a9f82ae60 100644 --- a/src/xrpld/app/misc/AMMUtils.h +++ b/src/xrpld/app/misc/AMMUtils.h @@ -125,6 +125,17 @@ isOnlyLiquidityProvider( Issue const& ammIssue, AccountID const& lpAccount); +/** Due to rounding, the LPTokenBalance of the last LP might + * not match the LP's trustline balance. If it's within the tolerance, + * update LPTokenBalance to match the LP's trustline balance. 
+ */ +Expected +verifyAndAdjustLPTokenBalance( + Sandbox& sb, + STAmount const& lpTokens, + std::shared_ptr& ammSle, + AccountID const& account); + } // namespace ripple #endif // RIPPLE_APP_MISC_AMMUTILS_H_INCLUDED diff --git a/src/xrpld/app/misc/detail/AMMUtils.cpp b/src/xrpld/app/misc/detail/AMMUtils.cpp index ba4c741300..b56ce2748e 100644 --- a/src/xrpld/app/misc/detail/AMMUtils.cpp +++ b/src/xrpld/app/misc/detail/AMMUtils.cpp @@ -17,6 +17,7 @@ */ //============================================================================== +#include #include #include @@ -464,4 +465,32 @@ isOnlyLiquidityProvider( return Unexpected(tecINTERNAL); // LCOV_EXCL_LINE } +Expected +verifyAndAdjustLPTokenBalance( + Sandbox& sb, + STAmount const& lpTokens, + std::shared_ptr& ammSle, + AccountID const& account) +{ + if (auto const res = isOnlyLiquidityProvider(sb, lpTokens.issue(), account); + !res) + return Unexpected(res.error()); + else if (res.value()) + { + if (withinRelativeDistance( + lpTokens, + ammSle->getFieldAmount(sfLPTokenBalance), + Number{1, -3})) + { + ammSle->setFieldAmount(sfLPTokenBalance, lpTokens); + sb.update(ammSle); + } + else + { + return Unexpected(tecAMM_INVALID_TOKENS); + } + } + return true; +} + } // namespace ripple diff --git a/src/xrpld/app/tx/detail/AMMClawback.cpp b/src/xrpld/app/tx/detail/AMMClawback.cpp index 64a42374ec..07c5151727 100644 --- a/src/xrpld/app/tx/detail/AMMClawback.cpp +++ b/src/xrpld/app/tx/detail/AMMClawback.cpp @@ -151,6 +151,20 @@ AMMClawback::applyGuts(Sandbox& sb) if (!accountSle) return tecINTERNAL; // LCOV_EXCL_LINE + if (sb.rules().enabled(fixAMMClawbackRounding)) + { + // retrieve LP token balance inside the amendment gate to avoid + // inconsistent error behavior + auto const lpTokenBalance = ammLPHolds(sb, *ammSle, holder, j_); + if (lpTokenBalance == beast::zero) + return tecAMM_BALANCE; + + if (auto const res = verifyAndAdjustLPTokenBalance( + sb, lpTokenBalance, ammSle, holder); + !res) + return res.error(); // LCOV_EXCL_LINE + } + auto const expected = ammHolds( sb, *ammSle, @@ -248,10 +262,11 @@ AMMClawback::equalWithdrawMatchingOneAmount( STAmount const& amount) { auto frac = Number{amount} / amountBalance; - auto const amount2Withdraw = amount2Balance * frac; + auto amount2Withdraw = amount2Balance * frac; auto const lpTokensWithdraw = toSTAmount(lptAMMBalance.issue(), lptAMMBalance * frac); + if (lpTokensWithdraw > holdLPtokens) // if lptoken balance less than what the issuer intended to clawback, // clawback all the tokens. 
Because we are doing a two-asset withdrawal, @@ -272,6 +287,42 @@ AMMClawback::equalWithdrawMatchingOneAmount( mPriorBalance, ctx_.journal); + auto const& rules = sb.rules(); + if (rules.enabled(fixAMMClawbackRounding)) + { + auto tokensAdj = + getRoundedLPTokens(rules, lptAMMBalance, frac, IsDeposit::No); + + // LCOV_EXCL_START + if (tokensAdj == beast::zero) + return { + tecAMM_INVALID_TOKENS, STAmount{}, STAmount{}, std::nullopt}; + // LCOV_EXCL_STOP + + frac = adjustFracByTokens(rules, lptAMMBalance, tokensAdj, frac); + auto amount2Rounded = + getRoundedAsset(rules, amount2Balance, frac, IsDeposit::No); + + auto amountRounded = + getRoundedAsset(rules, amountBalance, frac, IsDeposit::No); + + return AMMWithdraw::withdraw( + sb, + ammSle, + ammAccount, + holder, + amountBalance, + amountRounded, + amount2Rounded, + lptAMMBalance, + tokensAdj, + 0, + FreezeHandling::fhIGNORE_FREEZE, + WithdrawAll::No, + mPriorBalance, + ctx_.journal); + } + // Because we are doing a two-asset withdrawal, // tfee is actually not used, so pass tfee as 0. return AMMWithdraw::withdraw( diff --git a/src/xrpld/app/tx/detail/AMMWithdraw.cpp b/src/xrpld/app/tx/detail/AMMWithdraw.cpp index 69243f3f48..2ad1a19df5 100644 --- a/src/xrpld/app/tx/detail/AMMWithdraw.cpp +++ b/src/xrpld/app/tx/detail/AMMWithdraw.cpp @@ -311,24 +311,9 @@ AMMWithdraw::applyGuts(Sandbox& sb) if (sb.rules().enabled(fixAMMv1_1)) { if (auto const res = - isOnlyLiquidityProvider(sb, lpTokens.issue(), account_); + verifyAndAdjustLPTokenBalance(sb, lpTokens, ammSle, account_); !res) return {res.error(), false}; - else if (res.value()) - { - if (withinRelativeDistance( - lpTokens, - ammSle->getFieldAmount(sfLPTokenBalance), - Number{1, -3})) - { - ammSle->setFieldAmount(sfLPTokenBalance, lpTokens); - sb.update(ammSle); - } - else - { - return {tecAMM_INVALID_TOKENS, false}; - } - } } auto const tfee = getTradingFee(ctx_.view(), *ammSle, account_); From 452263eaa552ce84f5b326e86877fc3daf1feb91 Mon Sep 17 00:00:00 2001 From: Michael Legleux Date: Tue, 15 Jul 2025 15:17:22 -0700 Subject: [PATCH 077/244] chore: Update CI to use Conan 2 (#5556) This is a minimally invasive update to use Conan 2 provided by our new build images. 
--- .github/actions/dependencies/action.yml | 43 +++--- .github/workflows/clang-format.yml | 20 +-- .github/workflows/macos.yml | 45 ++++-- .github/workflows/nix.yml | 197 ++++++++++-------------- .github/workflows/windows.yml | 52 +++++-- .pre-commit-config.yaml | 2 +- conan/profiles/libxrpl | 23 +++ conanfile.py | 20 ++- tests/conan/CMakeLists.txt | 2 +- tests/conan/conanfile.py | 61 +++----- 10 files changed, 237 insertions(+), 228 deletions(-) create mode 100644 conan/profiles/libxrpl diff --git a/.github/actions/dependencies/action.yml b/.github/actions/dependencies/action.yml index afce1557d3..731e3e862f 100644 --- a/.github/actions/dependencies/action.yml +++ b/.github/actions/dependencies/action.yml @@ -6,36 +6,33 @@ inputs: runs: using: composite steps: - - name: unlock Conan - shell: bash - run: conan remove --locks - name: export custom recipes shell: bash run: | - conan config set general.revisions_enabled=1 - conan export external/snappy snappy/1.1.10@ - conan export external/rocksdb rocksdb/9.7.3@ - conan export external/soci soci/4.0.3@ - conan export external/nudb nudb/2.0.8@ + conan export --version 1.1.10 external/snappy + conan export --version 9.7.3 external/rocksdb + conan export --version 4.0.3 external/soci + conan export --version 2.0.8 external/nudb - name: add Ripple Conan remote + if: env.CONAN_URL != '' shell: bash run: | - conan remote list - conan remote remove ripple || true - # Do not quote the URL. An empty string will be accepted (with - # a non-fatal warning), but a missing argument will not. - conan remote add ripple ${{ env.CONAN_URL }} --insert 0 + if conan remote list | grep -q "ripple"; then + conan remote remove ripple + echo "Removed conan remote ripple" + fi + conan remote add --index 0 ripple "${CONAN_URL}" + echo "Added conan remote ripple at ${CONAN_URL}" + - name: try to authenticate to Ripple Conan remote + if: env.CONAN_LOGIN_USERNAME_RIPPLE != '' && env.CONAN_PASSWORD_RIPPLE != '' id: remote shell: bash run: | - # `conan user` implicitly uses the environment variables - # CONAN_LOGIN_USERNAME_ and CONAN_PASSWORD_. - # https://docs.conan.io/1/reference/commands/misc/user.html#using-environment-variables - # https://docs.conan.io/1/reference/env_vars.html#conan-login-username-conan-login-username-remote-name - # https://docs.conan.io/1/reference/env_vars.html#conan-password-conan-password-remote-name - echo outcome=$(conan user --remote ripple --password >&2 \ - && echo success || echo failure) | tee ${GITHUB_OUTPUT} + echo "Authenticating to ripple remote..." + conan remote auth ripple --force + conan remote list-users + - name: list missing binaries id: binaries shell: bash @@ -51,7 +48,7 @@ runs: conan install \ --output-folder . \ --build missing \ - --options tests=True \ - --options xrpld=True \ - --settings build_type=${{ inputs.configuration }} \ + --options:host "&:tests=True" \ + --options:host "&:xrpld=True" \ + --settings:all build_type=${{ inputs.configuration }} \ .. 
diff --git a/.github/workflows/clang-format.yml b/.github/workflows/clang-format.yml index ac6154ab9f..83752c4780 100644 --- a/.github/workflows/clang-format.yml +++ b/.github/workflows/clang-format.yml @@ -9,24 +9,16 @@ jobs: check: if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }} runs-on: ubuntu-24.04 - env: - CLANG_VERSION: 18 + container: ghcr.io/xrplf/ci/tools-rippled-clang-format steps: - uses: actions/checkout@v4 - - name: Install clang-format - run: | - codename=$( lsb_release --codename --short ) - sudo tee /etc/apt/sources.list.d/llvm.list >/dev/null <> $GITHUB_PATH + brew install conan - name: install Ninja if: matrix.generator == 'Ninja' run: brew install ninja - name: install python - run: | + run: | if which python > /dev/null 2>&1; then echo "Python executable exists" else @@ -76,14 +87,28 @@ jobs: clang --version - name: configure Conan run : | - conan profile new default --detect || true - conan profile update settings.compiler.cppstd=20 default + echo "${CONAN_GLOBAL_CONF}" > global.conf + conan config install conan/profiles/ -tf $(conan config home)/profiles/ + conan profile show + - name: export custom recipes + shell: bash + run: | + conan export --version 1.1.10 external/snappy + conan export --version 9.7.3 external/rocksdb + conan export --version 4.0.3 external/soci + conan export --version 2.0.8 external/nudb + - name: add Ripple Conan remote + if: env.CONAN_URL != '' + shell: bash + run: | + if conan remote list | grep -q "ripple"; then + conan remote remove ripple + echo "Removed conan remote ripple" + fi + conan remote add --index 0 ripple "${CONAN_URL}" + echo "Added conan remote ripple at ${CONAN_URL}" - name: build dependencies uses: ./.github/actions/dependencies - env: - CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod - CONAN_LOGIN_USERNAME_RIPPLE: ${{ secrets.CONAN_USERNAME }} - CONAN_PASSWORD_RIPPLE: ${{ secrets.CONAN_TOKEN }} with: configuration: ${{ matrix.configuration }} - name: build diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml index 0ba7b0f212..409a1defc0 100644 --- a/.github/workflows/nix.yml +++ b/.github/workflows/nix.yml @@ -16,6 +16,19 @@ concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true +# This part of Conan configuration is specific to this workflow only; we do not want +# to pollute conan/profiles directory with settings which might not work for others +env: + CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/dev + CONAN_LOGIN_USERNAME_RIPPLE: ${{ secrets.CONAN_USERNAME }} + CONAN_PASSWORD_RIPPLE: ${{ secrets.CONAN_TOKEN }} + CONAN_GLOBAL_CONF: | + core.download:parallel={{ os.cpu_count() }} + core.upload:parallel={{ os.cpu_count() }} + tools.build:jobs={{ (os.cpu_count() * 4/5) | int }} + tools.build:verbosity=verbose + tools.compilation:verbosity=verbose + # This workflow has multiple job matrixes. # They can be considered phases because most of the matrices ("test", # "coverage", "conan", ) depend on the first ("dependencies"). 
@@ -54,59 +67,45 @@ jobs: - Release include: - compiler: gcc - profile: - version: 11 - cc: /usr/bin/gcc - cxx: /usr/bin/g++ + compiler_version: 12 + distro: ubuntu + codename: jammy - compiler: clang - profile: - version: 14 - cc: /usr/bin/clang-14 - cxx: /usr/bin/clang++-14 + compiler_version: 16 + distro: debian + codename: bookworm runs-on: [self-hosted, heavy] - container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e + container: ghcr.io/xrplf/ci/${{ matrix.distro }}-${{ matrix.codename }}:${{ matrix.compiler }}-${{ matrix.compiler_version }} env: build_dir: .build steps: - - name: upgrade conan - run: | - pip install --upgrade "conan<2" - name: checkout - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - name: check environment run: | echo ${PATH} | tr ':' '\n' lsb_release -a || true - ${{ matrix.profile.cc }} --version + ${{ matrix.compiler }}-${{ matrix.compiler_version }} --version conan --version cmake --version env | sort - name: configure Conan run: | - conan profile new default --detect - conan profile update settings.compiler.cppstd=20 default - conan profile update settings.compiler=${{ matrix.compiler }} default - conan profile update settings.compiler.version=${{ matrix.profile.version }} default - conan profile update settings.compiler.libcxx=libstdc++11 default - conan profile update env.CC=${{ matrix.profile.cc }} default - conan profile update env.CXX=${{ matrix.profile.cxx }} default - conan profile update conf.tools.build:compiler_executables='{"c": "${{ matrix.profile.cc }}", "cpp": "${{ matrix.profile.cxx }}"}' default + echo "${CONAN_GLOBAL_CONF}" >> ${CONAN_HOME}/global.conf + conan profile show - name: archive profile # Create this archive before dependencies are added to the local cache. - run: tar -czf conan.tar -C ~/.conan . + run: tar -czf conan.tar.gz -C ${CONAN_HOME} . 
- name: build dependencies uses: ./.github/actions/dependencies - env: - CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod - CONAN_LOGIN_USERNAME_RIPPLE: ${{ secrets.CONAN_USERNAME }} - CONAN_PASSWORD_RIPPLE: ${{ secrets.CONAN_TOKEN }} + with: configuration: ${{ matrix.configuration }} - name: upload archive - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 with: name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }} - path: conan.tar + path: conan.tar.gz if-no-files-found: error test: @@ -121,26 +120,32 @@ jobs: configuration: - Debug - Release + include: + - compiler: gcc + compiler_version: 12 + distro: ubuntu + codename: jammy + - compiler: clang + compiler_version: 16 + distro: debian + codename: bookworm cmake-args: - - "-Dunity=ON" needs: dependencies runs-on: [self-hosted, heavy] - container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e + container: ghcr.io/xrplf/ci/${{ matrix.distro }}-${{ matrix.codename }}:${{ matrix.compiler }}-${{ matrix.compiler_version }} env: build_dir: .build steps: - - name: upgrade conan - run: | - pip install --upgrade "conan<2" - name: download cache - uses: actions/download-artifact@v4 + uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 with: name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }} - name: extract cache run: | - mkdir -p ~/.conan - tar -xzf conan.tar -C ~/.conan + mkdir -p ${CONAN_HOME} + tar -xzf conan.tar.gz -C ${CONAN_HOME} - name: check environment run: | env | sort @@ -148,11 +153,9 @@ jobs: conan --version cmake --version - name: checkout - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - name: dependencies uses: ./.github/actions/dependencies - env: - CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod with: configuration: ${{ matrix.configuration }} - name: build @@ -182,21 +185,18 @@ jobs: - "-DUNIT_TEST_REFERENCE_FEE=1000" needs: dependencies runs-on: [self-hosted, heavy] - container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e + container: ghcr.io/xrplf/ci/ubuntu-jammy:gcc-12 env: build_dir: .build steps: - - name: upgrade conan - run: | - pip install --upgrade "conan<2" - name: download cache - uses: actions/download-artifact@v4 + uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 with: name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }} - name: extract cache run: | - mkdir -p ~/.conan - tar -xzf conan.tar -C ~/.conan + mkdir -p ${CONAN_HOME} + tar -xzf conan.tar.gz -C ${CONAN_HOME} - name: check environment run: | env | sort @@ -204,11 +204,9 @@ jobs: conan --version cmake --version - name: checkout - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - name: dependencies uses: ./.github/actions/dependencies - env: - CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod with: configuration: ${{ matrix.configuration }} - name: build @@ -234,23 +232,18 @@ jobs: - Debug needs: dependencies runs-on: [self-hosted, heavy] - container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e + container: ghcr.io/xrplf/ci/ubuntu-jammy:gcc-12 env: build_dir: .build steps: - - name: upgrade conan - run: | - pip install --upgrade "conan<2" - name: download cache - uses: actions/download-artifact@v4 + uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 with: name: ${{ matrix.platform }}-${{ 
matrix.compiler }}-${{ matrix.configuration }} - name: extract cache run: | - mkdir -p ~/.conan - tar -xzf conan.tar -C ~/.conan - - name: install gcovr - run: pip install "gcovr>=7,<9" + mkdir -p ${CONAN_HOME} + tar -xzf conan.tar.gz -C ${CONAN_HOME} - name: check environment run: | echo ${PATH} | tr ':' '\n' @@ -258,13 +251,11 @@ jobs: cmake --version gcovr --version env | sort - ls ~/.conan + ls ${CONAN_HOME} - name: checkout - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - name: dependencies uses: ./.github/actions/dependencies - env: - CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod with: configuration: ${{ matrix.configuration }} - name: build @@ -286,7 +277,7 @@ jobs: run: | mv "${build_dir}/coverage.xml" ./ - name: archive coverage report - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 with: name: coverage.xml path: coverage.xml @@ -305,25 +296,28 @@ jobs: attempt_limit: 5 attempt_delay: 210000 # in milliseconds + conan: needs: dependencies runs-on: [self-hosted, heavy] - container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e + container: + image: ghcr.io/xrplf/ci/ubuntu-jammy:gcc-12 env: build_dir: .build + platform: linux + compiler: gcc + compiler_version: 12 configuration: Release steps: - - name: upgrade conan - run: | - pip install --upgrade "conan<2" - name: download cache - uses: actions/download-artifact@v4 + uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 with: - name: linux-gcc-${{ env.configuration }} + name: ${{ env.platform }}-${{ env.compiler }}-${{ env.configuration }} + - name: extract cache run: | - mkdir -p ~/.conan - tar -xzf conan.tar -C ~/.conan + mkdir -p ${CONAN_HOME} + tar -xzf conan.tar.gz -C ${CONAN_HOME} - name: check environment run: | env | sort @@ -331,27 +325,22 @@ jobs: conan --version cmake --version - name: checkout - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - name: dependencies uses: ./.github/actions/dependencies - env: - CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod with: configuration: ${{ env.configuration }} - name: export run: | - version=$(conan inspect --raw version .) - reference="xrpl/${version}@local/test" - conan remove -f ${reference} || true - conan export . local/test - echo "reference=${reference}" >> "${GITHUB_ENV}" + conan export . --version head - name: build run: | cd tests/conan - mkdir ${build_dir} - cd ${build_dir} - conan install .. --output-folder . \ - --require-override ${reference} --build missing + mkdir ${build_dir} && cd ${build_dir} + conan install .. \ + --settings:all build_type=${configuration} \ + --output-folder . \ + --build missing cmake .. 
\ -DCMAKE_TOOLCHAIN_FILE:FILEPATH=./build/${configuration}/generators/conan_toolchain.cmake \ -DCMAKE_BUILD_TYPE=${configuration} @@ -366,60 +355,30 @@ jobs: if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }} env: CLANG_RELEASE: 16 - strategy: - fail-fast: false runs-on: [self-hosted, heavy] - container: debian:bookworm - steps: - - name: install prerequisites - env: - DEBIAN_FRONTEND: noninteractive - run: | - apt-get update - apt-get install --yes --no-install-recommends \ - clang-${CLANG_RELEASE} clang++-${CLANG_RELEASE} \ - python3-pip python-is-python3 make cmake git wget - apt-get clean - update-alternatives --install \ - /usr/bin/clang clang /usr/bin/clang-${CLANG_RELEASE} 100 \ - --slave /usr/bin/clang++ clang++ /usr/bin/clang++-${CLANG_RELEASE} - update-alternatives --auto clang - pip install --no-cache --break-system-packages "conan<2" + container: ghcr.io/xrplf/ci/debian-bookworm:clang-16 + steps: - name: checkout - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - name: prepare environment run: | mkdir ${GITHUB_WORKSPACE}/.build echo "SOURCE_DIR=$GITHUB_WORKSPACE" >> $GITHUB_ENV echo "BUILD_DIR=$GITHUB_WORKSPACE/.build" >> $GITHUB_ENV - echo "CC=/usr/bin/clang" >> $GITHUB_ENV - echo "CXX=/usr/bin/clang++" >> $GITHUB_ENV - name: configure Conan run: | - conan profile new --detect default - conan profile update settings.compiler=clang default - conan profile update settings.compiler.version=${CLANG_RELEASE} default - conan profile update settings.compiler.libcxx=libstdc++11 default - conan profile update settings.compiler.cppstd=20 default - conan profile update options.rocksdb=False default - conan profile update \ - 'conf.tools.build:compiler_executables={"c": "/usr/bin/clang", "cpp": "/usr/bin/clang++"}' default - conan profile update 'env.CXXFLAGS="-DBOOST_ASIO_DISABLE_CONCEPTS"' default - conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_ASIO_DISABLE_CONCEPTS"]' default - conan export external/snappy snappy/1.1.10@ - conan export external/soci soci/4.0.3@ - + echo "${CONAN_GLOBAL_CONF}" >> ${CONAN_HOME}/global.conf + conan profile show - name: build dependencies run: | cd ${BUILD_DIR} conan install ${SOURCE_DIR} \ --output-folder ${BUILD_DIR} \ - --install-folder ${BUILD_DIR} \ --build missing \ - --settings build_type=Debug + --settings:all build_type=Debug - name: build with instrumentation run: | diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 7c83e7f300..30ad32a89c 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -18,6 +18,18 @@ on: concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true +# This part of Conan configuration is specific to this workflow only; we do not want +# to pollute conan/profiles directory with settings which might not work for others +env: + CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/dev + CONAN_LOGIN_USERNAME_RIPPLE: ${{ secrets.CONAN_USERNAME }} + CONAN_PASSWORD_RIPPLE: ${{ secrets.CONAN_TOKEN }} + CONAN_GLOBAL_CONF: | + core.download:parallel={{os.cpu_count()}} + core.upload:parallel={{os.cpu_count()}} + tools.build:jobs=24 + tools.build:verbosity=verbose + tools.compilation:verbosity=verbose jobs: @@ -42,11 +54,11 @@ jobs: build_dir: .build steps: - name: checkout - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - name: choose Python - uses: 
actions/setup-python@v5 + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 with: - python-version: 3.9 + python-version: 3.13 - name: learn Python cache directory id: pip-cache shell: bash @@ -54,12 +66,12 @@ jobs: python -m pip install --upgrade pip echo "dir=$(pip cache dir)" | tee ${GITHUB_OUTPUT} - name: restore Python cache directory - uses: actions/cache@v4 + uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 with: path: ${{ steps.pip-cache.outputs.dir }} key: ${{ runner.os }}-${{ hashFiles('.github/workflows/windows.yml') }} - name: install Conan - run: pip install wheel 'conan<2' + run: pip install wheel conan - name: check environment run: | dir env: @@ -70,17 +82,29 @@ jobs: - name: configure Conan shell: bash run: | - conan profile new default --detect - conan profile update settings.compiler.cppstd=20 default - conan profile update \ - settings.compiler.runtime=MT${{ matrix.configuration.runtime }} \ - default + echo "${CONAN_GLOBAL_CONF}" > global.conf + mv conan/profiles/libxrpl conan/profiles/default + conan config install conan/profiles/ -tf $(conan config home)/profiles/ + conan profile show + - name: export custom recipes + shell: bash + run: | + conan export --version 1.1.10 external/snappy + conan export --version 9.7.3 external/rocksdb + conan export --version 4.0.3 external/soci + conan export --version 2.0.8 external/nudb + - name: add Ripple Conan remote + if: env.CONAN_URL != '' + shell: bash + run: | + if conan remote list | grep -q "ripple"; then + conan remote remove ripple + echo "Removed conan remote ripple" + fi + conan remote add --index 0 ripple "${CONAN_URL}" + echo "Added conan remote ripple at ${CONAN_URL}" - name: build dependencies uses: ./.github/actions/dependencies - env: - CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod - CONAN_LOGIN_USERNAME_RIPPLE: ${{ secrets.CONAN_USERNAME }} - CONAN_PASSWORD_RIPPLE: ${{ secrets.CONAN_TOKEN }} with: configuration: ${{ matrix.configuration.type }} - name: build diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9f69d41379..abfbd887c7 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ # .pre-commit-config.yaml repos: - repo: https://github.com/pre-commit/mirrors-clang-format - rev: v18.1.3 + rev: v18.1.8 hooks: - id: clang-format diff --git a/conan/profiles/libxrpl b/conan/profiles/libxrpl new file mode 100644 index 0000000000..862244536b --- /dev/null +++ b/conan/profiles/libxrpl @@ -0,0 +1,23 @@ +{% set os = detect_api.detect_os() %} +{% set arch = detect_api.detect_arch() %} +{% set compiler, version, compiler_exe = detect_api.detect_default_compiler() %} +{% set compiler_version = version %} +{% if os == "Linux" %} +{% set compiler_version = detect_api.default_compiler_version(compiler, version) %} +{% endif %} + +{% if os == "Linux" %} +include(default) +{% endif %} + +[settings] +os={{ os }} +arch={{ arch }} +compiler={{compiler}} +compiler.version={{ compiler_version }} +compiler.cppstd=20 +{% if os == "Windows" %} +compiler.runtime=static +{% else %} +compiler.libcxx={{detect_api.detect_libcxx(compiler, version, compiler_exe)}} +{% endif %} diff --git a/conanfile.py b/conanfile.py index 2df08544cd..8e964784f8 100644 --- a/conanfile.py +++ b/conanfile.py @@ -24,7 +24,6 @@ class Xrpl(ConanFile): } requires = [ - 'doctest/2.4.11', 'grpc/1.50.1', 'libarchive/3.7.6', 'nudb/2.0.8', @@ -33,6 +32,10 @@ class Xrpl(ConanFile): 'zlib/1.3.1', ] + test_requires = [ + 'doctest/2.4.11', + ] + tool_requires = [ 
'protobuf/3.21.9', ] @@ -86,12 +89,13 @@ class Xrpl(ConanFile): } def set_version(self): - path = f'{self.recipe_folder}/src/libxrpl/protocol/BuildInfo.cpp' - regex = r'versionString\s?=\s?\"(.*)\"' - with open(path, 'r') as file: - matches = (re.search(regex, line) for line in file) - match = next(m for m in matches if m) - self.version = match.group(1) + if self.version is None: + path = f'{self.recipe_folder}/src/libxrpl/protocol/BuildInfo.cpp' + regex = r'versionString\s?=\s?\"(.*)\"' + with open(path, encoding='utf-8') as file: + matches = (re.search(regex, line) for line in file) + match = next(m for m in matches if m) + self.version = match.group(1) def configure(self): if self.settings.compiler == 'apple-clang': @@ -139,6 +143,8 @@ class Xrpl(ConanFile): tc.variables['static'] = self.options.static tc.variables['unity'] = self.options.unity tc.variables['xrpld'] = self.options.xrpld + if self.settings.compiler == 'clang' and self.settings.compiler.version == 16: + tc.extra_cxxflags = ["-DBOOST_ASIO_DISABLE_CONCEPTS"] tc.generate() def build(self): diff --git a/tests/conan/CMakeLists.txt b/tests/conan/CMakeLists.txt index 83aa24880d..f1b37e7a69 100644 --- a/tests/conan/CMakeLists.txt +++ b/tests/conan/CMakeLists.txt @@ -9,7 +9,7 @@ project( LANGUAGES CXX ) -find_package(xrpl REQUIRED) +find_package(xrpl CONFIG REQUIRED) add_executable(example) target_sources(example PRIVATE src/example.cpp) diff --git a/tests/conan/conanfile.py b/tests/conan/conanfile.py index be3750bf9e..1ea1b333fc 100644 --- a/tests/conan/conanfile.py +++ b/tests/conan/conanfile.py @@ -1,59 +1,42 @@ -from conan import ConanFile, conan_version +from pathlib import Path + +from conan import ConanFile +from conan.tools.build import can_run from conan.tools.cmake import CMake, cmake_layout class Example(ConanFile): - def set_name(self): - if self.name is None: - self.name = 'example' + name = 'example' + license = 'ISC' + author = 'John Freeman , Michael Legleux Date: Wed, 16 Jul 2025 11:53:13 +0100 Subject: [PATCH 078/244] Retire Flow Cross amendment (#5562) The FlowCross amendment is now permanently enabled, so all code branches that have this amendment disabled are removed. 
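
The first hunk below moves FlowCross from XRPL_FEATURE to XRPL_RETIRE in
features.macro, which is what allows every `featureFlowCross` branch elsewhere
to be deleted. For readers unfamiliar with that file, it is an X-macro list:
consumers define XRPL_FEATURE/XRPL_FIX/XRPL_RETIRE and then include it. The
self-contained C++ sketch below illustrates the idea with placeholder names and
simplified signatures (the real XRPL_FEATURE entries carry Supported and
VoteBehavior arguments, and rippled includes the .macro file rather than
passing the macros as parameters):

    // retire_sketch.cpp -- minimal illustration of retiring an amendment via
    // the X-macro pattern; names and signatures are simplified placeholders.
    #include <iostream>
    #include <set>
    #include <string>

    #define SKETCH_FEATURES(XRPL_FEATURE, XRPL_RETIRE) \
        XRPL_FEATURE(Flow)                             \
        XRPL_FEATURE(DepositAuth)                      \
        XRPL_RETIRE(FlowCross)

    int main()
    {
        // First expansion: amendments that still have their own on/off gate.
        std::set<std::string> gated;
    #define GATED(name) gated.insert(#name);
    #define RETIRED_IGNORED(name)
        SKETCH_FEATURES(GATED, RETIRED_IGNORED)
    #undef GATED
    #undef RETIRED_IGNORED

        // Second expansion: retired amendments, treated as permanently enabled.
        std::set<std::string> retired;
    #define GATED_IGNORED(name)
    #define RETIRED(name) retired.insert(#name);
        SKETCH_FEATURES(GATED_IGNORED, RETIRED)
    #undef GATED_IGNORED
    #undef RETIRED

        std::cout << "FlowCross gated:   " << gated.count("FlowCross") << '\n';    // prints 0
        std::cout << "FlowCross retired: " << retired.count("FlowCross") << '\n';  // prints 1
    }

Run as a sketch, it shows FlowCross dropping out of the gated set while still
being tracked as retired, which mirrors why the amendment checks removed in
this patch can simply go away.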
--- include/xrpl/protocol/detail/features.macro | 3 +- src/test/app/CrossingLimits_test.cpp | 56 ++--------- src/test/app/DeliverMin_test.cpp | 2 - src/test/app/Discrepancy_test.cpp | 1 - src/test/app/Flow_test.cpp | 4 - src/test/app/Freeze_test.cpp | 33 ++---- src/test/app/Offer_test.cpp | 106 +++++--------------- src/test/app/PayStrand_test.cpp | 3 - src/test/app/PermissionedDEX_test.cpp | 18 ---- src/test/app/SetAuth_test.cpp | 1 - src/test/app/TrustAndBalance_test.cpp | 1 - src/test/ledger/BookDirs_test.cpp | 1 - src/test/ledger/PaymentSandbox_test.cpp | 1 - src/test/rpc/GatewayBalances_test.cpp | 5 +- src/test/rpc/NoRipple_test.cpp | 1 - src/xrpld/app/paths/detail/BookStep.cpp | 5 +- src/xrpld/app/tx/detail/CreateOffer.cpp | 75 +------------- src/xrpld/app/tx/detail/CreateOffer.h | 7 -- 18 files changed, 52 insertions(+), 271 deletions(-) diff --git a/include/xrpl/protocol/detail/features.macro b/include/xrpl/protocol/detail/features.macro index 93b4dedae3..63c1b2258b 100644 --- a/include/xrpl/protocol/detail/features.macro +++ b/include/xrpl/protocol/detail/features.macro @@ -98,7 +98,6 @@ XRPL_FEATURE(HardenedValidations, Supported::yes, VoteBehavior::DefaultYe // fix1781: XRPEndpointSteps should be included in the circular payment check XRPL_FIX (1781, Supported::yes, VoteBehavior::DefaultYes) XRPL_FEATURE(RequireFullyCanonicalSig, Supported::yes, VoteBehavior::DefaultYes) -// fixQualityUpperBound should be activated before FlowCross XRPL_FIX (QualityUpperBound, Supported::yes, VoteBehavior::DefaultYes) XRPL_FEATURE(DeletableAccounts, Supported::yes, VoteBehavior::DefaultYes) XRPL_FIX (PayChanRecipientOwnerDir, Supported::yes, VoteBehavior::DefaultYes) @@ -116,7 +115,6 @@ XRPL_FIX (1571, Supported::yes, VoteBehavior::DefaultYe XRPL_FEATURE(Checks, Supported::yes, VoteBehavior::DefaultYes) XRPL_FEATURE(DepositAuth, Supported::yes, VoteBehavior::DefaultYes) XRPL_FIX (1513, Supported::yes, VoteBehavior::DefaultYes) -XRPL_FEATURE(FlowCross, Supported::yes, VoteBehavior::DefaultYes) XRPL_FEATURE(Flow, Supported::yes, VoteBehavior::DefaultYes) // The following amendments are obsolete, but must remain supported @@ -159,3 +157,4 @@ XRPL_RETIRE(fix1201) XRPL_RETIRE(fix1512) XRPL_RETIRE(fix1523) XRPL_RETIRE(fix1528) +XRPL_RETIRE(FlowCross) diff --git a/src/test/app/CrossingLimits_test.cpp b/src/test/app/CrossingLimits_test.cpp index cef0b03399..ff4650a1f8 100644 --- a/src/test/app/CrossingLimits_test.cpp +++ b/src/test/app/CrossingLimits_test.cpp @@ -77,10 +77,8 @@ public: auto const gw = Account("gateway"); auto const USD = gw["USD"]; - // The number of allowed offers to cross is different between - // Taker and FlowCross. Taker allows 850 and FlowCross allows 1000. - // Accommodate that difference in the test. - int const maxConsumed = features[featureFlowCross] ? 1000 : 850; + // The payment engine allows 1000 offers to cross. + int const maxConsumed = 1000; env.fund(XRP(100000000), gw, "alice", "bob", "carol"); int const bobsOfferCount = maxConsumed + 150; @@ -119,11 +117,8 @@ public: env.fund(XRP(100000000), gw, "alice", "bob", "carol", "dan", "evita"); - // The number of offers allowed to cross is different between - // Taker and FlowCross. Taker allows 850 and FlowCross allows 1000. - // Accommodate that difference in the test. - bool const isFlowCross{features[featureFlowCross]}; - int const maxConsumed = isFlowCross ? 1000 : 850; + // The payment engine allows 1000 offers to cross. 
+ int const maxConsumed = 1000; int const evitasOfferCount{maxConsumed + 49}; env.trust(USD(1000), "alice"); @@ -133,14 +128,8 @@ public: env.trust(USD(evitasOfferCount + 1), "evita"); env(pay(gw, "evita", USD(evitasOfferCount + 1))); - // Taker and FlowCross have another difference we must accommodate. - // Taker allows a total of 1000 unfunded offers to be consumed - // beyond the 850 offers it can take. FlowCross draws no such - // distinction; its limit is 1000 funded or unfunded. - // - // Give carol an extra 150 (unfunded) offers when we're using Taker - // to accommodate that difference. - int const carolsOfferCount{isFlowCross ? 700 : 850}; + // The payment engine has a limit of 1000 funded or unfunded offers. + int const carolsOfferCount{700}; n_offers(env, 400, "alice", XRP(1), USD(1)); n_offers(env, carolsOfferCount, "carol", XRP(1), USD(1)); n_offers(env, evitasOfferCount, "evita", XRP(1), USD(1)); @@ -268,9 +257,9 @@ public: } void - testAutoBridgedLimitsFlowCross(FeatureBitset features) + testAutoBridgedLimits(FeatureBitset features) { - testcase("Auto Bridged Limits FlowCross"); + testcase("Auto Bridged Limits"); // If any book step in a payment strand consumes 1000 offers, the // liquidity from the offers is used, but that strand will be marked as @@ -452,26 +441,6 @@ public: } } - void - testAutoBridgedLimits(FeatureBitset features) - { - // Taker and FlowCross are too different in the way they handle - // autobridging to make one test suit both approaches. - // - // o Taker alternates between books, completing one full increment - // before returning to make another pass. - // - // o FlowCross extracts as much as possible in one book at one Quality - // before proceeding to the other book. This reduces the number of - // times we change books. - // - // So the tests for the two forms of autobridging are separate. 
- if (features[featureFlowCross]) - testAutoBridgedLimitsFlowCross(features); - else - testAutoBridgedLimitsTaker(features); - } - void testOfferOverflow(FeatureBitset features) { @@ -522,11 +491,10 @@ public: n_offers(env, 998, alice, XRP(0.96), USD(1)); n_offers(env, 998, alice, XRP(0.95), USD(1)); - bool const withFlowCross = features[featureFlowCross]; bool const withSortStrands = features[featureFlowSortStrands]; auto const expectedTER = [&]() -> TER { - if (withFlowCross && !withSortStrands) + if (!withSortStrands) return TER{tecOVERSIZE}; return tesSUCCESS; }(); @@ -535,8 +503,6 @@ public: env.close(); auto const expectedUSD = [&] { - if (!withFlowCross) - return USD(850); if (!withSortStrands) return USD(0); return USD(1996); @@ -558,11 +524,9 @@ public: using namespace jtx; auto const sa = supported_amendments(); testAll(sa); + testAll(sa - featureFlowSortStrands); testAll(sa - featurePermissionedDEX); testAll(sa - featureFlowSortStrands - featurePermissionedDEX); - testAll( - sa - featureFlowCross - featureFlowSortStrands - - featurePermissionedDEX); } }; diff --git a/src/test/app/DeliverMin_test.cpp b/src/test/app/DeliverMin_test.cpp index 4ee7c9c72e..83d7c4a1b9 100644 --- a/src/test/app/DeliverMin_test.cpp +++ b/src/test/app/DeliverMin_test.cpp @@ -143,8 +143,6 @@ public: { using namespace jtx; auto const sa = supported_amendments(); - test_convert_all_of_an_asset( - sa - featureFlowCross - featurePermissionedDEX); test_convert_all_of_an_asset(sa - featurePermissionedDEX); test_convert_all_of_an_asset(sa); } diff --git a/src/test/app/Discrepancy_test.cpp b/src/test/app/Discrepancy_test.cpp index bc72b2fd16..ab8d5a605f 100644 --- a/src/test/app/Discrepancy_test.cpp +++ b/src/test/app/Discrepancy_test.cpp @@ -147,7 +147,6 @@ public: { using namespace test::jtx; auto const sa = supported_amendments(); - testXRPDiscrepancy(sa - featureFlowCross - featurePermissionedDEX); testXRPDiscrepancy(sa - featurePermissionedDEX); testXRPDiscrepancy(sa); } diff --git a/src/test/app/Flow_test.cpp b/src/test/app/Flow_test.cpp index 68485f4eee..0953b6f44f 100644 --- a/src/test/app/Flow_test.cpp +++ b/src/test/app/Flow_test.cpp @@ -1333,7 +1333,6 @@ struct Flow_test : public beast::unit_test::suite using namespace jtx; auto const sa = supported_amendments(); - testWithFeats(sa - featureFlowCross - featurePermissionedDEX); testWithFeats(sa - featurePermissionedDEX); testWithFeats(sa); testEmptyStrand(sa); @@ -1347,12 +1346,9 @@ struct Flow_manual_test : public Flow_test { using namespace jtx; auto const all = supported_amendments(); - FeatureBitset const flowCross{featureFlowCross}; FeatureBitset const f1513{fix1513}; FeatureBitset const permDex{featurePermissionedDEX}; - testWithFeats(all - flowCross - f1513 - permDex); - testWithFeats(all - flowCross - permDex); testWithFeats(all - f1513 - permDex); testWithFeats(all - permDex); testWithFeats(all); diff --git a/src/test/app/Freeze_test.cpp b/src/test/app/Freeze_test.cpp index 8c2021d657..8002aa2a3b 100644 --- a/src/test/app/Freeze_test.cpp +++ b/src/test/app/Freeze_test.cpp @@ -961,24 +961,12 @@ class Freeze_test : public beast::unit_test::suite env.close(); // test: A1 wants to buy, must fail - if (features[featureFlowCross]) - { - env(offer(A1, USD(1), XRP(2)), - txflags(tfFillOrKill), - ter(tecKILLED)); - env.close(); - env.require( - balance(A1, USD(1002)), - balance(A2, USD(997)), - offers(A1, 0)); - } - else - { - // The transaction that should be here would succeed. - // I don't want to adjust balances in following tests. 
Flow - // cross feature flag is not relevant to this particular test - // case so we're not missing out some corner cases checks. - } + env(offer(A1, USD(1), XRP(2)), + txflags(tfFillOrKill), + ter(tecKILLED)); + env.close(); + env.require( + balance(A1, USD(1002)), balance(A2, USD(997)), offers(A1, 0)); // test: A1 can create passive sell offer env(offer(A1, XRP(2), USD(1)), txflags(tfPassive)); @@ -2107,17 +2095,14 @@ public: }; using namespace test::jtx; auto const sa = supported_amendments(); - testAll( - sa - featureFlowCross - featureDeepFreeze - featurePermissionedDEX - - fixEnforceNFTokenTrustlineV2); - testAll( - sa - featureFlowCross - featurePermissionedDEX - - fixEnforceNFTokenTrustlineV2); testAll( sa - featureDeepFreeze - featurePermissionedDEX - fixEnforceNFTokenTrustlineV2); testAll(sa - featurePermissionedDEX - fixEnforceNFTokenTrustlineV2); + testAll(sa - featureDeepFreeze - featurePermissionedDEX); + testAll(sa - featurePermissionedDEX); testAll(sa - fixEnforceNFTokenTrustlineV2); + testAll(sa - featureDeepFreeze); testAll(sa); } }; diff --git a/src/test/app/Offer_test.cpp b/src/test/app/Offer_test.cpp index d3481881c4..1c877fedef 100644 --- a/src/test/app/Offer_test.cpp +++ b/src/test/app/Offer_test.cpp @@ -1343,18 +1343,10 @@ public: // NOTE : // At this point, all offers are expected to be consumed. - // Alas, they are not - because of a bug in the Taker auto-bridging - // implementation which is addressed by fixTakerDryOfferRemoval. - // The pre-fixTakerDryOfferRemoval implementation (incorrect) leaves - // an empty offer in the second leg of the bridge. Validate both the - // old and the new behavior. { auto acctOffers = offersOnAccount(env, account_to_test); - bool const noStaleOffers{ - features[featureFlowCross] || - features[fixTakerDryOfferRemoval]}; - BEAST_EXPECT(acctOffers.size() == (noStaleOffers ? 0 : 1)); + BEAST_EXPECT(acctOffers.size() == 0); for (auto const& offerPtr : acctOffers) { auto const& offer = *offerPtr; @@ -1464,8 +1456,7 @@ public: std::uint32_t const bobOfferSeq = env.seq(bob); env(offer(bob, XRP(2000), USD(1))); - if (localFeatures[featureFlowCross] && - localFeatures[fixReducedOffersV2]) + if (localFeatures[fixReducedOffersV2]) { // With the rounding introduced by fixReducedOffersV2, bob's // offer does not cross alice's offer and goes straight into @@ -1489,8 +1480,7 @@ public: // crossing algorithms becomes apparent. The old offer crossing // would consume small_amount and transfer no XRP. The new offer // crossing transfers a single drop, rather than no drops. - auto const crossingDelta = - localFeatures[featureFlowCross] ? drops(1) : drops(0); + auto const crossingDelta = drops(1); jrr = ledgerEntryState(env, alice, gw, "USD"); BEAST_EXPECT( @@ -2044,15 +2034,9 @@ public: env.require(balance(carol, USD(0))); env.require(balance(carol, EUR(none))); - // If neither featureFlowCross nor fixTakerDryOfferRemoval are defined - // then carol's offer will be left on the books, but with zero value. - int const emptyOfferCount{ - features[featureFlowCross] || features[fixTakerDryOfferRemoval] - ? 
0 - : 1}; - env.require(offers(carol, 0 + emptyOfferCount)); - env.require(owners(carol, 1 + emptyOfferCount)); + env.require(offers(carol, 0)); + env.require(owners(carol, 1)); } void @@ -4236,22 +4220,13 @@ public: }; // clang-format off - TestData const takerTests[]{ - // btcStart ------------------- actor[0] -------------------- ------------------- actor[1] -------------------- - {0, 0, 1, BTC(5), {{"deb", 0, drops(3900000'000000 - 4 * baseFee), BTC(5), USD(3000)}, {"dan", 0, drops(4100000'000000 - 3 * baseFee), BTC(0), USD(750)}}}, // no BTC xfer fee - {0, 0, 0, BTC(5), {{"flo", 0, drops(4000000'000000 - 5 * baseFee), BTC(5), USD(2000)} }} // no xfer fee - }; - - TestData const flowTests[]{ + TestData const tests[]{ // btcStart ------------------- actor[0] -------------------- ------------------- actor[1] -------------------- {0, 0, 1, BTC(5), {{"gay", 1, drops(3950000'000000 - 4 * baseFee), BTC(5), USD(2500)}, {"gar", 1, drops(4050000'000000 - 3 * baseFee), BTC(0), USD(1375)}}}, // no BTC xfer fee {0, 0, 0, BTC(5), {{"hye", 2, drops(4000000'000000 - 5 * baseFee), BTC(5), USD(2000)} }} // no xfer fee }; // clang-format on - // Pick the right tests. - auto const& tests = features[featureFlowCross] ? flowTests : takerTests; - for (auto const& t : tests) { Account const& self = t.actors[t.self].acct; @@ -4378,9 +4353,8 @@ public: // 1. alice creates an offer to acquire USD/gw, an asset for which // she does not have a trust line. At some point in the future, // gw adds lsfRequireAuth. Then, later, alice's offer is crossed. - // a. With Taker alice's unauthorized offer is consumed. - // b. With FlowCross alice's offer is deleted, not consumed, - // since alice is not authorized to hold USD/gw. + // Alice's offer is deleted, not consumed, since alice is not + // authorized to hold USD/gw. // // 2. alice tries to create an offer for USD/gw, now that gw has // lsfRequireAuth set. This time the offer create fails because @@ -4428,33 +4402,17 @@ public: // gw now requires authorization and bob has gwUSD(50). Let's see if // bob can cross alice's offer. // - // o With Taker bob's offer should cross alice's. - // o With FlowCross bob's offer shouldn't cross and alice's - // unauthorized offer should be deleted. + // Bob's offer shouldn't cross and alice's unauthorized offer should be + // deleted. env(offer(bob, XRP(4000), gwUSD(40))); env.close(); std::uint32_t const bobOfferSeq = env.seq(bob) - 1; - bool const flowCross = features[featureFlowCross]; - env.require(offers(alice, 0)); - if (flowCross) - { - // alice's unauthorized offer is deleted & bob's offer not crossed. - env.require(balance(alice, gwUSD(none))); - env.require(offers(bob, 1)); - env.require(balance(bob, gwUSD(50))); - } - else - { - // alice's offer crosses bob's - env.require(balance(alice, gwUSD(40))); - env.require(offers(bob, 0)); - env.require(balance(bob, gwUSD(10))); - - // The rest of the test verifies FlowCross behavior. - return; - } + // alice's unauthorized offer is deleted & bob's offer not crossed. + env.require(balance(alice, gwUSD(none))); + env.require(offers(bob, 1)); + env.require(balance(bob, gwUSD(50))); // See if alice can create an offer without authorization. alice // should not be able to create the offer and bob's offer should be @@ -5186,9 +5144,7 @@ public: // tfFillOrKill, TakerPays must be filled { TER const err = - features[fixFillOrKill] || !features[featureFlowCross] - ? TER(tesSUCCESS) - : tecKILLED; + features[fixFillOrKill] ? 
TER(tesSUCCESS) : tecKILLED; env(offer(maker, XRP(100), USD(100))); env.close(); @@ -5410,7 +5366,6 @@ public: { using namespace jtx; static FeatureBitset const all{supported_amendments()}; - static FeatureBitset const flowCross{featureFlowCross}; static FeatureBitset const takerDryOffer{fixTakerDryOfferRemoval}; static FeatureBitset const rmSmallIncreasedQOffers{ fixRmSmallIncreasedQOffers}; @@ -5419,10 +5374,9 @@ public: FeatureBitset const fillOrKill{fixFillOrKill}; FeatureBitset const permDEX{featurePermissionedDEX}; - static std::array const feats{ + static std::array const feats{ all - takerDryOffer - immediateOfferKilled - permDEX, - all - flowCross - takerDryOffer - immediateOfferKilled - permDEX, - all - flowCross - immediateOfferKilled - permDEX, + all - immediateOfferKilled - permDEX, all - rmSmallIncreasedQOffers - immediateOfferKilled - fillOrKill - permDEX, all - fillOrKill - permDEX, @@ -5444,7 +5398,7 @@ public: } }; -class OfferWOFlowCross_test : public OfferBaseUtil_test +class OfferWTakerDryOffer_test : public OfferBaseUtil_test { void run() override @@ -5453,7 +5407,7 @@ class OfferWOFlowCross_test : public OfferBaseUtil_test } }; -class OfferWTakerDryOffer_test : public OfferBaseUtil_test +class OfferWOSmallQOffers_test : public OfferBaseUtil_test { void run() override @@ -5462,7 +5416,7 @@ class OfferWTakerDryOffer_test : public OfferBaseUtil_test } }; -class OfferWOSmallQOffers_test : public OfferBaseUtil_test +class OfferWOFillOrKill_test : public OfferBaseUtil_test { void run() override @@ -5471,7 +5425,7 @@ class OfferWOSmallQOffers_test : public OfferBaseUtil_test } }; -class OfferWOFillOrKill_test : public OfferBaseUtil_test +class OfferWOPermDEX_test : public OfferBaseUtil_test { void run() override @@ -5480,21 +5434,12 @@ class OfferWOFillOrKill_test : public OfferBaseUtil_test } }; -class OfferWOPermDEX_test : public OfferBaseUtil_test -{ - void - run() override - { - OfferBaseUtil_test::run(5); - } -}; - class OfferAllFeatures_test : public OfferBaseUtil_test { void run() override { - OfferBaseUtil_test::run(6, true); + OfferBaseUtil_test::run(5, true); } }; @@ -5505,26 +5450,23 @@ class Offer_manual_test : public OfferBaseUtil_test { using namespace jtx; FeatureBitset const all{supported_amendments()}; - FeatureBitset const flowCross{featureFlowCross}; FeatureBitset const f1513{fix1513}; FeatureBitset const immediateOfferKilled{featureImmediateOfferKilled}; FeatureBitset const takerDryOffer{fixTakerDryOfferRemoval}; FeatureBitset const fillOrKill{fixFillOrKill}; FeatureBitset const permDEX{featurePermissionedDEX}; - testAll(all - flowCross - f1513 - immediateOfferKilled - permDEX); - testAll(all - flowCross - immediateOfferKilled - permDEX); + testAll(all - f1513 - immediateOfferKilled - permDEX); testAll(all - immediateOfferKilled - fillOrKill - permDEX); testAll(all - fillOrKill - permDEX); testAll(all - permDEX); testAll(all); - testAll(all - flowCross - takerDryOffer - permDEX); + testAll(all - takerDryOffer - permDEX); } }; BEAST_DEFINE_TESTSUITE_PRIO(OfferBaseUtil, tx, ripple, 2); -BEAST_DEFINE_TESTSUITE_PRIO(OfferWOFlowCross, tx, ripple, 2); BEAST_DEFINE_TESTSUITE_PRIO(OfferWTakerDryOffer, tx, ripple, 2); BEAST_DEFINE_TESTSUITE_PRIO(OfferWOSmallQOffers, tx, ripple, 2); BEAST_DEFINE_TESTSUITE_PRIO(OfferWOFillOrKill, tx, ripple, 2); diff --git a/src/test/app/PayStrand_test.cpp b/src/test/app/PayStrand_test.cpp index 9188da62ac..fe9c11a318 100644 --- a/src/test/app/PayStrand_test.cpp +++ b/src/test/app/PayStrand_test.cpp @@ -1268,15 +1268,12 @@ struct 
PayStrand_test : public beast::unit_test::suite { using namespace jtx; auto const sa = supported_amendments(); - testToStrand(sa - featureFlowCross - featurePermissionedDEX); testToStrand(sa - featurePermissionedDEX); testToStrand(sa); - testRIPD1373(sa - featureFlowCross - featurePermissionedDEX); testRIPD1373(sa - featurePermissionedDEX); testRIPD1373(sa); - testLoop(sa - featureFlowCross - featurePermissionedDEX); testLoop(sa - featurePermissionedDEX); testLoop(sa); diff --git a/src/test/app/PermissionedDEX_test.cpp b/src/test/app/PermissionedDEX_test.cpp index 693381debf..f2d40140cd 100644 --- a/src/test/app/PermissionedDEX_test.cpp +++ b/src/test/app/PermissionedDEX_test.cpp @@ -207,24 +207,6 @@ class PermissionedDEX_test : public beast::unit_test::suite env.close(); } - // test preflight: permissioned dex cannot be used without enable - // flowcross - { - Env env(*this, features - featureFlowCross); - auto const& [gw, domainOwner, alice, bob, carol, USD, domainID, credType] = - PermissionedDEX(env); - - env(offer(bob, XRP(10), USD(10)), - domain(domainID), - ter(temDISABLED)); - env.close(); - - env.enableFeature(featureFlowCross); - env.close(); - env(offer(bob, XRP(10), USD(10)), domain(domainID)); - env.close(); - } - // preclaim - someone outside of the domain cannot create domain offer { Env env(*this, features); diff --git a/src/test/app/SetAuth_test.cpp b/src/test/app/SetAuth_test.cpp index a4c2df6228..28a5b3be91 100644 --- a/src/test/app/SetAuth_test.cpp +++ b/src/test/app/SetAuth_test.cpp @@ -75,7 +75,6 @@ struct SetAuth_test : public beast::unit_test::suite { using namespace jtx; auto const sa = supported_amendments(); - testAuth(sa - featureFlowCross - featurePermissionedDEX); testAuth(sa - featurePermissionedDEX); testAuth(sa); } diff --git a/src/test/app/TrustAndBalance_test.cpp b/src/test/app/TrustAndBalance_test.cpp index 8f092a725f..d57e8ec418 100644 --- a/src/test/app/TrustAndBalance_test.cpp +++ b/src/test/app/TrustAndBalance_test.cpp @@ -481,7 +481,6 @@ public: using namespace test::jtx; auto const sa = supported_amendments(); - testWithFeatures(sa - featureFlowCross - featurePermissionedDEX); testWithFeatures(sa - featurePermissionedDEX); testWithFeatures(sa); } diff --git a/src/test/ledger/BookDirs_test.cpp b/src/test/ledger/BookDirs_test.cpp index 28d9d2c102..45cd7e332d 100644 --- a/src/test/ledger/BookDirs_test.cpp +++ b/src/test/ledger/BookDirs_test.cpp @@ -104,7 +104,6 @@ struct BookDirs_test : public beast::unit_test::suite { using namespace jtx; auto const sa = supported_amendments(); - test_bookdir(sa - featureFlowCross - featurePermissionedDEX); test_bookdir(sa - featurePermissionedDEX); test_bookdir(sa); } diff --git a/src/test/ledger/PaymentSandbox_test.cpp b/src/test/ledger/PaymentSandbox_test.cpp index 8bb0666e06..7901cec364 100644 --- a/src/test/ledger/PaymentSandbox_test.cpp +++ b/src/test/ledger/PaymentSandbox_test.cpp @@ -421,7 +421,6 @@ public: }; using namespace jtx; auto const sa = supported_amendments(); - testAll(sa - featureFlowCross - featurePermissionedDEX); testAll(sa - featurePermissionedDEX); testAll(sa); } diff --git a/src/test/rpc/GatewayBalances_test.cpp b/src/test/rpc/GatewayBalances_test.cpp index 7e9273d25e..93e88b78ac 100644 --- a/src/test/rpc/GatewayBalances_test.cpp +++ b/src/test/rpc/GatewayBalances_test.cpp @@ -252,10 +252,7 @@ public: { using namespace jtx; auto const sa = supported_amendments(); - for (auto feature : - {sa - featureFlowCross - featurePermissionedDEX, - sa - featurePermissionedDEX, - sa}) + for (auto feature 
: {sa - featurePermissionedDEX, sa}) { testGWB(feature); testGWBApiVersions(feature); diff --git a/src/test/rpc/NoRipple_test.cpp b/src/test/rpc/NoRipple_test.cpp index 42c86b34bb..1450abef69 100644 --- a/src/test/rpc/NoRipple_test.cpp +++ b/src/test/rpc/NoRipple_test.cpp @@ -294,7 +294,6 @@ public: }; using namespace jtx; auto const sa = supported_amendments(); - withFeatsTests(sa - featureFlowCross - featurePermissionedDEX); withFeatsTests(sa - featurePermissionedDEX); withFeatsTests(sa); } diff --git a/src/xrpld/app/paths/detail/BookStep.cpp b/src/xrpld/app/paths/detail/BookStep.cpp index 8d20a9900c..554d2525f5 100644 --- a/src/xrpld/app/paths/detail/BookStep.cpp +++ b/src/xrpld/app/paths/detail/BookStep.cpp @@ -743,7 +743,6 @@ BookStep::forEachOffer( FlowOfferStream offers( sb, afView, book_, sb.parentCloseTime(), counter, j_); - bool const flowCross = afView.rules().enabled(featureFlowCross); bool offerAttempted = false; std::optional ofrQ; auto execOffer = [&](auto& offer) { @@ -760,8 +759,8 @@ BookStep::forEachOffer( // Make sure offer owner has authorization to own IOUs from issuer. // An account can always own XRP or their own IOUs. - if (flowCross && (!isXRP(offer.issueIn().currency)) && - (offer.owner() != offer.issueIn().account)) + if (!isXRP(offer.issueIn().currency) && + offer.owner() != offer.issueIn().account) { auto const& issuerID = offer.issueIn().account; auto const issuer = afView.read(keylet::account(issuerID)); diff --git a/src/xrpld/app/tx/detail/CreateOffer.cpp b/src/xrpld/app/tx/detail/CreateOffer.cpp index 7ccecd7a47..9543a4fcd9 100644 --- a/src/xrpld/app/tx/detail/CreateOffer.cpp +++ b/src/xrpld/app/tx/detail/CreateOffer.cpp @@ -50,12 +50,6 @@ CreateOffer::preflight(PreflightContext const& ctx) !ctx.rules.enabled(featurePermissionedDEX)) return temDISABLED; - // Permissioned offers should use the PE (which must be enabled by - // featureFlowCross amendment) - if (ctx.rules.enabled(featurePermissionedDEX) && - !ctx.rules.enabled(featureFlowCross)) - return temDISABLED; - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) return ret; @@ -685,54 +679,6 @@ CreateOffer::step_account(OfferStream& stream, Taker const& taker) return false; } -// Fill as much of the offer as possible by consuming offers -// already on the books. Return the status and the amount of -// the offer to left unfilled. -std::pair -CreateOffer::takerCross( - Sandbox& sb, - Sandbox& sbCancel, - Amounts const& takerAmount) -{ - NetClock::time_point const when = sb.parentCloseTime(); - - beast::WrappedSink takerSink(j_, "Taker "); - - Taker taker( - cross_type_, - sb, - account_, - takerAmount, - ctx_.tx.getFlags(), - beast::Journal(takerSink)); - - // If the taker is unfunded before we begin crossing - // there's nothing to do - just return an error. - // - // We check this in preclaim, but when selling XRP - // charged fees can cause a user's available balance - // to go to 0 (by causing it to dip below the reserve) - // so we check this case again. 
- if (taker.unfunded()) - { - JLOG(j_.debug()) << "Not crossing: taker is unfunded."; - return {tecUNFUNDED_OFFER, takerAmount}; - } - - try - { - if (cross_type_ == CrossType::IouToIou) - return bridged_cross(taker, sb, sbCancel, when); - - return direct_cross(taker, sb, sbCancel, when); - } - catch (std::exception const& e) - { - JLOG(j_.error()) << "Exception during offer crossing: " << e.what(); - return {tecINTERNAL, taker.remaining_offer()}; - } -} - std::pair CreateOffer::flowCross( PaymentSandbox& psb, @@ -944,22 +890,11 @@ CreateOffer::cross( Amounts const& takerAmount, std::optional const& domainID) { - if (sb.rules().enabled(featureFlowCross)) - { - PaymentSandbox psbFlow{&sb}; - PaymentSandbox psbCancelFlow{&sbCancel}; - auto const ret = - flowCross(psbFlow, psbCancelFlow, takerAmount, domainID); - psbFlow.apply(sb); - psbCancelFlow.apply(sbCancel); - return ret; - } - - Sandbox sbTaker{&sb}; - Sandbox sbCancelTaker{&sbCancel}; - auto const ret = takerCross(sbTaker, sbCancelTaker, takerAmount); - sbTaker.apply(sb); - sbCancelTaker.apply(sbCancel); + PaymentSandbox psbFlow{&sb}; + PaymentSandbox psbCancelFlow{&sbCancel}; + auto const ret = flowCross(psbFlow, psbCancelFlow, takerAmount, domainID); + psbFlow.apply(sb); + psbCancelFlow.apply(sbCancel); return ret; } diff --git a/src/xrpld/app/tx/detail/CreateOffer.h b/src/xrpld/app/tx/detail/CreateOffer.h index 9b35062d8a..f995f4a5d6 100644 --- a/src/xrpld/app/tx/detail/CreateOffer.h +++ b/src/xrpld/app/tx/detail/CreateOffer.h @@ -109,13 +109,6 @@ private: bool reachedOfferCrossingLimit(Taker const& taker) const; - // Fill offer as much as possible by consuming offers already on the books, - // and adjusting account balances accordingly. - // - // Charges fees on top to taker. - std::pair - takerCross(Sandbox& sb, Sandbox& sbCancel, Amounts const& takerAmount); - // Use the payment flow code to perform offer crossing. 
std::pair flowCross( From 8bfaa7fe0aed0cfda56b27be96f0d6e2cde5dd71 Mon Sep 17 00:00:00 2001 From: Vlad <129996061+vvysokikh1@users.noreply.github.com> Date: Wed, 16 Jul 2025 12:47:54 +0100 Subject: [PATCH 079/244] test: Run unit tests regardless of 'Supported' amendment status (#5537) --- src/test/app/AMMClawback_test.cpp | 2 +- src/test/app/AMMExtended_test.cpp | 20 ++++++------ src/test/app/AMM_test.cpp | 38 ++++++++++++--------- src/test/app/AccountDelete_test.cpp | 8 ++--- src/test/app/AmendmentTable_test.cpp | 2 +- src/test/app/Batch_test.cpp | 2 +- src/test/app/Check_test.cpp | 2 +- src/test/app/Clawback_test.cpp | 2 +- src/test/app/Credentials_test.cpp | 2 +- src/test/app/CrossingLimits_test.cpp | 2 +- src/test/app/DID_test.cpp | 2 +- src/test/app/Delegate_test.cpp | 2 +- src/test/app/DeliverMin_test.cpp | 2 +- src/test/app/DepositAuth_test.cpp | 16 ++++----- src/test/app/Discrepancy_test.cpp | 2 +- src/test/app/EscrowToken_test.cpp | 2 +- src/test/app/Escrow_test.cpp | 4 +-- src/test/app/FixNFTokenPageLinks_test.cpp | 8 ++--- src/test/app/Flow_test.cpp | 8 ++--- src/test/app/Freeze_test.cpp | 2 +- src/test/app/LPTokenTransfer_test.cpp | 2 +- src/test/app/LedgerMaster_test.cpp | 2 +- src/test/app/MPToken_test.cpp | 4 +-- src/test/app/MultiSign_test.cpp | 4 +-- src/test/app/NFTokenAuth_test.cpp | 2 +- src/test/app/NFTokenBurn_test.cpp | 2 +- src/test/app/NFTokenDir_test.cpp | 2 +- src/test/app/NFToken_test.cpp | 2 +- src/test/app/Offer_test.cpp | 4 +-- src/test/app/Oracle_test.cpp | 4 +-- src/test/app/PayChan_test.cpp | 4 +-- src/test/app/PayStrand_test.cpp | 2 +- src/test/app/PermissionedDEX_test.cpp | 2 +- src/test/app/PermissionedDomains_test.cpp | 6 ++-- src/test/app/PseudoTx_test.cpp | 2 +- src/test/app/ReducedOffer_test.cpp | 20 ++++++------ src/test/app/SetAuth_test.cpp | 2 +- src/test/app/SetRegularKey_test.cpp | 8 ++--- src/test/app/SetTrust_test.cpp | 2 +- src/test/app/TheoreticalQuality_test.cpp | 4 +-- src/test/app/Ticket_test.cpp | 6 ++-- src/test/app/TrustAndBalance_test.cpp | 2 +- src/test/app/Vault_test.cpp | 40 ++++++++++------------- src/test/app/XChain_test.cpp | 4 +-- src/test/app/tx/apply_test.cpp | 4 +-- src/test/consensus/NegativeUNL_test.cpp | 6 ++-- src/test/jtx/AMMTest.h | 4 +-- src/test/jtx/Env.h | 8 ++--- src/test/jtx/Env_test.cpp | 8 ++--- src/test/jtx/impl/xchain_bridge.cpp | 2 +- src/test/ledger/BookDirs_test.cpp | 2 +- src/test/ledger/Directory_test.cpp | 2 +- src/test/ledger/Invariants_test.cpp | 2 +- src/test/ledger/PaymentSandbox_test.cpp | 2 +- src/test/rpc/AMMInfo_test.cpp | 2 +- src/test/rpc/AccountInfo_test.cpp | 2 +- src/test/rpc/AccountObjects_test.cpp | 2 +- src/test/rpc/AccountSet_test.cpp | 4 +-- src/test/rpc/BookChanges_test.cpp | 2 +- src/test/rpc/Book_test.cpp | 4 +-- src/test/rpc/GatewayBalances_test.cpp | 2 +- src/test/rpc/LedgerData_test.cpp | 4 +-- src/test/rpc/LedgerEntry_test.cpp | 2 +- src/test/rpc/NoRipple_test.cpp | 2 +- src/test/rpc/Subscribe_test.cpp | 4 +-- src/test/rpc/Transaction_test.cpp | 2 +- 66 files changed, 167 insertions(+), 165 deletions(-) diff --git a/src/test/app/AMMClawback_test.cpp b/src/test/app/AMMClawback_test.cpp index 83257f0755..9564911664 100644 --- a/src/test/app/AMMClawback_test.cpp +++ b/src/test/app/AMMClawback_test.cpp @@ -2443,7 +2443,7 @@ class AMMClawback_test : public beast::unit_test::suite run() override { FeatureBitset const all{ - jtx::supported_amendments() | fixAMMClawbackRounding}; + jtx::testable_amendments() | fixAMMClawbackRounding}; testInvalidRequest(); testFeatureDisabled(all - 
featureAMMClawback); diff --git a/src/test/app/AMMExtended_test.cpp b/src/test/app/AMMExtended_test.cpp index 70b2f30e1d..893e9e4f75 100644 --- a/src/test/app/AMMExtended_test.cpp +++ b/src/test/app/AMMExtended_test.cpp @@ -1447,7 +1447,7 @@ private: testOffers() { using namespace jtx; - FeatureBitset const all{supported_amendments()}; + FeatureBitset const all{testable_amendments()}; testRmFundedOffer(all); testRmFundedOffer(all - fixAMMv1_1 - fixAMMv1_3); testEnforceNoRipple(all); @@ -2823,8 +2823,8 @@ private: for (auto const withFix : {true, false}) { auto const feats = withFix - ? supported_amendments() - : supported_amendments() - FeatureBitset{fix1781}; + ? testable_amendments() + : testable_amendments() - FeatureBitset{fix1781}; // Payment path starting with XRP Env env(*this, feats); @@ -3789,7 +3789,7 @@ private: testFlow() { using namespace jtx; - FeatureBitset const all{supported_amendments()}; + FeatureBitset const all{testable_amendments()}; testFalseDry(all); testBookStep(all); @@ -3803,7 +3803,7 @@ private: testCrossingLimits() { using namespace jtx; - FeatureBitset const all{supported_amendments()}; + FeatureBitset const all{testable_amendments()}; testStepLimit(all); testStepLimit(all - fixAMMv1_1 - fixAMMv1_3); } @@ -3812,7 +3812,7 @@ private: testDeliverMin() { using namespace jtx; - FeatureBitset const all{supported_amendments()}; + FeatureBitset const all{testable_amendments()}; test_convert_all_of_an_asset(all); test_convert_all_of_an_asset(all - fixAMMv1_1 - fixAMMv1_3); } @@ -3820,7 +3820,7 @@ private: void testDepositAuth() { - auto const supported{jtx::supported_amendments()}; + auto const supported{jtx::testable_amendments()}; testPayment(supported - featureDepositPreauth); testPayment(supported); testPayIOU(); @@ -3830,7 +3830,7 @@ private: testFreeze() { using namespace test::jtx; - auto const sa = supported_amendments(); + auto const sa = testable_amendments(); testRippleState(sa); testGlobalFreeze(sa); testOffersWhenFrozen(sa); @@ -3840,7 +3840,7 @@ private: testMultisign() { using namespace jtx; - auto const all = supported_amendments(); + auto const all = testable_amendments(); testTxMultisign( all - featureMultiSignReserve - featureExpandedSignerList); @@ -3852,7 +3852,7 @@ private: testPayStrand() { using namespace jtx; - auto const all = supported_amendments(); + auto const all = testable_amendments(); testToStrand(all); testRIPD1373(all); diff --git a/src/test/app/AMM_test.cpp b/src/test/app/AMM_test.cpp index 2ee9e5f1f3..c89aebf813 100644 --- a/src/test/app/AMM_test.cpp +++ b/src/test/app/AMM_test.cpp @@ -66,7 +66,7 @@ private: {}, 0, {}, - {supported_amendments() | featureSingleAssetVault}); + {testable_amendments() | featureSingleAssetVault}); // XRP to IOU, without featureSingleAssetVault testAMM( @@ -77,7 +77,7 @@ private: {}, 0, {}, - {supported_amendments() - featureSingleAssetVault}); + {testable_amendments() - featureSingleAssetVault}); // IOU to IOU testAMM( @@ -1385,7 +1385,7 @@ private: testcase("Deposit"); using namespace jtx; - auto const all = supported_amendments(); + auto const all = testable_amendments(); // Equal deposit: 1000000 tokens, 10% of the current pool testAMM([&](AMM& ammAlice, Env& env) { @@ -1687,7 +1687,7 @@ private: testcase("Invalid Withdraw"); using namespace jtx; - auto const all = supported_amendments(); + auto const all = testable_amendments(); testAMM( [&](AMM& ammAlice, Env& env) { @@ -2267,7 +2267,7 @@ private: testcase("Withdraw"); using namespace jtx; - auto const all = supported_amendments(); + auto const all 
= testable_amendments(); // Equal withdrawal by Carol: 1000000 of tokens, 10% of the current // pool @@ -2688,7 +2688,7 @@ private: { testcase("Fee Vote"); using namespace jtx; - auto const all = supported_amendments(); + auto const all = testable_amendments(); // One vote sets fee to 1%. testAMM([&](AMM& ammAlice, Env& env) { @@ -4855,7 +4855,7 @@ private: { testcase("Amendment"); using namespace jtx; - FeatureBitset const all{supported_amendments()}; + FeatureBitset const all{testable_amendments()}; FeatureBitset const noAMM{all - featureAMM}; FeatureBitset const noNumber{all - fixUniversalNumber}; FeatureBitset const noAMMAndNumber{ @@ -5646,7 +5646,7 @@ private: testcase("Auto Delete"); using namespace jtx; - FeatureBitset const all{supported_amendments()}; + FeatureBitset const all{testable_amendments()}; { Env env( @@ -6306,7 +6306,7 @@ private: { testcase("Fix Default Inner Object"); using namespace jtx; - FeatureBitset const all{supported_amendments()}; + FeatureBitset const all{testable_amendments()}; auto test = [&](FeatureBitset features, TER const& err1, @@ -7043,7 +7043,7 @@ private: {{xrpPool, iouPool}}, 889, std::nullopt, - {jtx::supported_amendments() | fixAMMv1_1}); + {jtx::testable_amendments() | fixAMMv1_1}); } void @@ -7322,7 +7322,8 @@ private: } // If featureAMMClawback is enabled, AMMCreate is allowed for // clawback-enabled issuer. Clawback from the AMM Account is not - // allowed, which will return tecAMM_ACCOUNT. We can only use + // allowed, which will return tecAMM_ACCOUNT or tecPSEUDO_ACCOUNT, + // depending on whether SingleAssetVault is enabled. We can only use // AMMClawback transaction to claw back from AMM Account. else { @@ -7333,13 +7334,16 @@ private: // By doing this, we make the clawback transaction's Amount field's // subfield `issuer` to be the AMM account, which means // we are clawing back from an AMM account. This should return an - // tecAMM_ACCOUNT error because regular Clawback transaction is not + // error because regular Clawback transaction is not // allowed for clawing back from an AMM account. Please notice the // `issuer` subfield represents the account being clawed back, which // is confusing. + auto const error = features[featureSingleAssetVault] + ? 
ter{tecPSEUDO_ACCOUNT} + : ter{tecAMM_ACCOUNT}; Issue usd(USD.issue().currency, amm.ammAccount()); auto amount = amountFromString(usd, "10"); - env(claw(gw, amount), ter(tecAMM_ACCOUNT)); + env(claw(gw, amount), error); } } @@ -7513,10 +7517,10 @@ private: }; testCase( - "tecDUPLICATE", supported_amendments() - featureSingleAssetVault); + "tecDUPLICATE", testable_amendments() - featureSingleAssetVault); testCase( "terADDRESS_COLLISION", - supported_amendments() | featureSingleAssetVault); + testable_amendments() | featureSingleAssetVault); } void @@ -7894,7 +7898,7 @@ private: void run() override { - FeatureBitset const all{jtx::supported_amendments()}; + FeatureBitset const all{jtx::testable_amendments()}; testInvalidInstance(); testInstanceCreate(); testInvalidDeposit(all); @@ -7945,6 +7949,8 @@ private: testLPTokenBalance(all - fixAMMv1_3); testLPTokenBalance(all - fixAMMv1_1 - fixAMMv1_3); testAMMClawback(all); + testAMMClawback(all - featureSingleAssetVault); + testAMMClawback(all - featureAMMClawback - featureSingleAssetVault); testAMMClawback(all - featureAMMClawback); testAMMClawback(all - fixAMMv1_1 - fixAMMv1_3 - featureAMMClawback); testAMMDepositWithFrozenAssets(all); diff --git a/src/test/app/AccountDelete_test.cpp b/src/test/app/AccountDelete_test.cpp index 1ac0256dcb..f7c4ddc509 100644 --- a/src/test/app/AccountDelete_test.cpp +++ b/src/test/app/AccountDelete_test.cpp @@ -292,7 +292,7 @@ public: // o New-styled PayChannels with the backlink. // So we start the test using old-style PayChannels. Then we pass // the amendment to get new-style PayChannels. - Env env{*this, supported_amendments() - fixPayChanRecipientOwnerDir}; + Env env{*this, testable_amendments() - fixPayChanRecipientOwnerDir}; Account const alice("alice"); Account const becky("becky"); Account const gw("gw"); @@ -461,7 +461,7 @@ public: // We need an old-style PayChannel that doesn't provide a backlink // from the destination. So don't enable the amendment with that fix. 
- Env env{*this, supported_amendments() - fixPayChanRecipientOwnerDir}; + Env env{*this, testable_amendments() - fixPayChanRecipientOwnerDir}; Account const alice("alice"); Account const becky("becky"); @@ -536,7 +536,7 @@ public: testcase("Amendment enable"); - Env env{*this, supported_amendments() - featureDeletableAccounts}; + Env env{*this, testable_amendments() - featureDeletableAccounts}; Account const alice("alice"); Account const becky("becky"); @@ -1128,7 +1128,7 @@ public: Account const becky{"becky"}; Account const carol{"carol"}; - Env env{*this, supported_amendments() - featureCredentials}; + Env env{*this, testable_amendments() - featureCredentials}; env.fund(XRP(100000), alice, becky, carol); env.close(); diff --git a/src/test/app/AmendmentTable_test.cpp b/src/test/app/AmendmentTable_test.cpp index 5ba820da95..407b2fafe1 100644 --- a/src/test/app/AmendmentTable_test.cpp +++ b/src/test/app/AmendmentTable_test.cpp @@ -1288,7 +1288,7 @@ public: void run() override { - FeatureBitset const all{test::jtx::supported_amendments()}; + FeatureBitset const all{test::jtx::testable_amendments()}; FeatureBitset const fixMajorityCalc{fixAmendmentMajorityCalc}; testConstruct(); diff --git a/src/test/app/Batch_test.cpp b/src/test/app/Batch_test.cpp index 6ce95c56d0..c8fcc4092b 100644 --- a/src/test/app/Batch_test.cpp +++ b/src/test/app/Batch_test.cpp @@ -4164,7 +4164,7 @@ public: run() override { using namespace test::jtx; - auto const sa = supported_amendments(); + auto const sa = testable_amendments(); testWithFeats(sa); } }; diff --git a/src/test/app/Check_test.cpp b/src/test/app/Check_test.cpp index 99b0c8dba3..be38b22313 100644 --- a/src/test/app/Check_test.cpp +++ b/src/test/app/Check_test.cpp @@ -2720,7 +2720,7 @@ public: run() override { using namespace test::jtx; - auto const sa = supported_amendments(); + auto const sa = testable_amendments(); testWithFeats(sa - featureCheckCashMakesTrustLine); testWithFeats(sa - disallowIncoming); testWithFeats(sa); diff --git a/src/test/app/Clawback_test.cpp b/src/test/app/Clawback_test.cpp index d41f6de556..adfe80133a 100644 --- a/src/test/app/Clawback_test.cpp +++ b/src/test/app/Clawback_test.cpp @@ -949,7 +949,7 @@ public: run() override { using namespace test::jtx; - FeatureBitset const all{supported_amendments()}; + FeatureBitset const all{testable_amendments()}; testWithFeats(all - featureMPTokensV1); testWithFeats(all); diff --git a/src/test/app/Credentials_test.cpp b/src/test/app/Credentials_test.cpp index fa6505e926..54826cbb12 100644 --- a/src/test/app/Credentials_test.cpp +++ b/src/test/app/Credentials_test.cpp @@ -1090,7 +1090,7 @@ struct Credentials_test : public beast::unit_test::suite run() override { using namespace test::jtx; - FeatureBitset const all{supported_amendments()}; + FeatureBitset const all{testable_amendments()}; testSuccessful(all); testCredentialsDelete(all); testCreateFailed(all); diff --git a/src/test/app/CrossingLimits_test.cpp b/src/test/app/CrossingLimits_test.cpp index ff4650a1f8..6e76936199 100644 --- a/src/test/app/CrossingLimits_test.cpp +++ b/src/test/app/CrossingLimits_test.cpp @@ -522,7 +522,7 @@ public: testOfferOverflow(features); }; using namespace jtx; - auto const sa = supported_amendments(); + auto const sa = testable_amendments(); testAll(sa); testAll(sa - featureFlowSortStrands); testAll(sa - featurePermissionedDEX); diff --git a/src/test/app/DID_test.cpp b/src/test/app/DID_test.cpp index c885ed0861..1f28af2d6a 100644 --- a/src/test/app/DID_test.cpp +++ b/src/test/app/DID_test.cpp @@ -390,7 
+390,7 @@ struct DID_test : public beast::unit_test::suite run() override { using namespace test::jtx; - FeatureBitset const all{supported_amendments()}; + FeatureBitset const all{testable_amendments()}; FeatureBitset const emptyDID{fixEmptyDID}; testEnabled(all); testAccountReserve(all); diff --git a/src/test/app/Delegate_test.cpp b/src/test/app/Delegate_test.cpp index ca13e4f4cd..179532140d 100644 --- a/src/test/app/Delegate_test.cpp +++ b/src/test/app/Delegate_test.cpp @@ -31,7 +31,7 @@ class Delegate_test : public beast::unit_test::suite testcase("test featurePermissionDelegation not enabled"); using namespace jtx; - Env env{*this, supported_amendments() - featurePermissionDelegation}; + Env env{*this, testable_amendments() - featurePermissionDelegation}; Account gw{"gateway"}; Account alice{"alice"}; Account bob{"bob"}; diff --git a/src/test/app/DeliverMin_test.cpp b/src/test/app/DeliverMin_test.cpp index 83d7c4a1b9..a9373fb002 100644 --- a/src/test/app/DeliverMin_test.cpp +++ b/src/test/app/DeliverMin_test.cpp @@ -142,7 +142,7 @@ public: run() override { using namespace jtx; - auto const sa = supported_amendments(); + auto const sa = testable_amendments(); test_convert_all_of_an_asset(sa - featurePermissionedDEX); test_convert_all_of_an_asset(sa); } diff --git a/src/test/app/DepositAuth_test.cpp b/src/test/app/DepositAuth_test.cpp index 6f314e3a79..ffe8c4448b 100644 --- a/src/test/app/DepositAuth_test.cpp +++ b/src/test/app/DepositAuth_test.cpp @@ -53,7 +53,7 @@ struct DepositAuth_test : public beast::unit_test::suite { // featureDepositAuth is disabled. - Env env(*this, supported_amendments() - featureDepositAuth); + Env env(*this, testable_amendments() - featureDepositAuth); env.fund(XRP(10000), alice); // Note that, to support old behavior, invalid flags are ignored. @@ -352,27 +352,27 @@ struct DepositAuth_test : public beast::unit_test::suite auto const noRippleNext = i & 0x2; auto const withDepositAuth = i & 0x4; testIssuer( - supported_amendments() | featureDepositAuth, + testable_amendments() | featureDepositAuth, noRipplePrev, noRippleNext, withDepositAuth); if (!withDepositAuth) testIssuer( - supported_amendments() - featureDepositAuth, + testable_amendments() - featureDepositAuth, noRipplePrev, noRippleNext, withDepositAuth); testNonIssuer( - supported_amendments() | featureDepositAuth, + testable_amendments() | featureDepositAuth, noRipplePrev, noRippleNext, withDepositAuth); if (!withDepositAuth) testNonIssuer( - supported_amendments() - featureDepositAuth, + testable_amendments() - featureDepositAuth, noRipplePrev, noRippleNext, withDepositAuth); @@ -420,7 +420,7 @@ struct DepositPreauth_test : public beast::unit_test::suite Account const becky{"becky"}; { // featureDepositPreauth is disabled. 
- Env env(*this, supported_amendments() - featureDepositPreauth); + Env env(*this, testable_amendments() - featureDepositPreauth); env.fund(XRP(10000), alice, becky); env.close(); @@ -830,7 +830,7 @@ struct DepositPreauth_test : public beast::unit_test::suite { testcase("Payment failure with disabled credentials rule."); - Env env(*this, supported_amendments() - featureCredentials); + Env env(*this, testable_amendments() - featureCredentials); env.fund(XRP(5000), issuer, bob, alice); env.close(); @@ -1563,7 +1563,7 @@ struct DepositPreauth_test : public beast::unit_test::suite { testEnable(); testInvalid(); - auto const supported{jtx::supported_amendments()}; + auto const supported{jtx::testable_amendments()}; testPayment(supported - featureDepositPreauth - featureCredentials); testPayment(supported - featureDepositPreauth); testPayment(supported - featureCredentials); diff --git a/src/test/app/Discrepancy_test.cpp b/src/test/app/Discrepancy_test.cpp index ab8d5a605f..da41969885 100644 --- a/src/test/app/Discrepancy_test.cpp +++ b/src/test/app/Discrepancy_test.cpp @@ -146,7 +146,7 @@ public: run() override { using namespace test::jtx; - auto const sa = supported_amendments(); + auto const sa = testable_amendments(); testXRPDiscrepancy(sa - featurePermissionedDEX); testXRPDiscrepancy(sa); } diff --git a/src/test/app/EscrowToken_test.cpp b/src/test/app/EscrowToken_test.cpp index 6ba8c48c93..e81064c825 100644 --- a/src/test/app/EscrowToken_test.cpp +++ b/src/test/app/EscrowToken_test.cpp @@ -3875,7 +3875,7 @@ public: run() override { using namespace test::jtx; - FeatureBitset const all{supported_amendments()}; + FeatureBitset const all{testable_amendments()}; testIOUWithFeats(all); testMPTWithFeats(all); } diff --git a/src/test/app/Escrow_test.cpp b/src/test/app/Escrow_test.cpp index 21ef70a86e..3eaf0f13ea 100644 --- a/src/test/app/Escrow_test.cpp +++ b/src/test/app/Escrow_test.cpp @@ -294,7 +294,7 @@ struct Escrow_test : public beast::unit_test::suite { testcase("Implied Finish Time (without fix1571)"); - Env env(*this, supported_amendments() - fix1571); + Env env(*this, testable_amendments() - fix1571); auto const baseFee = env.current()->fees().base; env.fund(XRP(5000), "alice", "bob", "carol"); env.close(); @@ -1715,7 +1715,7 @@ public: run() override { using namespace test::jtx; - FeatureBitset const all{supported_amendments()}; + FeatureBitset const all{testable_amendments()}; testWithFeats(all); testWithFeats(all - featureTokenEscrow); } diff --git a/src/test/app/FixNFTokenPageLinks_test.cpp b/src/test/app/FixNFTokenPageLinks_test.cpp index f87d70aacf..a54e889960 100644 --- a/src/test/app/FixNFTokenPageLinks_test.cpp +++ b/src/test/app/FixNFTokenPageLinks_test.cpp @@ -139,7 +139,7 @@ class FixNFTokenPageLinks_test : public beast::unit_test::suite { // Verify that the LedgerStateFix transaction is disabled // without the fixNFTokenPageLinks amendment. 
- Env env{*this, supported_amendments() - fixNFTokenPageLinks}; + Env env{*this, testable_amendments() - fixNFTokenPageLinks}; env.fund(XRP(1000), alice); auto const linkFixFee = drops(env.current()->fees().increment); @@ -148,7 +148,7 @@ class FixNFTokenPageLinks_test : public beast::unit_test::suite ter(temDISABLED)); } - Env env{*this, supported_amendments()}; + Env env{*this, testable_amendments()}; env.fund(XRP(1000), alice); std::uint32_t const ticketSeq = env.seq(alice); env(ticket::create(alice, 1)); @@ -206,7 +206,7 @@ class FixNFTokenPageLinks_test : public beast::unit_test::suite Account const alice("alice"); - Env env{*this, supported_amendments()}; + Env env{*this, testable_amendments()}; env.fund(XRP(1000), alice); // These cases all return the same TER code, but they exercise @@ -259,7 +259,7 @@ class FixNFTokenPageLinks_test : public beast::unit_test::suite Account const carol("carol"); Account const daria("daria"); - Env env{*this, supported_amendments() - fixNFTokenPageLinks}; + Env env{*this, testable_amendments() - fixNFTokenPageLinks}; env.fund(XRP(1000), alice, bob, carol, daria); //********************************************************************** diff --git a/src/test/app/Flow_test.cpp b/src/test/app/Flow_test.cpp index 0953b6f44f..0f40d70b57 100644 --- a/src/test/app/Flow_test.cpp +++ b/src/test/app/Flow_test.cpp @@ -1194,8 +1194,8 @@ struct Flow_test : public beast::unit_test::suite { auto const feats = [&withFix]() -> FeatureBitset { if (withFix) - return supported_amendments(); - return supported_amendments() - FeatureBitset{fix1781}; + return testable_amendments(); + return testable_amendments() - FeatureBitset{fix1781}; }(); { // Payment path starting with XRP @@ -1332,7 +1332,7 @@ struct Flow_test : public beast::unit_test::suite testRIPD1449(); using namespace jtx; - auto const sa = supported_amendments(); + auto const sa = testable_amendments(); testWithFeats(sa - featurePermissionedDEX); testWithFeats(sa); testEmptyStrand(sa); @@ -1345,7 +1345,7 @@ struct Flow_manual_test : public Flow_test run() override { using namespace jtx; - auto const all = supported_amendments(); + auto const all = testable_amendments(); FeatureBitset const f1513{fix1513}; FeatureBitset const permDex{featurePermissionedDEX}; diff --git a/src/test/app/Freeze_test.cpp b/src/test/app/Freeze_test.cpp index 8002aa2a3b..3bde3a30af 100644 --- a/src/test/app/Freeze_test.cpp +++ b/src/test/app/Freeze_test.cpp @@ -2094,7 +2094,7 @@ public: testNFTOffersWhenFreeze(features); }; using namespace test::jtx; - auto const sa = supported_amendments(); + auto const sa = testable_amendments(); testAll( sa - featureDeepFreeze - featurePermissionedDEX - fixEnforceNFTokenTrustlineV2); diff --git a/src/test/app/LPTokenTransfer_test.cpp b/src/test/app/LPTokenTransfer_test.cpp index 96e621dccf..e95e974547 100644 --- a/src/test/app/LPTokenTransfer_test.cpp +++ b/src/test/app/LPTokenTransfer_test.cpp @@ -467,7 +467,7 @@ public: void run() override { - FeatureBitset const all{jtx::supported_amendments()}; + FeatureBitset const all{jtx::testable_amendments()}; for (auto const features : {all, all - fixFrozenLPTokenTransfer}) { diff --git a/src/test/app/LedgerMaster_test.cpp b/src/test/app/LedgerMaster_test.cpp index 19664616b1..828e4b09c2 100644 --- a/src/test/app/LedgerMaster_test.cpp +++ b/src/test/app/LedgerMaster_test.cpp @@ -124,7 +124,7 @@ public: run() override { using namespace test::jtx; - FeatureBitset const all{supported_amendments()}; + FeatureBitset const all{testable_amendments()}; 
testWithFeats(all); } diff --git a/src/test/app/MPToken_test.cpp b/src/test/app/MPToken_test.cpp index deee217aa8..46b64e40f2 100644 --- a/src/test/app/MPToken_test.cpp +++ b/src/test/app/MPToken_test.cpp @@ -1428,7 +1428,7 @@ class MPToken_test : public beast::unit_test::suite testcase("DepositPreauth disabled featureCredentials"); { - Env env(*this, supported_amendments() - featureCredentials); + Env env(*this, testable_amendments() - featureCredentials); std::string const credIdx = "D007AE4B6E1274B4AF872588267B810C2F82716726351D1C7D38D3E5499FC6" @@ -2293,7 +2293,7 @@ public: run() override { using namespace test::jtx; - FeatureBitset const all{supported_amendments()}; + FeatureBitset const all{testable_amendments()}; // MPTokenIssuanceCreate testCreateValidation(all - featureSingleAssetVault); diff --git a/src/test/app/MultiSign_test.cpp b/src/test/app/MultiSign_test.cpp index 8c1880c1a0..571ec33417 100644 --- a/src/test/app/MultiSign_test.cpp +++ b/src/test/app/MultiSign_test.cpp @@ -1478,7 +1478,7 @@ public: Account const cheri{"cheri", KeyType::secp256k1}; Account const daria{"daria", KeyType::ed25519}; - Env env{*this, supported_amendments() - featureMultiSignReserve}; + Env env{*this, testable_amendments() - featureMultiSignReserve}; env.fund(XRP(1000), alice, becky, cheri, daria); env.close(); @@ -1729,7 +1729,7 @@ public: run() override { using namespace jtx; - auto const all = supported_amendments(); + auto const all = testable_amendments(); // The reserve required on a signer list changes based on // featureMultiSignReserve. Limits on the number of signers diff --git a/src/test/app/NFTokenAuth_test.cpp b/src/test/app/NFTokenAuth_test.cpp index 9558a03f7a..1a59dc579a 100644 --- a/src/test/app/NFTokenAuth_test.cpp +++ b/src/test/app/NFTokenAuth_test.cpp @@ -599,7 +599,7 @@ public: run() override { using namespace test::jtx; - static FeatureBitset const all{supported_amendments()}; + static FeatureBitset const all{testable_amendments()}; static std::array const features = { all - fixEnforceNFTokenTrustlineV2, all}; diff --git a/src/test/app/NFTokenBurn_test.cpp b/src/test/app/NFTokenBurn_test.cpp index a970b11789..21b0a1ffd8 100644 --- a/src/test/app/NFTokenBurn_test.cpp +++ b/src/test/app/NFTokenBurn_test.cpp @@ -1385,7 +1385,7 @@ protected: run(std::uint32_t instance, bool last = false) { using namespace test::jtx; - static FeatureBitset const all{supported_amendments()}; + static FeatureBitset const all{testable_amendments()}; static FeatureBitset const fixNFTV1_2{fixNonFungibleTokensV1_2}; static FeatureBitset const fixNFTDir{fixNFTokenDirV1}; static FeatureBitset const fixNFTRemint{fixNFTokenRemint}; diff --git a/src/test/app/NFTokenDir_test.cpp b/src/test/app/NFTokenDir_test.cpp index fe21e02739..df40781590 100644 --- a/src/test/app/NFTokenDir_test.cpp +++ b/src/test/app/NFTokenDir_test.cpp @@ -1100,7 +1100,7 @@ public: run() override { using namespace test::jtx; - FeatureBitset const all{supported_amendments()}; + FeatureBitset const all{testable_amendments()}; FeatureBitset const fixNFTDir{ fixNFTokenDirV1, featureNonFungibleTokensV1_1}; diff --git a/src/test/app/NFToken_test.cpp b/src/test/app/NFToken_test.cpp index 41bcc673d5..b79ebf3c40 100644 --- a/src/test/app/NFToken_test.cpp +++ b/src/test/app/NFToken_test.cpp @@ -8075,7 +8075,7 @@ public: run(std::uint32_t instance, bool last = false) { using namespace test::jtx; - static FeatureBitset const all{supported_amendments()}; + static FeatureBitset const all{testable_amendments()}; static FeatureBitset const 
fixNFTDir{fixNFTokenDirV1}; static std::array const feats{ diff --git a/src/test/app/Offer_test.cpp b/src/test/app/Offer_test.cpp index 1c877fedef..96f68fb2ad 100644 --- a/src/test/app/Offer_test.cpp +++ b/src/test/app/Offer_test.cpp @@ -5365,7 +5365,7 @@ public: run(std::uint32_t instance, bool last = false) { using namespace jtx; - static FeatureBitset const all{supported_amendments()}; + static FeatureBitset const all{testable_amendments()}; static FeatureBitset const takerDryOffer{fixTakerDryOfferRemoval}; static FeatureBitset const rmSmallIncreasedQOffers{ fixRmSmallIncreasedQOffers}; @@ -5449,7 +5449,7 @@ class Offer_manual_test : public OfferBaseUtil_test run() override { using namespace jtx; - FeatureBitset const all{supported_amendments()}; + FeatureBitset const all{testable_amendments()}; FeatureBitset const f1513{fix1513}; FeatureBitset const immediateOfferKilled{featureImmediateOfferKilled}; FeatureBitset const takerDryOffer{fixTakerDryOfferRemoval}; diff --git a/src/test/app/Oracle_test.cpp b/src/test/app/Oracle_test.cpp index a968970395..aaa7f9a746 100644 --- a/src/test/app/Oracle_test.cpp +++ b/src/test/app/Oracle_test.cpp @@ -783,7 +783,7 @@ private: testcase("Amendment"); using namespace jtx; - auto const features = supported_amendments() - featurePriceOracle; + auto const features = testable_amendments() - featurePriceOracle; Account const owner("owner"); Env env(*this, features); auto const baseFee = @@ -806,7 +806,7 @@ public: run() override { using namespace jtx; - auto const all = supported_amendments(); + auto const all = testable_amendments(); testInvalidSet(); testInvalidDelete(); testCreate(); diff --git a/src/test/app/PayChan_test.cpp b/src/test/app/PayChan_test.cpp index 7cb1542453..3a5d3d6ff5 100644 --- a/src/test/app/PayChan_test.cpp +++ b/src/test/app/PayChan_test.cpp @@ -1035,7 +1035,7 @@ struct PayChan_test : public beast::unit_test::suite { // Credentials amendment not enabled - Env env(*this, supported_amendments() - featureCredentials); + Env env(*this, testable_amendments() - featureCredentials); env.fund(XRP(5000), "alice", "bob"); env.close(); @@ -2344,7 +2344,7 @@ public: run() override { using namespace test::jtx; - FeatureBitset const all{supported_amendments()}; + FeatureBitset const all{testable_amendments()}; testWithFeats(all - disallowIncoming); testWithFeats(all); testDepositAuthCreds(); diff --git a/src/test/app/PayStrand_test.cpp b/src/test/app/PayStrand_test.cpp index fe9c11a318..936fe403d4 100644 --- a/src/test/app/PayStrand_test.cpp +++ b/src/test/app/PayStrand_test.cpp @@ -1267,7 +1267,7 @@ struct PayStrand_test : public beast::unit_test::suite run() override { using namespace jtx; - auto const sa = supported_amendments(); + auto const sa = testable_amendments(); testToStrand(sa - featurePermissionedDEX); testToStrand(sa); diff --git a/src/test/app/PermissionedDEX_test.cpp b/src/test/app/PermissionedDEX_test.cpp index f2d40140cd..3fd3a35f45 100644 --- a/src/test/app/PermissionedDEX_test.cpp +++ b/src/test/app/PermissionedDEX_test.cpp @@ -1551,7 +1551,7 @@ public: void run() override { - FeatureBitset const all{jtx::supported_amendments()}; + FeatureBitset const all{jtx::testable_amendments()}; // Test domain offer (w/o hyrbid) testOfferCreate(all); diff --git a/src/test/app/PermissionedDomains_test.cpp b/src/test/app/PermissionedDomains_test.cpp index e33a88fa08..31e34ccf17 100644 --- a/src/test/app/PermissionedDomains_test.cpp +++ b/src/test/app/PermissionedDomains_test.cpp @@ -53,9 +53,9 @@ exceptionExpected(Env& env, Json::Value 
const& jv) class PermissionedDomains_test : public beast::unit_test::suite { FeatureBitset withoutFeature_{ - supported_amendments() - featurePermissionedDomains}; + testable_amendments() - featurePermissionedDomains}; FeatureBitset withFeature_{ - supported_amendments() // + testable_amendments() // | featurePermissionedDomains | featureCredentials}; // Verify that each tx type can execute if the feature is enabled. @@ -81,7 +81,7 @@ class PermissionedDomains_test : public beast::unit_test::suite void testCredentialsDisabled() { - auto amendments = supported_amendments(); + auto amendments = testable_amendments(); amendments.set(featurePermissionedDomains); amendments.reset(featureCredentials); testcase("Credentials disabled"); diff --git a/src/test/app/PseudoTx_test.cpp b/src/test/app/PseudoTx_test.cpp index d96828a50b..53adf795c2 100644 --- a/src/test/app/PseudoTx_test.cpp +++ b/src/test/app/PseudoTx_test.cpp @@ -115,7 +115,7 @@ struct PseudoTx_test : public beast::unit_test::suite run() override { using namespace test::jtx; - FeatureBitset const all{supported_amendments()}; + FeatureBitset const all{testable_amendments()}; FeatureBitset const xrpFees{featureXRPFees}; testPrevented(all - featureXRPFees); diff --git a/src/test/app/ReducedOffer_test.cpp b/src/test/app/ReducedOffer_test.cpp index 546a07d93e..5142aaab0e 100644 --- a/src/test/app/ReducedOffer_test.cpp +++ b/src/test/app/ReducedOffer_test.cpp @@ -82,8 +82,8 @@ public: // Make one test run without fixReducedOffersV1 and one with. for (FeatureBitset features : - {supported_amendments() - fixReducedOffersV1, - supported_amendments() | fixReducedOffersV1}) + {testable_amendments() - fixReducedOffersV1, + testable_amendments() | fixReducedOffersV1}) { Env env{*this, features}; @@ -238,8 +238,8 @@ public: // Make one test run without fixReducedOffersV1 and one with. for (FeatureBitset features : - {supported_amendments() - fixReducedOffersV1, - supported_amendments() | fixReducedOffersV1}) + {testable_amendments() - fixReducedOffersV1, + testable_amendments() | fixReducedOffersV1}) { // Make sure none of the offers we generate are under funded. Env env{*this, features}; @@ -401,8 +401,8 @@ public: // Make one test run without fixReducedOffersV1 and one with. for (FeatureBitset features : - {supported_amendments() - fixReducedOffersV1, - supported_amendments() | fixReducedOffersV1}) + {testable_amendments() - fixReducedOffersV1, + testable_amendments() | fixReducedOffersV1}) { Env env{*this, features}; @@ -509,8 +509,8 @@ public: // Make one test run without fixReducedOffersV1 and one with. for (FeatureBitset features : - {supported_amendments() - fixReducedOffersV1, - supported_amendments() | fixReducedOffersV1}) + {testable_amendments() - fixReducedOffersV1, + testable_amendments() | fixReducedOffersV1}) { Env env{*this, features}; @@ -639,8 +639,8 @@ public: // Make one test run without fixReducedOffersV2 and one with. for (FeatureBitset features : - {supported_amendments() - fixReducedOffersV2, - supported_amendments() | fixReducedOffersV2}) + {testable_amendments() - fixReducedOffersV2, + testable_amendments() | fixReducedOffersV2}) { // Make sure none of the offers we generate are under funded. 
Env env{*this, features}; diff --git a/src/test/app/SetAuth_test.cpp b/src/test/app/SetAuth_test.cpp index 28a5b3be91..4c63560770 100644 --- a/src/test/app/SetAuth_test.cpp +++ b/src/test/app/SetAuth_test.cpp @@ -74,7 +74,7 @@ struct SetAuth_test : public beast::unit_test::suite run() override { using namespace jtx; - auto const sa = supported_amendments(); + auto const sa = testable_amendments(); testAuth(sa - featurePermissionedDEX); testAuth(sa); } diff --git a/src/test/app/SetRegularKey_test.cpp b/src/test/app/SetRegularKey_test.cpp index 6a3a5ff2a9..78b75fc458 100644 --- a/src/test/app/SetRegularKey_test.cpp +++ b/src/test/app/SetRegularKey_test.cpp @@ -32,7 +32,7 @@ public: using namespace test::jtx; testcase("Set regular key"); - Env env{*this, supported_amendments() - fixMasterKeyAsRegularKey}; + Env env{*this, testable_amendments() - fixMasterKeyAsRegularKey}; Account const alice("alice"); Account const bob("bob"); env.fund(XRP(10000), alice, bob); @@ -72,7 +72,7 @@ public: using namespace test::jtx; testcase("Set regular key"); - Env env{*this, supported_amendments() | fixMasterKeyAsRegularKey}; + Env env{*this, testable_amendments() | fixMasterKeyAsRegularKey}; Account const alice("alice"); Account const bob("bob"); env.fund(XRP(10000), alice, bob); @@ -109,7 +109,7 @@ public: // See https://ripplelabs.atlassian.net/browse/RIPD-1721. testcase( "Set regular key to master key (before fixMasterKeyAsRegularKey)"); - Env env{*this, supported_amendments() - fixMasterKeyAsRegularKey}; + Env env{*this, testable_amendments() - fixMasterKeyAsRegularKey}; Account const alice("alice"); env.fund(XRP(10000), alice); @@ -139,7 +139,7 @@ public: testcase( "Set regular key to master key (after fixMasterKeyAsRegularKey)"); - Env env{*this, supported_amendments() | fixMasterKeyAsRegularKey}; + Env env{*this, testable_amendments() | fixMasterKeyAsRegularKey}; Account const alice("alice"); env.fund(XRP(10000), alice); diff --git a/src/test/app/SetTrust_test.cpp b/src/test/app/SetTrust_test.cpp index 9b4048bf9c..18457b5faf 100644 --- a/src/test/app/SetTrust_test.cpp +++ b/src/test/app/SetTrust_test.cpp @@ -648,7 +648,7 @@ public: run() override { using namespace test::jtx; - auto const sa = supported_amendments(); + auto const sa = testable_amendments(); testWithFeats(sa - disallowIncoming); testWithFeats(sa); } diff --git a/src/test/app/TheoreticalQuality_test.cpp b/src/test/app/TheoreticalQuality_test.cpp index dcffd810ed..a8713ec69a 100644 --- a/src/test/app/TheoreticalQuality_test.cpp +++ b/src/test/app/TheoreticalQuality_test.cpp @@ -359,7 +359,7 @@ public: // Tests are sped up by a factor of 2 if a new environment isn't created // on every iteration. 
- Env env(*this, supported_amendments()); + Env env(*this, testable_amendments()); for (int i = 0; i < numTestIterations; ++i) { auto const iterAsStr = std::to_string(i); @@ -434,7 +434,7 @@ public: // Speed up tests by creating the environment outside the loop // (factor of 2 speedup on the DirectStep tests) - Env env(*this, supported_amendments()); + Env env(*this, testable_amendments()); for (int i = 0; i < numTestIterations; ++i) { auto const iterAsStr = std::to_string(i); diff --git a/src/test/app/Ticket_test.cpp b/src/test/app/Ticket_test.cpp index dd83e3036e..f8ac64679e 100644 --- a/src/test/app/Ticket_test.cpp +++ b/src/test/app/Ticket_test.cpp @@ -385,7 +385,7 @@ class Ticket_test : public beast::unit_test::suite testcase("Feature Not Enabled"); using namespace test::jtx; - Env env{*this, supported_amendments() - featureTicketBatch}; + Env env{*this, testable_amendments() - featureTicketBatch}; env(ticket::create(env.master, 1), ter(temDISABLED)); env.close(); @@ -933,7 +933,7 @@ class Ticket_test : public beast::unit_test::suite // Try the test without featureTicketBatch enabled. using namespace test::jtx; { - Env env{*this, supported_amendments() - featureTicketBatch}; + Env env{*this, testable_amendments() - featureTicketBatch}; Account alice{"alice"}; env.fund(XRP(10000), alice); @@ -957,7 +957,7 @@ class Ticket_test : public beast::unit_test::suite } // Try the test with featureTicketBatch enabled. { - Env env{*this, supported_amendments()}; + Env env{*this, testable_amendments()}; Account alice{"alice"}; env.fund(XRP(10000), alice); diff --git a/src/test/app/TrustAndBalance_test.cpp b/src/test/app/TrustAndBalance_test.cpp index d57e8ec418..f39d9e0313 100644 --- a/src/test/app/TrustAndBalance_test.cpp +++ b/src/test/app/TrustAndBalance_test.cpp @@ -480,7 +480,7 @@ public: }; using namespace test::jtx; - auto const sa = supported_amendments(); + auto const sa = testable_amendments(); testWithFeatures(sa - featurePermissionedDEX); testWithFeatures(sa); } diff --git a/src/test/app/Vault_test.cpp b/src/test/app/Vault_test.cpp index 25e486f1a2..ce97eff24f 100644 --- a/src/test/app/Vault_test.cpp +++ b/src/test/app/Vault_test.cpp @@ -340,7 +340,7 @@ class Vault_test : public beast::unit_test::suite Account const& owner, Account const& depositor, Account const& charlie)> setup) { - Env env{*this, supported_amendments() | featureSingleAssetVault}; + Env env{*this, testable_amendments() | featureSingleAssetVault}; Account issuer{"issuer"}; Account owner{"owner"}; Account depositor{"depositor"}; @@ -416,7 +416,7 @@ class Vault_test : public beast::unit_test::suite struct CaseArgs { FeatureBitset features = - supported_amendments() | featureSingleAssetVault; + testable_amendments() | featureSingleAssetVault; }; auto testCase = [&, this]( @@ -494,7 +494,7 @@ class Vault_test : public beast::unit_test::suite env(tx, ter{temDISABLED}); } }, - {.features = supported_amendments() - featureSingleAssetVault}); + {.features = testable_amendments() - featureSingleAssetVault}); testCase([&](Env& env, Account const& issuer, @@ -625,7 +625,7 @@ class Vault_test : public beast::unit_test::suite env(tx, ter{temDISABLED}); } }, - {.features = (supported_amendments() | featureSingleAssetVault) - + {.features = (testable_amendments() | featureSingleAssetVault) - featurePermissionedDomains}); testCase([&](Env& env, @@ -950,7 +950,7 @@ class Vault_test : public beast::unit_test::suite Account const& depositor, Asset const& asset, Vault& vault)> test) { - Env env{*this, supported_amendments() | 
featureSingleAssetVault}; + Env env{*this, testable_amendments() | featureSingleAssetVault}; Account issuer{"issuer"}; Account owner{"owner"}; Account depositor{"depositor"}; @@ -1092,8 +1092,7 @@ class Vault_test : public beast::unit_test::suite { { testcase("IOU fail create frozen"); - Env env{ - *this, supported_amendments() | featureSingleAssetVault}; + Env env{*this, testable_amendments() | featureSingleAssetVault}; Account issuer{"issuer"}; Account owner{"owner"}; env.fund(XRP(1000), issuer, owner); @@ -1112,8 +1111,7 @@ class Vault_test : public beast::unit_test::suite { testcase("IOU fail create no ripling"); - Env env{ - *this, supported_amendments() | featureSingleAssetVault}; + Env env{*this, testable_amendments() | featureSingleAssetVault}; Account issuer{"issuer"}; Account owner{"owner"}; env.fund(XRP(1000), issuer, owner); @@ -1131,8 +1129,7 @@ class Vault_test : public beast::unit_test::suite { testcase("IOU no issuer"); - Env env{ - *this, supported_amendments() | featureSingleAssetVault}; + Env env{*this, testable_amendments() | featureSingleAssetVault}; Account issuer{"issuer"}; Account owner{"owner"}; env.fund(XRP(1000), owner); @@ -1151,7 +1148,7 @@ class Vault_test : public beast::unit_test::suite { testcase("IOU fail create vault for AMM LPToken"); - Env env{*this, supported_amendments() | featureSingleAssetVault}; + Env env{*this, testable_amendments() | featureSingleAssetVault}; Account const gw("gateway"); Account const alice("alice"); Account const carol("carol"); @@ -1202,7 +1199,7 @@ class Vault_test : public beast::unit_test::suite { using namespace test::jtx; - Env env{*this, supported_amendments() | featureSingleAssetVault}; + Env env{*this, testable_amendments() | featureSingleAssetVault}; Account issuer{"issuer"}; Account owner{"owner"}; Account depositor{"depositor"}; @@ -1224,7 +1221,7 @@ class Vault_test : public beast::unit_test::suite { using namespace test::jtx; - Env env{*this, supported_amendments() | featureSingleAssetVault}; + Env env{*this, testable_amendments() | featureSingleAssetVault}; Account issuer{"issuer"}; Account owner{"owner"}; Account depositor{"depositor"}; @@ -1350,7 +1347,7 @@ class Vault_test : public beast::unit_test::suite Vault& vault, MPTTester& mptt)> test, CaseArgs args = {}) { - Env env{*this, supported_amendments() | featureSingleAssetVault}; + Env env{*this, testable_amendments() | featureSingleAssetVault}; Account issuer{"issuer"}; Account owner{"owner"}; Account depositor{"depositor"}; @@ -1746,7 +1743,7 @@ class Vault_test : public beast::unit_test::suite { testcase("MPT shares to a vault"); - Env env{*this, supported_amendments() | featureSingleAssetVault}; + Env env{*this, testable_amendments() | featureSingleAssetVault}; Account owner{"owner"}; Account issuer{"issuer"}; env.fund(XRP(1000000), owner, issuer); @@ -1795,8 +1792,7 @@ class Vault_test : public beast::unit_test::suite std::function issuanceId, std::function vaultBalance)> test) { - Env env{ - *this, supported_amendments() | featureSingleAssetVault}; + Env env{*this, testable_amendments() | featureSingleAssetVault}; Account const owner{"owner"}; Account const issuer{"issuer"}; Account const charlie{"charlie"}; @@ -2241,7 +2237,7 @@ class Vault_test : public beast::unit_test::suite testcase("private vault"); - Env env{*this, supported_amendments() | featureSingleAssetVault}; + Env env{*this, testable_amendments() | featureSingleAssetVault}; Account issuer{"issuer"}; Account owner{"owner"}; Account depositor{"depositor"}; @@ -2526,7 +2522,7 @@ class 
Vault_test : public beast::unit_test::suite testcase("private XRP vault"); - Env env{*this, supported_amendments() | featureSingleAssetVault}; + Env env{*this, testable_amendments() | featureSingleAssetVault}; Account owner{"owner"}; Account depositor{"depositor"}; Account alice{"charlie"}; @@ -2629,7 +2625,7 @@ class Vault_test : public beast::unit_test::suite using namespace test::jtx; testcase("failed pseudo-account allocation"); - Env env{*this, supported_amendments() | featureSingleAssetVault}; + Env env{*this, testable_amendments() | featureSingleAssetVault}; Account const owner{"owner"}; Vault vault{env}; env.fund(XRP(1000), owner); @@ -2658,7 +2654,7 @@ class Vault_test : public beast::unit_test::suite using namespace test::jtx; testcase("RPC"); - Env env{*this, supported_amendments() | featureSingleAssetVault}; + Env env{*this, testable_amendments() | featureSingleAssetVault}; Account const owner{"owner"}; Account const issuer{"issuer"}; Vault vault{env}; diff --git a/src/test/app/XChain_test.cpp b/src/test/app/XChain_test.cpp index 85cd636b3d..311ddda59b 100644 --- a/src/test/app/XChain_test.cpp +++ b/src/test/app/XChain_test.cpp @@ -192,7 +192,7 @@ struct SEnv }; // XEnv class used for XChain tests. The only difference with SEnv is that it -// funds some default accounts, and that it enables `supported_amendments() | +// funds some default accounts, and that it enables `testable_amendments() | // FeatureBitset{featureXChainBridge}` by default. // ----------------------------------------------------------------------------- template @@ -526,7 +526,7 @@ struct XChain_test : public beast::unit_test::suite, // coverage test: BridgeCreate::preflight() - create bridge when feature // disabled. { - Env env(*this, supported_amendments() - featureXChainBridge); + Env env(*this, testable_amendments() - featureXChainBridge); env(create_bridge(Account::master, jvb), ter(temDISABLED)); } diff --git a/src/test/app/tx/apply_test.cpp b/src/test/app/tx/apply_test.cpp index 44a2c10b4e..0f5ccf5a55 100644 --- a/src/test/app/tx/apply_test.cpp +++ b/src/test/app/tx/apply_test.cpp @@ -55,7 +55,7 @@ public: { test::jtx::Env no_fully_canonical( *this, - test::jtx::supported_amendments() - + test::jtx::testable_amendments() - featureRequireFullyCanonicalSig); Validity valid = checkValidity( @@ -71,7 +71,7 @@ public: { test::jtx::Env fully_canonical( - *this, test::jtx::supported_amendments()); + *this, test::jtx::testable_amendments()); Validity valid = checkValidity( fully_canonical.app().getHashRouter(), diff --git a/src/test/consensus/NegativeUNL_test.cpp b/src/test/consensus/NegativeUNL_test.cpp index 7eb05e68bb..56558f525f 100644 --- a/src/test/consensus/NegativeUNL_test.cpp +++ b/src/test/consensus/NegativeUNL_test.cpp @@ -227,7 +227,7 @@ class NegativeUNL_test : public beast::unit_test::suite testcase("Create UNLModify Tx and apply to ledgers"); - jtx::Env env(*this, jtx::supported_amendments() | featureNegativeUNL); + jtx::Env env(*this, jtx::testable_amendments() | featureNegativeUNL); std::vector publicKeys = createPublicKeys(3); // genesis ledger auto l = std::make_shared( @@ -526,7 +526,7 @@ class NegativeUNLNoAmendment_test : public beast::unit_test::suite { testcase("No negative UNL amendment"); - jtx::Env env(*this, jtx::supported_amendments() - featureNegativeUNL); + jtx::Env env(*this, jtx::testable_amendments() - featureNegativeUNL); std::vector publicKeys = createPublicKeys(1); // genesis ledger auto l = std::make_shared( @@ -582,7 +582,7 @@ struct NetworkHistory }; 
NetworkHistory(beast::unit_test::suite& suite, Parameter const& p) - : env(suite, jtx::supported_amendments() | featureNegativeUNL) + : env(suite, jtx::testable_amendments() | featureNegativeUNL) , param(p) , validations(env.app().getValidations()) { diff --git a/src/test/jtx/AMMTest.h b/src/test/jtx/AMMTest.h index 28b9affa8f..17011d7633 100644 --- a/src/test/jtx/AMMTest.h +++ b/src/test/jtx/AMMTest.h @@ -40,7 +40,7 @@ struct TestAMMArg std::optional> pool = std::nullopt; std::uint16_t tfee = 0; std::optional ter = std::nullopt; - std::vector features = {supported_amendments()}; + std::vector features = {testable_amendments()}; bool noLog = false; }; @@ -95,7 +95,7 @@ protected: std::optional> const& pool = std::nullopt, std::uint16_t tfee = 0, std::optional const& ter = std::nullopt, - std::vector const& features = {supported_amendments()}); + std::vector const& features = {testable_amendments()}); void testAMM( diff --git a/src/test/jtx/Env.h b/src/test/jtx/Env.h index 53417a6079..21a239e3d7 100644 --- a/src/test/jtx/Env.h +++ b/src/test/jtx/Env.h @@ -71,10 +71,10 @@ noripple(Account const& account, Args const&... args) } inline FeatureBitset -supported_amendments() +testable_amendments() { static FeatureBitset const ids = [] { - auto const& sa = ripple::detail::supportedAmendments(); + auto const& sa = allAmendments(); std::vector feats; feats.reserve(sa.size()); for (auto const& [s, vote] : sa) @@ -84,7 +84,7 @@ supported_amendments() feats.push_back(*f); else Throw( - "Unknown feature: " + s + " in supportedAmendments."); + "Unknown feature: " + s + " in allAmendments."); } return FeatureBitset(feats); }(); @@ -236,7 +236,7 @@ public: beast::severities::Severity thresh = beast::severities::kError) : Env(suite_, std::move(config), - supported_amendments(), + testable_amendments(), std::move(logs), thresh) { diff --git a/src/test/jtx/Env_test.cpp b/src/test/jtx/Env_test.cpp index f32343d6dd..2be20d6e33 100644 --- a/src/test/jtx/Env_test.cpp +++ b/src/test/jtx/Env_test.cpp @@ -265,7 +265,7 @@ public: { using namespace jtx; - Env env{*this, supported_amendments() | fixMasterKeyAsRegularKey}; + Env env{*this, testable_amendments() | fixMasterKeyAsRegularKey}; Account const alice("alice", KeyType::ed25519); Account const bob("bob", KeyType::secp256k1); Account const carol("carol"); @@ -776,7 +776,7 @@ public: { testcase("Env features"); using namespace jtx; - auto const supported = supported_amendments(); + auto const supported = testable_amendments(); // this finds a feature that is not in // the supported amendments list and tests that it can be @@ -827,7 +827,7 @@ public: } auto const missingSomeFeatures = - supported_amendments() - featureMultiSignReserve - featureFlow; + testable_amendments() - featureMultiSignReserve - featureFlow; BEAST_EXPECT(missingSomeFeatures.count() == (supported.count() - 2)); { // a Env supported_features_except is missing *only* those features @@ -887,7 +887,7 @@ public: // add a feature that is NOT in the supported amendments list // along with all supported amendments // the unsupported features should be enabled - Env env{*this, supported_amendments().set(*neverSupportedFeat)}; + Env env{*this, testable_amendments().set(*neverSupportedFeat)}; // this app will have all supported amendments and then the // one additional never supported feature flag diff --git a/src/test/jtx/impl/xchain_bridge.cpp b/src/test/jtx/impl/xchain_bridge.cpp index 86e9deda7c..6f167d7508 100644 --- a/src/test/jtx/impl/xchain_bridge.cpp +++ b/src/test/jtx/impl/xchain_bridge.cpp @@ 
-389,7 +389,7 @@ XChainBridgeObjects::XChainBridgeObjects() bridge_rpc(mcDoor, xrpIssue(), Account::master, xrpIssue())) , jvb(bridge(mcDoor, xrpIssue(), Account::master, xrpIssue())) , jvub(bridge(mcuDoor, xrpIssue(), Account::master, xrpIssue())) - , features(supported_amendments() | FeatureBitset{featureXChainBridge}) + , features(testable_amendments() | FeatureBitset{featureXChainBridge}) , signers([] { constexpr int numSigners = UT_XCHAIN_DEFAULT_NUM_SIGNERS; std::vector result; diff --git a/src/test/ledger/BookDirs_test.cpp b/src/test/ledger/BookDirs_test.cpp index 45cd7e332d..52b618e9a0 100644 --- a/src/test/ledger/BookDirs_test.cpp +++ b/src/test/ledger/BookDirs_test.cpp @@ -103,7 +103,7 @@ struct BookDirs_test : public beast::unit_test::suite run() override { using namespace jtx; - auto const sa = supported_amendments(); + auto const sa = testable_amendments(); test_bookdir(sa - featurePermissionedDEX); test_bookdir(sa); } diff --git a/src/test/ledger/Directory_test.cpp b/src/test/ledger/Directory_test.cpp index 7aa6f149b8..9e8d40e0cc 100644 --- a/src/test/ledger/Directory_test.cpp +++ b/src/test/ledger/Directory_test.cpp @@ -421,7 +421,7 @@ struct Directory_test : public beast::unit_test::suite }; // fixPreviousTxnID is disabled. - Env env(*this, supported_amendments() - fixPreviousTxnID); + Env env(*this, testable_amendments() - fixPreviousTxnID); env.fund(XRP(10000), alice, gw); env.close(); env.trust(USD(1000), alice); diff --git a/src/test/ledger/Invariants_test.cpp b/src/test/ledger/Invariants_test.cpp index 6178b413d5..fadd9c0eae 100644 --- a/src/test/ledger/Invariants_test.cpp +++ b/src/test/ledger/Invariants_test.cpp @@ -78,7 +78,7 @@ class Invariants_test : public beast::unit_test::suite Preclose const& preclose = {}) { using namespace test::jtx; - FeatureBitset amendments = supported_amendments() | + FeatureBitset amendments = testable_amendments() | featureInvariantsV1_1 | featureSingleAssetVault; Env env{*this, amendments}; diff --git a/src/test/ledger/PaymentSandbox_test.cpp b/src/test/ledger/PaymentSandbox_test.cpp index 7901cec364..26b06a0034 100644 --- a/src/test/ledger/PaymentSandbox_test.cpp +++ b/src/test/ledger/PaymentSandbox_test.cpp @@ -420,7 +420,7 @@ public: testBalanceHook(features); }; using namespace jtx; - auto const sa = supported_amendments(); + auto const sa = testable_amendments(); testAll(sa - featurePermissionedDEX); testAll(sa); } diff --git a/src/test/rpc/AMMInfo_test.cpp b/src/test/rpc/AMMInfo_test.cpp index 1c54580aa7..a6e866b1c8 100644 --- a/src/test/rpc/AMMInfo_test.cpp +++ b/src/test/rpc/AMMInfo_test.cpp @@ -359,7 +359,7 @@ public: run() override { using namespace jtx; - auto const all = supported_amendments(); + auto const all = testable_amendments(); testErrors(); testSimpleRpc(); testVoteAndBid(all); diff --git a/src/test/rpc/AccountInfo_test.cpp b/src/test/rpc/AccountInfo_test.cpp index 0b41da2ded..18c8bf5a1c 100644 --- a/src/test/rpc/AccountInfo_test.cpp +++ b/src/test/rpc/AccountInfo_test.cpp @@ -710,7 +710,7 @@ public: testSignerListsV2(); FeatureBitset const allFeatures{ - ripple::test::jtx::supported_amendments()}; + ripple::test::jtx::testable_amendments()}; testAccountFlags(allFeatures); testAccountFlags(allFeatures - featureDisallowIncoming); testAccountFlags( diff --git a/src/test/rpc/AccountObjects_test.cpp b/src/test/rpc/AccountObjects_test.cpp index 7a48db73bd..546bbe8715 100644 --- a/src/test/rpc/AccountObjects_test.cpp +++ b/src/test/rpc/AccountObjects_test.cpp @@ -577,7 +577,7 @@ public: Account const gw{"gateway"}; auto 
const USD = gw["USD"]; - auto const features = supported_amendments() | featureXChainBridge | + auto const features = testable_amendments() | featureXChainBridge | featurePermissionedDomains; Env env(*this, features); diff --git a/src/test/rpc/AccountSet_test.cpp b/src/test/rpc/AccountSet_test.cpp index c056279bf1..5c0ca89305 100644 --- a/src/test/rpc/AccountSet_test.cpp +++ b/src/test/rpc/AccountSet_test.cpp @@ -53,7 +53,7 @@ public: Account const alice("alice"); // Test without DepositAuth enabled initially. - Env env(*this, supported_amendments() - featureDepositAuth); + Env env(*this, testable_amendments() - featureDepositAuth); env.fund(XRP(10000), noripple(alice)); // Give alice a regular key so she can legally set and clear @@ -357,7 +357,7 @@ public: }; doTests( - supported_amendments(), + testable_amendments(), {{1.0, tesSUCCESS, 1.0}, {1.1, tesSUCCESS, 1.1}, {2.0, tesSUCCESS, 2.0}, diff --git a/src/test/rpc/BookChanges_test.cpp b/src/test/rpc/BookChanges_test.cpp index 1f059c2bf7..1f7b6775f2 100644 --- a/src/test/rpc/BookChanges_test.cpp +++ b/src/test/rpc/BookChanges_test.cpp @@ -94,7 +94,7 @@ public: using namespace jtx; FeatureBitset const all{ - jtx::supported_amendments() | featurePermissionedDomains | + jtx::testable_amendments() | featurePermissionedDomains | featureCredentials | featurePermissionedDEX}; Env env(*this, all); diff --git a/src/test/rpc/Book_test.cpp b/src/test/rpc/Book_test.cpp index 0ec36eca53..e885762644 100644 --- a/src/test/rpc/Book_test.cpp +++ b/src/test/rpc/Book_test.cpp @@ -1737,7 +1737,7 @@ public: using namespace jtx; FeatureBitset const all{ - jtx::supported_amendments() | featurePermissionedDomains | + jtx::testable_amendments() | featurePermissionedDomains | featureCredentials | featurePermissionedDEX}; Env env(*this, all); @@ -1868,7 +1868,7 @@ public: using namespace jtx; FeatureBitset const all{ - jtx::supported_amendments() | featurePermissionedDomains | + jtx::testable_amendments() | featurePermissionedDomains | featureCredentials | featurePermissionedDEX}; Env env(*this, all); diff --git a/src/test/rpc/GatewayBalances_test.cpp b/src/test/rpc/GatewayBalances_test.cpp index 93e88b78ac..691f32317e 100644 --- a/src/test/rpc/GatewayBalances_test.cpp +++ b/src/test/rpc/GatewayBalances_test.cpp @@ -251,7 +251,7 @@ public: run() override { using namespace jtx; - auto const sa = supported_amendments(); + auto const sa = testable_amendments(); for (auto feature : {sa - featurePermissionedDEX, sa}) { testGWB(feature); diff --git a/src/test/rpc/LedgerData_test.cpp b/src/test/rpc/LedgerData_test.cpp index c2b22efc00..54f51255d1 100644 --- a/src/test/rpc/LedgerData_test.cpp +++ b/src/test/rpc/LedgerData_test.cpp @@ -304,8 +304,8 @@ public: // Make sure fixInnerObjTemplate2 doesn't break amendments. 
for (FeatureBitset const& features : - {supported_amendments() - fixInnerObjTemplate2, - supported_amendments() | fixInnerObjTemplate2}) + {testable_amendments() - fixInnerObjTemplate2, + testable_amendments() | fixInnerObjTemplate2}) { using namespace std::chrono; Env env{*this, envconfig(validator, ""), features}; diff --git a/src/test/rpc/LedgerEntry_test.cpp b/src/test/rpc/LedgerEntry_test.cpp index 83232f79c8..b5cab9d13c 100644 --- a/src/test/rpc/LedgerEntry_test.cpp +++ b/src/test/rpc/LedgerEntry_test.cpp @@ -2221,7 +2221,7 @@ class LedgerEntry_test : public beast::unit_test::suite using namespace test::jtx; - Env env(*this, supported_amendments() | featurePermissionedDomains); + Env env(*this, testable_amendments() | featurePermissionedDomains); Account const issuer{"issuer"}; Account const alice{"alice"}; Account const bob{"bob"}; diff --git a/src/test/rpc/NoRipple_test.cpp b/src/test/rpc/NoRipple_test.cpp index 1450abef69..926de31e83 100644 --- a/src/test/rpc/NoRipple_test.cpp +++ b/src/test/rpc/NoRipple_test.cpp @@ -293,7 +293,7 @@ public: testPairwise(features); }; using namespace jtx; - auto const sa = supported_amendments(); + auto const sa = testable_amendments(); withFeatsTests(sa - featurePermissionedDEX); withFeatsTests(sa); } diff --git a/src/test/rpc/Subscribe_test.cpp b/src/test/rpc/Subscribe_test.cpp index e414b60f93..989afc0acc 100644 --- a/src/test/rpc/Subscribe_test.cpp +++ b/src/test/rpc/Subscribe_test.cpp @@ -1307,7 +1307,7 @@ public: using namespace jtx; using namespace std::chrono_literals; FeatureBitset const all{ - jtx::supported_amendments() | featurePermissionedDomains | + jtx::testable_amendments() | featurePermissionedDomains | featureCredentials | featurePermissionedDEX}; Env env(*this, all); @@ -1577,7 +1577,7 @@ public: run() override { using namespace test::jtx; - FeatureBitset const all{supported_amendments()}; + FeatureBitset const all{testable_amendments()}; FeatureBitset const xrpFees{featureXRPFees}; testServer(); diff --git a/src/test/rpc/Transaction_test.cpp b/src/test/rpc/Transaction_test.cpp index 724a3a0517..e1db485572 100644 --- a/src/test/rpc/Transaction_test.cpp +++ b/src/test/rpc/Transaction_test.cpp @@ -941,7 +941,7 @@ public: forAllApiVersions( std::bind_front(&Transaction_test::testBinaryRequest, this)); - FeatureBitset const all{supported_amendments()}; + FeatureBitset const all{testable_amendments()}; testWithFeats(all); } From 90e63803838e15f659d35590062baf59e7234b40 Mon Sep 17 00:00:00 2001 From: Bart Date: Fri, 18 Jul 2025 12:55:15 -0400 Subject: [PATCH 080/244] refactor: Update date, libarchive, nudb, openssl, sqlite3, xxhash packages (#5567) This PR updates several dependencies to their latest versions. Not all dependencies have been updated, as some need to be patched and some require additional code changes due to backward incompatibilities introduced by the version bump. 
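The resulting version pins, condensed from the conanfile.py hunks in this patch, are sketched below for reference (an illustrative excerpt only; the surrounding ConanFile class and the unchanged entries in requirements() are omitted):

    # Illustrative excerpt of the pins after this patch, assembled from the hunks below.
    requires = [
        'grpc/1.50.1',
        'libarchive/3.8.1',   # was 3.7.6
        'nudb/2.0.9',         # was 2.0.8; the local recipe under external/nudb is removed
        'openssl/1.1.1w',     # was 1.1.1v
        'soci/4.0.3',
        'zlib/1.3.1',
    ]
    tool_requires = [
        'protobuf/3.21.12',   # was 3.21.9
    ]
    # requirements() additionally moves to date/3.0.4 (was 3.0.3), protobuf/3.21.12,
    # sqlite3/3.49.1 (was 3.47.0) and xxhash/0.8.3 (was 0.8.2); boost, lz4, jemalloc
    # and rocksdb keep their current versions.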
--- .github/actions/dependencies/action.yml | 1 - .github/workflows/macos.yml | 1 - .github/workflows/nix.yml | 13 +++- .github/workflows/windows.yml | 1 - BUILD.md | 17 ----- conanfile.py | 16 ++--- external/nudb/conandata.yml | 10 --- external/nudb/conanfile.py | 72 ------------------- ...-0001-add-include-stdexcept-for-msvc.patch | 24 ------- 9 files changed, 19 insertions(+), 136 deletions(-) delete mode 100644 external/nudb/conandata.yml delete mode 100644 external/nudb/conanfile.py delete mode 100644 external/nudb/patches/2.0.8-0001-add-include-stdexcept-for-msvc.patch diff --git a/.github/actions/dependencies/action.yml b/.github/actions/dependencies/action.yml index 731e3e862f..eeb8df105c 100644 --- a/.github/actions/dependencies/action.yml +++ b/.github/actions/dependencies/action.yml @@ -12,7 +12,6 @@ runs: conan export --version 1.1.10 external/snappy conan export --version 9.7.3 external/rocksdb conan export --version 4.0.3 external/soci - conan export --version 2.0.8 external/nudb - name: add Ripple Conan remote if: env.CONAN_URL != '' shell: bash diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 4c386ead62..13c817087a 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -96,7 +96,6 @@ jobs: conan export --version 1.1.10 external/snappy conan export --version 9.7.3 external/rocksdb conan export --version 4.0.3 external/soci - conan export --version 2.0.8 external/nudb - name: add Ripple Conan remote if: env.CONAN_URL != '' shell: bash diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml index 409a1defc0..7543ade692 100644 --- a/.github/workflows/nix.yml +++ b/.github/workflows/nix.yml @@ -164,6 +164,16 @@ jobs: generator: Ninja configuration: ${{ matrix.configuration }} cmake-args: "-Dassert=TRUE -Dwerr=TRUE ${{ matrix.cmake-args }}" + - name: check linking + run: | + cd ${build_dir} + ldd ./rippled + if [ "$(ldd ./rippled | grep -E '(libstdc\+\+|libgcc)' | wc -l)" -eq 0 ]; then + echo 'The binary is statically linked.' + else + echo 'The binary is dynamically linked.' + exit 1 + fi - name: test run: | cd ${build_dir} @@ -220,6 +230,7 @@ jobs: cd ${build_dir} ./rippled --unittest --unittest-jobs $(nproc) ctest -j $(nproc) --output-on-failure + coverage: strategy: fail-fast: false @@ -296,7 +307,6 @@ jobs: attempt_limit: 5 attempt_delay: 210000 # in milliseconds - conan: needs: dependencies runs-on: [self-hosted, heavy] @@ -313,7 +323,6 @@ jobs: uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 with: name: ${{ env.platform }}-${{ env.compiler }}-${{ env.configuration }} - - name: extract cache run: | mkdir -p ${CONAN_HOME} diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 30ad32a89c..ae0302f2a7 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -92,7 +92,6 @@ jobs: conan export --version 1.1.10 external/snappy conan export --version 9.7.3 external/rocksdb conan export --version 4.0.3 external/soci - conan export --version 2.0.8 external/nudb - name: add Ripple Conan remote if: env.CONAN_URL != '' shell: bash diff --git a/BUILD.md b/BUILD.md index 1ba767cd88..fba238d2bc 100644 --- a/BUILD.md +++ b/BUILD.md @@ -167,8 +167,6 @@ It does not explicitly link the C++ standard library, which allows you to statically link it with GCC, if you want. 
``` - # Conan 1.x - conan export external/snappy snappy/1.1.10@ # Conan 2.x conan export --version 1.1.10 external/snappy ``` @@ -177,8 +175,6 @@ Export our [Conan recipe for RocksDB](./external/rocksdb). It does not override paths to dependencies when building with Visual Studio. ``` - # Conan 1.x - conan export external/rocksdb rocksdb/9.7.3@ # Conan 2.x conan export --version 9.7.3 external/rocksdb ``` @@ -187,23 +183,10 @@ Export our [Conan recipe for SOCI](./external/soci). It patches their CMake to correctly import its dependencies. ``` - # Conan 1.x - conan export external/soci soci/4.0.3@ # Conan 2.x conan export --version 4.0.3 external/soci ``` -Export our [Conan recipe for NuDB](./external/nudb). -It fixes some source files to add missing `#include`s. - - - ``` - # Conan 1.x - conan export external/nudb nudb/2.0.8@ - # Conan 2.x - conan export --version 2.0.8 external/nudb - ``` - ### Build and Test 1. Create a build directory and move into it. diff --git a/conanfile.py b/conanfile.py index 8e964784f8..d79b47bc6f 100644 --- a/conanfile.py +++ b/conanfile.py @@ -25,9 +25,9 @@ class Xrpl(ConanFile): requires = [ 'grpc/1.50.1', - 'libarchive/3.7.6', - 'nudb/2.0.8', - 'openssl/1.1.1v', + 'libarchive/3.8.1', + 'nudb/2.0.9', + 'openssl/1.1.1w', 'soci/4.0.3', 'zlib/1.3.1', ] @@ -37,7 +37,7 @@ class Xrpl(ConanFile): ] tool_requires = [ - 'protobuf/3.21.9', + 'protobuf/3.21.12', ] default_options = { @@ -105,15 +105,15 @@ class Xrpl(ConanFile): # Conan 2 requires transitive headers to be specified transitive_headers_opt = {'transitive_headers': True} if conan_version.split('.')[0] == '2' else {} self.requires('boost/1.83.0', force=True, **transitive_headers_opt) - self.requires('date/3.0.3', **transitive_headers_opt) + self.requires('date/3.0.4', **transitive_headers_opt) self.requires('lz4/1.10.0', force=True) - self.requires('protobuf/3.21.9', force=True) - self.requires('sqlite3/3.47.0', force=True) + self.requires('protobuf/3.21.12', force=True) + self.requires('sqlite3/3.49.1', force=True) if self.options.jemalloc: self.requires('jemalloc/5.3.0') if self.options.rocksdb: self.requires('rocksdb/9.7.3') - self.requires('xxhash/0.8.2', **transitive_headers_opt) + self.requires('xxhash/0.8.3', **transitive_headers_opt) exports_sources = ( 'CMakeLists.txt', diff --git a/external/nudb/conandata.yml b/external/nudb/conandata.yml deleted file mode 100644 index 721129f88e..0000000000 --- a/external/nudb/conandata.yml +++ /dev/null @@ -1,10 +0,0 @@ -sources: - "2.0.8": - url: "https://github.com/CPPAlliance/NuDB/archive/2.0.8.tar.gz" - sha256: "9b71903d8ba111cd893ab064b9a8b6ac4124ed8bd6b4f67250205bc43c7f13a8" -patches: - "2.0.8": - - patch_file: "patches/2.0.8-0001-add-include-stdexcept-for-msvc.patch" - patch_description: "Fix build for MSVC by including stdexcept" - patch_type: "portability" - patch_source: "https://github.com/cppalliance/NuDB/pull/100/files" diff --git a/external/nudb/conanfile.py b/external/nudb/conanfile.py deleted file mode 100644 index a046e2ba89..0000000000 --- a/external/nudb/conanfile.py +++ /dev/null @@ -1,72 +0,0 @@ -import os - -from conan import ConanFile -from conan.tools.build import check_min_cppstd -from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get -from conan.tools.layout import basic_layout - -required_conan_version = ">=1.52.0" - - -class NudbConan(ConanFile): - name = "nudb" - description = "A fast key/value insert-only database for SSD drives in C++11" - license = "BSL-1.0" - url = 
"https://github.com/conan-io/conan-center-index" - homepage = "https://github.com/CPPAlliance/NuDB" - topics = ("header-only", "KVS", "insert-only") - - package_type = "header-library" - settings = "os", "arch", "compiler", "build_type" - no_copy_source = True - - @property - def _min_cppstd(self): - return 11 - - def export_sources(self): - export_conandata_patches(self) - - def layout(self): - basic_layout(self, src_folder="src") - - def requirements(self): - self.requires("boost/1.83.0") - - def package_id(self): - self.info.clear() - - def validate(self): - if self.settings.compiler.cppstd: - check_min_cppstd(self, self._min_cppstd) - - def source(self): - get(self, **self.conan_data["sources"][self.version], strip_root=True) - - def build(self): - apply_conandata_patches(self) - - def package(self): - copy(self, "LICENSE*", - dst=os.path.join(self.package_folder, "licenses"), - src=self.source_folder) - copy(self, "*", - dst=os.path.join(self.package_folder, "include"), - src=os.path.join(self.source_folder, "include")) - - def package_info(self): - self.cpp_info.bindirs = [] - self.cpp_info.libdirs = [] - - self.cpp_info.set_property("cmake_target_name", "NuDB") - self.cpp_info.set_property("cmake_target_aliases", ["NuDB::nudb"]) - self.cpp_info.set_property("cmake_find_mode", "both") - - self.cpp_info.components["core"].set_property("cmake_target_name", "nudb") - self.cpp_info.components["core"].names["cmake_find_package"] = "nudb" - self.cpp_info.components["core"].names["cmake_find_package_multi"] = "nudb" - self.cpp_info.components["core"].requires = ["boost::thread", "boost::system"] - - # TODO: to remove in conan v2 once cmake_find_package_* generators removed - self.cpp_info.names["cmake_find_package"] = "NuDB" - self.cpp_info.names["cmake_find_package_multi"] = "NuDB" diff --git a/external/nudb/patches/2.0.8-0001-add-include-stdexcept-for-msvc.patch b/external/nudb/patches/2.0.8-0001-add-include-stdexcept-for-msvc.patch deleted file mode 100644 index 2d5264f3ce..0000000000 --- a/external/nudb/patches/2.0.8-0001-add-include-stdexcept-for-msvc.patch +++ /dev/null @@ -1,24 +0,0 @@ -diff --git a/include/nudb/detail/stream.hpp b/include/nudb/detail/stream.hpp -index 6c07bf1..e0ce8ed 100644 ---- a/include/nudb/detail/stream.hpp -+++ b/include/nudb/detail/stream.hpp -@@ -14,6 +14,7 @@ - #include - #include - #include -+#include - - namespace nudb { - namespace detail { -diff --git a/include/nudb/impl/context.ipp b/include/nudb/impl/context.ipp -index beb7058..ffde0b3 100644 ---- a/include/nudb/impl/context.ipp -+++ b/include/nudb/impl/context.ipp -@@ -9,6 +9,7 @@ - #define NUDB_IMPL_CONTEXT_IPP - - #include -+#include - - namespace nudb { - From 1a40f18bddd4c1553f2655b4c816988c48d4f563 Mon Sep 17 00:00:00 2001 From: Chenna Keshava B S <21219765+ckeshava@users.noreply.github.com> Date: Fri, 18 Jul 2025 10:58:46 -0700 Subject: [PATCH 081/244] Remove the type filter from "ledger" RPC command (#4934) This issue was reported on the Javascript client library: XRPLF/xrpl.js#2611 The type filter (Note: as of the latest version of rippled, type parameter is deprecated) does not work as expected. This PR removes the type filter from the ledger command. 
--- include/xrpl/protocol/ErrorCodes.h | 2 ++ src/test/rpc/LedgerRPC_test.cpp | 38 ++++++++++++++++---- src/xrpld/app/ledger/LedgerToJson.h | 6 ++-- src/xrpld/app/ledger/detail/LedgerToJson.cpp | 19 +++++----- src/xrpld/rpc/handlers/LedgerHandler.cpp | 4 --- src/xrpld/rpc/handlers/LedgerHandler.h | 18 ++++++++-- 6 files changed, 60 insertions(+), 27 deletions(-) diff --git a/include/xrpl/protocol/ErrorCodes.h b/include/xrpl/protocol/ErrorCodes.h index 9c9319ba42..f06b927566 100644 --- a/include/xrpl/protocol/ErrorCodes.h +++ b/include/xrpl/protocol/ErrorCodes.h @@ -169,6 +169,8 @@ enum warning_code_i { warnRPC_AMENDMENT_BLOCKED = 1002, warnRPC_EXPIRED_VALIDATOR_LIST = 1003, // unused = 1004 + warnRPC_FIELDS_DEPRECATED = 2004, // rippled needs to maintain + // compatibility with Clio on this code. }; //------------------------------------------------------------------------------ diff --git a/src/test/rpc/LedgerRPC_test.cpp b/src/test/rpc/LedgerRPC_test.cpp index 5b26f43161..9ba9c9a655 100644 --- a/src/test/rpc/LedgerRPC_test.cpp +++ b/src/test/rpc/LedgerRPC_test.cpp @@ -711,6 +711,7 @@ class LedgerRPC_test : public beast::unit_test::suite env.close(); std::string index; + int hashesLedgerEntryIndex = -1; { Json::Value jvParams; jvParams[jss::ledger_index] = 3u; @@ -721,11 +722,27 @@ class LedgerRPC_test : public beast::unit_test::suite env.rpc("json", "ledger", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr[jss::ledger].isMember(jss::accountState)); BEAST_EXPECT(jrr[jss::ledger][jss::accountState].isArray()); - BEAST_EXPECT(jrr[jss::ledger][jss::accountState].size() == 1u); + + for (auto i = 0; i < jrr[jss::ledger][jss::accountState].size(); + i++) + if (jrr[jss::ledger][jss::accountState][i]["LedgerEntryType"] == + jss::LedgerHashes) + { + index = jrr[jss::ledger][jss::accountState][i]["index"] + .asString(); + hashesLedgerEntryIndex = i; + } + + for (auto const& object : jrr[jss::ledger][jss::accountState]) + if (object["LedgerEntryType"] == jss::LedgerHashes) + index = object["index"].asString(); + + // jss::type is a deprecated field BEAST_EXPECT( - jrr[jss::ledger][jss::accountState][0u]["LedgerEntryType"] == - jss::LedgerHashes); - index = jrr[jss::ledger][jss::accountState][0u]["index"].asString(); + jrr.isMember(jss::warnings) && jrr[jss::warnings].isArray() && + jrr[jss::warnings].size() == 1 && + jrr[jss::warnings][0u][jss::id].asInt() == + warnRPC_FIELDS_DEPRECATED); } { Json::Value jvParams; @@ -737,8 +754,17 @@ class LedgerRPC_test : public beast::unit_test::suite env.rpc("json", "ledger", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr[jss::ledger].isMember(jss::accountState)); BEAST_EXPECT(jrr[jss::ledger][jss::accountState].isArray()); - BEAST_EXPECT(jrr[jss::ledger][jss::accountState].size() == 1u); - BEAST_EXPECT(jrr[jss::ledger][jss::accountState][0u] == index); + BEAST_EXPECT( + hashesLedgerEntryIndex > 0 && + jrr[jss::ledger][jss::accountState][hashesLedgerEntryIndex] == + index); + + // jss::type is a deprecated field + BEAST_EXPECT( + jrr.isMember(jss::warnings) && jrr[jss::warnings].isArray() && + jrr[jss::warnings].size() == 1 && + jrr[jss::warnings][0u][jss::id].asInt() == + warnRPC_FIELDS_DEPRECATED); } } diff --git a/src/xrpld/app/ledger/LedgerToJson.h b/src/xrpld/app/ledger/LedgerToJson.h index 40be57fc9c..be017bca86 100644 --- a/src/xrpld/app/ledger/LedgerToJson.h +++ b/src/xrpld/app/ledger/LedgerToJson.h @@ -37,9 +37,8 @@ struct LedgerFill ReadView const& l, RPC::Context* ctx, int o = 0, - std::vector q = {}, - LedgerEntryType t = ltANY) - : 
ledger(l), options(o), txQueue(std::move(q)), type(t), context(ctx) + std::vector q = {}) + : ledger(l), options(o), txQueue(std::move(q)), context(ctx) { if (context) closeTime = context->ledgerMaster.getCloseTimeBySeq(ledger.seq()); @@ -58,7 +57,6 @@ struct LedgerFill ReadView const& ledger; int options; std::vector txQueue; - LedgerEntryType type; RPC::Context* context; std::optional closeTime; }; diff --git a/src/xrpld/app/ledger/detail/LedgerToJson.cpp b/src/xrpld/app/ledger/detail/LedgerToJson.cpp index 3e4f4b8f0a..0e6f81dfbc 100644 --- a/src/xrpld/app/ledger/detail/LedgerToJson.cpp +++ b/src/xrpld/app/ledger/detail/LedgerToJson.cpp @@ -268,19 +268,16 @@ fillJsonState(Object& json, LedgerFill const& fill) for (auto const& sle : ledger.sles) { - if (fill.type == ltANY || sle->getType() == fill.type) + if (binary) { - if (binary) - { - auto&& obj = appendObject(array); - obj[jss::hash] = to_string(sle->key()); - obj[jss::tx_blob] = serializeHex(*sle); - } - else if (expanded) - array.append(sle->getJson(JsonOptions::none)); - else - array.append(to_string(sle->key())); + auto&& obj = appendObject(array); + obj[jss::hash] = to_string(sle->key()); + obj[jss::tx_blob] = serializeHex(*sle); } + else if (expanded) + array.append(sle->getJson(JsonOptions::none)); + else + array.append(to_string(sle->key())); } } diff --git a/src/xrpld/rpc/handlers/LedgerHandler.cpp b/src/xrpld/rpc/handlers/LedgerHandler.cpp index 4015bb9fcc..8987f2d07e 100644 --- a/src/xrpld/rpc/handlers/LedgerHandler.cpp +++ b/src/xrpld/rpc/handlers/LedgerHandler.cpp @@ -54,10 +54,6 @@ LedgerHandler::check() bool const binary = params[jss::binary].asBool(); bool const owner_funds = params[jss::owner_funds].asBool(); bool const queue = params[jss::queue].asBool(); - auto type = chooseLedgerEntryType(params); - if (type.first) - return type.first; - type_ = type.second; options_ = (full ? LedgerFill::full : 0) | (expand ? LedgerFill::expand : 0) | diff --git a/src/xrpld/rpc/handlers/LedgerHandler.h b/src/xrpld/rpc/handlers/LedgerHandler.h index 0e47164ad3..a573589cbc 100644 --- a/src/xrpld/rpc/handlers/LedgerHandler.h +++ b/src/xrpld/rpc/handlers/LedgerHandler.h @@ -76,7 +76,6 @@ private: std::vector queueTxs_; Json::Value result_; int options_ = 0; - LedgerEntryType type_; }; //////////////////////////////////////////////////////////////////////////////// @@ -91,7 +90,7 @@ LedgerHandler::writeResult(Object& value) if (ledger_) { Json::copyFrom(value, result_); - addJson(value, {*ledger_, &context_, options_, queueTxs_, type_}); + addJson(value, {*ledger_, &context_, options_, queueTxs_}); } else { @@ -105,6 +104,21 @@ LedgerHandler::writeResult(Object& value) addJson(open, {*master.getCurrentLedger(), &context_, 0}); } } + + Json::Value warnings{Json::arrayValue}; + if (context_.params.isMember(jss::type)) + { + Json::Value& w = warnings.append(Json::objectValue); + w[jss::id] = warnRPC_FIELDS_DEPRECATED; + w[jss::message] = + "Some fields from your request are deprecated. Please check the " + "documentation at " + "https://xrpl.org/docs/references/http-websocket-apis/ " + "and update your request. 
Field `type` is deprecated."; + } + + if (warnings.size()) + value[jss::warnings] = std::move(warnings); } } // namespace RPC From 13353ae36de247396c162e50ce41b70748a43a62 Mon Sep 17 00:00:00 2001 From: Jingchen Date: Mon, 21 Jul 2025 13:22:32 +0100 Subject: [PATCH 082/244] Fix macos runner (#5585) This change fixes the MacOS pipeline issue by limiting GitHub to choose the existing runners, ensuring the new experimental runners are excluded until they are ready. --- .github/workflows/macos.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 13c817087a..e9a4db3bed 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -40,7 +40,7 @@ jobs: - Ninja configuration: - Release - runs-on: [self-hosted, macOS] + runs-on: [self-hosted, macOS, mac-runner-m1] env: # The `build` action requires these variables. build_dir: .build From e95683a0fbdf102a92c104c4e4f407f599a96f3a Mon Sep 17 00:00:00 2001 From: Vito Tumas <5780819+Tapanito@users.noreply.github.com> Date: Mon, 21 Jul 2025 15:14:22 +0200 Subject: [PATCH 083/244] refactor: Change boost::shared_mutex to std::shared_mutex (#5576) This change reverts the usage of boost::shared_mutex back to std::shared_mutex. The change was originally introduced as a workaround for a bug in glibc 2.28 and older versions, which could cause threads using std::shared_mutex to stall. This issue primarily affected Ubuntu 18.04 and earlier distributions, which we no longer support. --- src/xrpld/app/misc/ValidatorList.h | 2 +- src/xrpld/overlay/detail/PeerImp.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/xrpld/app/misc/ValidatorList.h b/src/xrpld/app/misc/ValidatorList.h index 4cb32282db..1f5d728824 100644 --- a/src/xrpld/app/misc/ValidatorList.h +++ b/src/xrpld/app/misc/ValidatorList.h @@ -226,7 +226,7 @@ class ValidatorList TimeKeeper& timeKeeper_; boost::filesystem::path const dataPath_; beast::Journal const j_; - boost::shared_mutex mutable mutex_; + std::shared_mutex mutable mutex_; using lock_guard = std::lock_guard; using shared_lock = std::shared_lock; diff --git a/src/xrpld/overlay/detail/PeerImp.h b/src/xrpld/overlay/detail/PeerImp.h index ecd3fc7f63..d5f8e4d179 100644 --- a/src/xrpld/overlay/detail/PeerImp.h +++ b/src/xrpld/overlay/detail/PeerImp.h @@ -98,7 +98,7 @@ private: // Node public key of peer. PublicKey const publicKey_; std::string name_; - boost::shared_mutex mutable nameMutex_; + std::shared_mutex mutable nameMutex_; // The indices of the smallest and largest ledgers this peer has available // @@ -214,7 +214,7 @@ private: total_bytes() const; private: - boost::shared_mutex mutable mutex_; + std::shared_mutex mutable mutex_; boost::circular_buffer rollingAvg_{30, 0ull}; clock_type::time_point intervalStart_{clock_type::now()}; std::uint64_t totalBytes_{0}; From 03e46cd02617b3d7ce32c1a6820f91e2a04b961c Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Mon, 21 Jul 2025 15:03:53 +0100 Subject: [PATCH 084/244] Remove `include(default)` from libxrpl profile (#5587) Remove `include(default)` from `conan/profiles/libxrpl`. This means that we will now rely on compiler workarounds stored elsewhere e.g. in global.conf. 
--- .github/workflows/macos.yml | 4 +++- .github/workflows/nix.yml | 8 ++++++-- .github/workflows/windows.yml | 5 +++-- conan/profiles/libxrpl | 4 ---- 4 files changed, 12 insertions(+), 9 deletions(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index e9a4db3bed..8acd90eeff 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -24,6 +24,8 @@ env: CONAN_GLOBAL_CONF: | core.download:parallel={{os.cpu_count()}} core.upload:parallel={{os.cpu_count()}} + core:default_build_profile=libxrpl + core:default_profile=libxrpl tools.build:jobs={{ (os.cpu_count() * 4/5) | int }} tools.build:verbosity=verbose tools.compilation:verbosity=verbose @@ -87,7 +89,7 @@ jobs: clang --version - name: configure Conan run : | - echo "${CONAN_GLOBAL_CONF}" > global.conf + echo "${CONAN_GLOBAL_CONF}" >> $(conan config home)/global.conf conan config install conan/profiles/ -tf $(conan config home)/profiles/ conan profile show - name: export custom recipes diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml index 7543ade692..8218dcc276 100644 --- a/.github/workflows/nix.yml +++ b/.github/workflows/nix.yml @@ -25,6 +25,8 @@ env: CONAN_GLOBAL_CONF: | core.download:parallel={{ os.cpu_count() }} core.upload:parallel={{ os.cpu_count() }} + core:default_build_profile=libxrpl + core:default_profile=libxrpl tools.build:jobs={{ (os.cpu_count() * 4/5) | int }} tools.build:verbosity=verbose tools.compilation:verbosity=verbose @@ -91,7 +93,8 @@ jobs: env | sort - name: configure Conan run: | - echo "${CONAN_GLOBAL_CONF}" >> ${CONAN_HOME}/global.conf + echo "${CONAN_GLOBAL_CONF}" >> $(conan config home)/global.conf + conan config install conan/profiles/ -tf $(conan config home)/profiles/ conan profile show - name: archive profile # Create this archive before dependencies are added to the local cache. 
@@ -379,7 +382,8 @@ jobs: - name: configure Conan run: | - echo "${CONAN_GLOBAL_CONF}" >> ${CONAN_HOME}/global.conf + echo "${CONAN_GLOBAL_CONF}" >> $(conan config home)/global.conf + conan config install conan/profiles/ -tf $(conan config home)/profiles/ conan profile show - name: build dependencies run: | diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index ae0302f2a7..254850f26a 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -27,6 +27,8 @@ env: CONAN_GLOBAL_CONF: | core.download:parallel={{os.cpu_count()}} core.upload:parallel={{os.cpu_count()}} + core:default_build_profile=libxrpl + core:default_profile=libxrpl tools.build:jobs=24 tools.build:verbosity=verbose tools.compilation:verbosity=verbose @@ -82,8 +84,7 @@ jobs: - name: configure Conan shell: bash run: | - echo "${CONAN_GLOBAL_CONF}" > global.conf - mv conan/profiles/libxrpl conan/profiles/default conan config install conan/profiles/ -tf $(conan config home)/profiles/ conan profile show - name: export custom recipes diff --git a/conan/profiles/libxrpl b/conan/profiles/libxrpl index 862244536b..b037b8c4a2 100644 --- a/conan/profiles/libxrpl +++ b/conan/profiles/libxrpl @@ -6,10 +6,6 @@ {% set compiler_version = detect_api.default_compiler_version(compiler, version) %} {% endif %} -{% if os == "Linux" %} -include(default) -{% endif %} - [settings] os={{ os }} arch={{ arch }} From 60909655d3bfdadfe86cd9620f5e2b6018a5bba0 Mon Sep 17 00:00:00 2001 From: Luc des Trois Maisons <3maisons@gmail.com> Date: Tue, 22 Jul 2025 11:42:43 -0400 Subject: [PATCH 085/244] Restructure beast::rngfill (#5563) The current implementation of rngfill is prone to false warnings from GCC about array bounds violations. Looking at the code, the implementation naively manipulates both the bytes count and the buffer pointer directly to ensure the trailing memcpy doesn't overrun the buffer. As expressed, there is a data dependency on both fields between loop iterations. Now, ideally, an optimizing compiler would realize that these dependencies were unnecessary and end up restructuring its intermediate representation into a functionally equivalent form with them absent. However, the point at which this occurs may be disjoint from when warning analyses are performed, potentially rendering them more difficult to determine precisely. In addition, it may also consume a portion of the budget the optimizer has allocated to attempting to improve a translation unit's performance. Given this is a function template which requires context-sensitive instantiation, this code would be more prone than most to being inlined, with a decrease in optimization budget corresponding to the effort the optimizer has already expended, having already optimized one or more calling functions. Thus, the scope for impacting the ultimate quality of the code generated is elevated. For this change, we rearrange things so that the location and contents of each memcpy can be computed independently, relying on a simple loop iteration counter as the only changing input between iterations.
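A minimal usage sketch (not part of the patch) to show that the interface is unchanged by the restructuring: `rngfill` still fills an arbitrary byte buffer from any generator exposing `result_type` and `operator()`. The generator and buffer size chosen here are illustrative.

```
// Illustrative caller only; the restructured beast::rngfill keeps the same
// signature, so existing call sites compile unchanged.
#include <xrpl/beast/utility/rngfill.h>

#include <array>
#include <cstdint>
#include <random>

int
main()
{
    std::mt19937_64 gen{42};            // any generator with result_type and operator()
    std::array<std::uint8_t, 37> buf{}; // deliberately not a multiple of the result size

    // With an 8-byte result_type this performs 4 full-size memcpy calls plus a
    // 5-byte tail copy, each at an offset derived from the loop counter alone.
    beast::rngfill(buf.data(), buf.size(), gen);
}
```
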
--- include/xrpl/beast/utility/rngfill.h | 38 ++++++++++------------------ 1 file changed, 14 insertions(+), 24 deletions(-) diff --git a/include/xrpl/beast/utility/rngfill.h b/include/xrpl/beast/utility/rngfill.h index 2b5a9ba040..e1b47618ba 100644 --- a/include/xrpl/beast/utility/rngfill.h +++ b/include/xrpl/beast/utility/rngfill.h @@ -31,38 +31,28 @@ namespace beast { template void -rngfill(void* buffer, std::size_t bytes, Generator& g) +rngfill(void* const buffer, std::size_t const bytes, Generator& g) { using result_type = typename Generator::result_type; + constexpr std::size_t result_size = sizeof(result_type); - while (bytes >= sizeof(result_type)) + std::uint8_t* const buffer_start = static_cast(buffer); + std::size_t const complete_iterations = bytes / result_size; + std::size_t const bytes_remaining = bytes % result_size; + + for (std::size_t count = 0; count < complete_iterations; ++count) { - auto const v = g(); - std::memcpy(buffer, &v, sizeof(v)); - buffer = reinterpret_cast(buffer) + sizeof(v); - bytes -= sizeof(v); + result_type const v = g(); + std::size_t const offset = count * result_size; + std::memcpy(buffer_start + offset, &v, result_size); } - XRPL_ASSERT( - bytes < sizeof(result_type), "beast::rngfill(void*) : maximum bytes"); - -#ifdef __GNUC__ - // gcc 11.1 (falsely) warns about an array-bounds overflow in release mode. - // gcc 12.1 (also falsely) warns about an string overflow in release mode. -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Warray-bounds" -#pragma GCC diagnostic ignored "-Wstringop-overflow" -#endif - - if (bytes > 0) + if (bytes_remaining > 0) { - auto const v = g(); - std::memcpy(buffer, &v, bytes); + result_type const v = g(); + std::size_t const offset = complete_iterations * result_size; + std::memcpy(buffer_start + offset, &v, bytes_remaining); } - -#ifdef __GNUC__ -#pragma GCC diagnostic pop -#endif } template < From 7ff4f79d304dc2145f74718a9fcdf33578c32479 Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Wed, 23 Jul 2025 11:44:18 +0100 Subject: [PATCH 086/244] Fix clang-format CI job (#5598) For jobs running in containers, $GITHUB_WORKSPACE and ${{ github.workspace }} might not be the same directory. The actions/checkout step is supposed to checkout into `$GITHUB_WORKSPACE` and then add it to safe.directory (see instructions at https://github.com/actions/checkout), but that's apparently not happening for some container images. We can't be sure what is actually happening, so we preemptively add both directories to `safe.directory`. See also the GitHub issue opened in 2022 that still has not been resolved https://github.com/actions/runner/issues/2058. --- .github/workflows/clang-format.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.github/workflows/clang-format.yml b/.github/workflows/clang-format.yml index 83752c4780..0d81f87791 100644 --- a/.github/workflows/clang-format.yml +++ b/.github/workflows/clang-format.yml @@ -11,6 +11,15 @@ jobs: runs-on: ubuntu-24.04 container: ghcr.io/xrplf/ci/tools-rippled-clang-format steps: + # For jobs running in containers, $GITHUB_WORKSPACE and ${{ github.workspace }} might not be the + # same directory. The actions/checkout step is *supposed* to checkout into $GITHUB_WORKSPACE and + # then add it to safe.directory (see instructions at https://github.com/actions/checkout) + # but that's apparently not happening for some container images. We can't be sure what is actually + # happening, so let's pre-emptively add both directories to safe.directory. 
There's a + # Github issue opened in 2022 and not resolved in 2025 https://github.com/actions/runner/issues/2058 ¯\_(ツ)_/¯ + - run: | + git config --global --add safe.directory $GITHUB_WORKSPACE + git config --global --add safe.directory ${{ github.workspace }} - uses: actions/checkout@v4 - name: Format first-party sources run: | From c233df720a240c6cd5b774ab4a557c272d51c7aa Mon Sep 17 00:00:00 2001 From: Valentin Balaschenko <13349202+vlntb@users.noreply.github.com> Date: Wed, 23 Jul 2025 13:03:12 +0100 Subject: [PATCH 087/244] refactor: Makes HashRouter flags more type-safe (#5371) This change addresses the issue #5336: Refactor HashRouter flags to be more type-safe. * Switched numeric flags to enum type. * Updated unit tests --- src/test/app/Batch_test.cpp | 10 ++- src/test/app/HashRouter_test.cpp | 127 ++++++++++++++++++--------- src/xrpld/app/ledger/Ledger.cpp | 3 +- src/xrpld/app/misc/HashRouter.cpp | 14 +-- src/xrpld/app/misc/HashRouter.h | 82 ++++++++++++----- src/xrpld/app/misc/NetworkOPs.cpp | 21 +++-- src/xrpld/app/tx/detail/Escrow.cpp | 14 +-- src/xrpld/app/tx/detail/apply.cpp | 27 +++--- src/xrpld/overlay/detail/PeerImp.cpp | 25 +++--- src/xrpld/overlay/detail/PeerImp.h | 3 +- 10 files changed, 219 insertions(+), 107 deletions(-) diff --git a/src/test/app/Batch_test.cpp b/src/test/app/Batch_test.cpp index c8fcc4092b..92f286ca6a 100644 --- a/src/test/app/Batch_test.cpp +++ b/src/test/app/Batch_test.cpp @@ -3652,14 +3652,18 @@ class Batch_test : public beast::unit_test::suite { // Submit a tx with tfInnerBatchTxn uint256 const txBad = submitTx(tfInnerBatchTxn); - BEAST_EXPECT(env.app().getHashRouter().getFlags(txBad) == 0); + BEAST_EXPECT( + env.app().getHashRouter().getFlags(txBad) == + HashRouterFlags::UNDEFINED); } // Validate: NetworkOPs::processTransaction() { uint256 const txid = processTxn(tfInnerBatchTxn); - // HashRouter::getFlags() should return SF_BAD - BEAST_EXPECT(env.app().getHashRouter().getFlags(txid) == SF_BAD); + // HashRouter::getFlags() should return LedgerFlags::BAD + BEAST_EXPECT( + env.app().getHashRouter().getFlags(txid) == + HashRouterFlags::BAD); } } diff --git a/src/test/app/HashRouter_test.cpp b/src/test/app/HashRouter_test.cpp index 0737116f13..44170e152e 100644 --- a/src/test/app/HashRouter_test.cpp +++ b/src/test/app/HashRouter_test.cpp @@ -45,15 +45,19 @@ class HashRouter_test : public beast::unit_test::suite TestStopwatch stopwatch; HashRouter router(getSetup(2s, 1s), stopwatch); - uint256 const key1(1); - uint256 const key2(2); - uint256 const key3(3); + HashRouterFlags key1(HashRouterFlags::PRIVATE1); + HashRouterFlags key2(HashRouterFlags::PRIVATE2); + HashRouterFlags key3(HashRouterFlags::PRIVATE3); + + auto const ukey1 = uint256{static_cast(key1)}; + auto const ukey2 = uint256{static_cast(key2)}; + auto const ukey3 = uint256{static_cast(key3)}; // t=0 - router.setFlags(key1, 11111); - BEAST_EXPECT(router.getFlags(key1) == 11111); - router.setFlags(key2, 22222); - BEAST_EXPECT(router.getFlags(key2) == 22222); + router.setFlags(ukey1, HashRouterFlags::PRIVATE1); + BEAST_EXPECT(router.getFlags(ukey1) == HashRouterFlags::PRIVATE1); + router.setFlags(ukey2, HashRouterFlags::PRIVATE2); + BEAST_EXPECT(router.getFlags(ukey2) == HashRouterFlags::PRIVATE2); // key1 : 0 // key2 : 0 // key3: null @@ -62,7 +66,7 @@ class HashRouter_test : public beast::unit_test::suite // Because we are accessing key1 here, it // will NOT be expired for another two ticks - BEAST_EXPECT(router.getFlags(key1) == 11111); + BEAST_EXPECT(router.getFlags(ukey1) == 
HashRouterFlags::PRIVATE1); // key1 : 1 // key2 : 0 // key3 null @@ -70,9 +74,9 @@ class HashRouter_test : public beast::unit_test::suite ++stopwatch; // t=3 - router.setFlags(key3, 33333); // force expiration - BEAST_EXPECT(router.getFlags(key1) == 11111); - BEAST_EXPECT(router.getFlags(key2) == 0); + router.setFlags(ukey3, HashRouterFlags::PRIVATE3); // force expiration + BEAST_EXPECT(router.getFlags(ukey1) == HashRouterFlags::PRIVATE1); + BEAST_EXPECT(router.getFlags(ukey2) == HashRouterFlags::UNDEFINED); } void @@ -83,15 +87,21 @@ class HashRouter_test : public beast::unit_test::suite TestStopwatch stopwatch; HashRouter router(getSetup(2s, 1s), stopwatch); - uint256 const key1(1); - uint256 const key2(2); - uint256 const key3(3); - uint256 const key4(4); + HashRouterFlags key1(HashRouterFlags::PRIVATE1); + HashRouterFlags key2(HashRouterFlags::PRIVATE2); + HashRouterFlags key3(HashRouterFlags::PRIVATE3); + HashRouterFlags key4(HashRouterFlags::PRIVATE4); + + auto const ukey1 = uint256{static_cast(key1)}; + auto const ukey2 = uint256{static_cast(key2)}; + auto const ukey3 = uint256{static_cast(key3)}; + auto const ukey4 = uint256{static_cast(key4)}; + BEAST_EXPECT(key1 != key2 && key2 != key3 && key3 != key4); // t=0 - router.setFlags(key1, 12345); - BEAST_EXPECT(router.getFlags(key1) == 12345); + router.setFlags(ukey1, HashRouterFlags::BAD); + BEAST_EXPECT(router.getFlags(ukey1) == HashRouterFlags::BAD); // key1 : 0 // key2 : null // key3 : null @@ -103,26 +113,27 @@ class HashRouter_test : public beast::unit_test::suite // so key1 will be expired after the second // call to setFlags. // t=1 - router.setFlags(key2, 9999); - BEAST_EXPECT(router.getFlags(key1) == 12345); - BEAST_EXPECT(router.getFlags(key2) == 9999); + + router.setFlags(ukey2, HashRouterFlags::PRIVATE5); + BEAST_EXPECT(router.getFlags(ukey1) == HashRouterFlags::BAD); + BEAST_EXPECT(router.getFlags(ukey2) == HashRouterFlags::PRIVATE5); // key1 : 1 // key2 : 1 // key3 : null ++stopwatch; // t=2 - BEAST_EXPECT(router.getFlags(key2) == 9999); + BEAST_EXPECT(router.getFlags(ukey2) == HashRouterFlags::PRIVATE5); // key1 : 1 // key2 : 2 // key3 : null ++stopwatch; // t=3 - router.setFlags(key3, 2222); - BEAST_EXPECT(router.getFlags(key1) == 0); - BEAST_EXPECT(router.getFlags(key2) == 9999); - BEAST_EXPECT(router.getFlags(key3) == 2222); + router.setFlags(ukey3, HashRouterFlags::BAD); + BEAST_EXPECT(router.getFlags(ukey1) == HashRouterFlags::UNDEFINED); + BEAST_EXPECT(router.getFlags(ukey2) == HashRouterFlags::PRIVATE5); + BEAST_EXPECT(router.getFlags(ukey3) == HashRouterFlags::BAD); // key1 : 3 // key2 : 3 // key3 : 3 @@ -130,10 +141,10 @@ class HashRouter_test : public beast::unit_test::suite ++stopwatch; // t=4 // No insertion, no expiration - router.setFlags(key1, 7654); - BEAST_EXPECT(router.getFlags(key1) == 7654); - BEAST_EXPECT(router.getFlags(key2) == 9999); - BEAST_EXPECT(router.getFlags(key3) == 2222); + router.setFlags(ukey1, HashRouterFlags::SAVED); + BEAST_EXPECT(router.getFlags(ukey1) == HashRouterFlags::SAVED); + BEAST_EXPECT(router.getFlags(ukey2) == HashRouterFlags::PRIVATE5); + BEAST_EXPECT(router.getFlags(ukey3) == HashRouterFlags::BAD); // key1 : 4 // key2 : 4 // key3 : 4 @@ -142,11 +153,11 @@ class HashRouter_test : public beast::unit_test::suite ++stopwatch; // t=6 - router.setFlags(key4, 7890); - BEAST_EXPECT(router.getFlags(key1) == 0); - BEAST_EXPECT(router.getFlags(key2) == 0); - BEAST_EXPECT(router.getFlags(key3) == 0); - BEAST_EXPECT(router.getFlags(key4) == 7890); + router.setFlags(ukey4, 
HashRouterFlags::TRUSTED); + BEAST_EXPECT(router.getFlags(ukey1) == HashRouterFlags::UNDEFINED); + BEAST_EXPECT(router.getFlags(ukey2) == HashRouterFlags::UNDEFINED); + BEAST_EXPECT(router.getFlags(ukey3) == HashRouterFlags::UNDEFINED); + BEAST_EXPECT(router.getFlags(ukey4) == HashRouterFlags::TRUSTED); // key1 : 6 // key2 : 6 // key3 : 6 @@ -168,18 +179,18 @@ class HashRouter_test : public beast::unit_test::suite uint256 const key4(4); BEAST_EXPECT(key1 != key2 && key2 != key3 && key3 != key4); - int flags = 12345; // This value is ignored + HashRouterFlags flags(HashRouterFlags::BAD); // This value is ignored router.addSuppression(key1); BEAST_EXPECT(router.addSuppressionPeer(key2, 15)); BEAST_EXPECT(router.addSuppressionPeer(key3, 20, flags)); - BEAST_EXPECT(flags == 0); + BEAST_EXPECT(flags == HashRouterFlags::UNDEFINED); ++stopwatch; BEAST_EXPECT(!router.addSuppressionPeer(key1, 2)); BEAST_EXPECT(!router.addSuppressionPeer(key2, 3)); BEAST_EXPECT(!router.addSuppressionPeer(key3, 4, flags)); - BEAST_EXPECT(flags == 0); + BEAST_EXPECT(flags == HashRouterFlags::UNDEFINED); BEAST_EXPECT(router.addSuppressionPeer(key4, 5)); } @@ -192,9 +203,9 @@ class HashRouter_test : public beast::unit_test::suite HashRouter router(getSetup(2s, 1s), stopwatch); uint256 const key1(1); - BEAST_EXPECT(router.setFlags(key1, 10)); - BEAST_EXPECT(!router.setFlags(key1, 10)); - BEAST_EXPECT(router.setFlags(key1, 20)); + BEAST_EXPECT(router.setFlags(key1, HashRouterFlags::PRIVATE1)); + BEAST_EXPECT(!router.setFlags(key1, HashRouterFlags::PRIVATE1)); + BEAST_EXPECT(router.setFlags(key1, HashRouterFlags::PRIVATE2)); } void @@ -250,7 +261,7 @@ class HashRouter_test : public beast::unit_test::suite HashRouter router(getSetup(5s, 1s), stopwatch); uint256 const key(1); HashRouter::PeerShortID peer = 1; - int flags; + HashRouterFlags flags; BEAST_EXPECT(router.shouldProcess(key, peer, flags, 1s)); BEAST_EXPECT(!router.shouldProcess(key, peer, flags, 1s)); @@ -364,6 +375,39 @@ class HashRouter_test : public beast::unit_test::suite } } + void + testFlagsOps() + { + testcase("Bitwise Operations"); + + using HF = HashRouterFlags; + using UHF = std::underlying_type_t; + + HF f1 = HF::BAD; + HF f2 = HF::SAVED; + HF combined = f1 | f2; + + BEAST_EXPECT( + static_cast(combined) == + (static_cast(f1) | static_cast(f2))); + + HF temp = f1; + temp |= f2; + BEAST_EXPECT(temp == combined); + + HF intersect = combined & f1; + BEAST_EXPECT(intersect == f1); + + HF temp2 = combined; + temp2 &= f1; + BEAST_EXPECT(temp2 == f1); + + BEAST_EXPECT(any(f1)); + BEAST_EXPECT(any(f2)); + BEAST_EXPECT(any(combined)); + BEAST_EXPECT(!any(HF::UNDEFINED)); + } + public: void run() override @@ -375,6 +419,7 @@ public: testRelay(); testProcess(); testSetup(); + testFlagsOps(); } }; diff --git a/src/xrpld/app/ledger/Ledger.cpp b/src/xrpld/app/ledger/Ledger.cpp index 3cdf0ab1a7..6de4f2cbde 100644 --- a/src/xrpld/app/ledger/Ledger.cpp +++ b/src/xrpld/app/ledger/Ledger.cpp @@ -996,7 +996,8 @@ pendSaveValidated( bool isSynchronous, bool isCurrent) { - if (!app.getHashRouter().setFlags(ledger->info().hash, SF_SAVED)) + if (!app.getHashRouter().setFlags( + ledger->info().hash, HashRouterFlags::SAVED)) { // We have tried to save this ledger recently auto stream = app.journal("Ledger").debug(); diff --git a/src/xrpld/app/misc/HashRouter.cpp b/src/xrpld/app/misc/HashRouter.cpp index dc87b2bce1..b241d6a98a 100644 --- a/src/xrpld/app/misc/HashRouter.cpp +++ b/src/xrpld/app/misc/HashRouter.cpp @@ -65,7 +65,10 @@ HashRouter::addSuppressionPeerWithStatus(uint256 
const& key, PeerShortID peer) } bool -HashRouter::addSuppressionPeer(uint256 const& key, PeerShortID peer, int& flags) +HashRouter::addSuppressionPeer( + uint256 const& key, + PeerShortID peer, + HashRouterFlags& flags) { std::lock_guard lock(mutex_); @@ -79,7 +82,7 @@ bool HashRouter::shouldProcess( uint256 const& key, PeerShortID peer, - int& flags, + HashRouterFlags& flags, std::chrono::seconds tx_interval) { std::lock_guard lock(mutex_); @@ -91,7 +94,7 @@ HashRouter::shouldProcess( return s.shouldProcess(suppressionMap_.clock().now(), tx_interval); } -int +HashRouterFlags HashRouter::getFlags(uint256 const& key) { std::lock_guard lock(mutex_); @@ -100,9 +103,10 @@ HashRouter::getFlags(uint256 const& key) } bool -HashRouter::setFlags(uint256 const& key, int flags) +HashRouter::setFlags(uint256 const& key, HashRouterFlags flags) { - XRPL_ASSERT(flags, "ripple::HashRouter::setFlags : valid input"); + XRPL_ASSERT( + static_cast(flags), "ripple::HashRouter::setFlags : valid input"); std::lock_guard lock(mutex_); diff --git a/src/xrpld/app/misc/HashRouter.h b/src/xrpld/app/misc/HashRouter.h index d1d69623c1..60a0b01155 100644 --- a/src/xrpld/app/misc/HashRouter.h +++ b/src/xrpld/app/misc/HashRouter.h @@ -31,20 +31,59 @@ namespace ripple { -// TODO convert these macros to int constants or an enum -#define SF_BAD 0x02 // Temporarily bad -#define SF_SAVED 0x04 -#define SF_HELD 0x08 // Held by LedgerMaster after potential processing failure -#define SF_TRUSTED 0x10 // comes from trusted source +enum class HashRouterFlags : std::uint16_t { + // Public flags + UNDEFINED = 0x00, + BAD = 0x02, // Temporarily bad + SAVED = 0x04, + HELD = 0x08, // Held by LedgerMaster after potential processing failure + TRUSTED = 0x10, // Comes from a trusted source -// Private flags, used internally in apply.cpp. -// Do not attempt to read, set, or reuse. -#define SF_PRIVATE1 0x0100 -#define SF_PRIVATE2 0x0200 -#define SF_PRIVATE3 0x0400 -#define SF_PRIVATE4 0x0800 -#define SF_PRIVATE5 0x1000 -#define SF_PRIVATE6 0x2000 + // Private flags (used internally in apply.cpp) + // Do not attempt to read, set, or reuse. + PRIVATE1 = 0x0100, + PRIVATE2 = 0x0200, + PRIVATE3 = 0x0400, + PRIVATE4 = 0x0800, + PRIVATE5 = 0x1000, + PRIVATE6 = 0x2000 +}; + +constexpr HashRouterFlags +operator|(HashRouterFlags lhs, HashRouterFlags rhs) +{ + return static_cast( + static_cast>(lhs) | + static_cast>(rhs)); +} + +constexpr HashRouterFlags& +operator|=(HashRouterFlags& lhs, HashRouterFlags rhs) +{ + lhs = lhs | rhs; + return lhs; +} + +constexpr HashRouterFlags +operator&(HashRouterFlags lhs, HashRouterFlags rhs) +{ + return static_cast( + static_cast>(lhs) & + static_cast>(rhs)); +} + +constexpr HashRouterFlags& +operator&=(HashRouterFlags& lhs, HashRouterFlags rhs) +{ + lhs = lhs & rhs; + return lhs; +} + +constexpr bool +any(HashRouterFlags flags) +{ + return static_cast>(flags) != 0; +} class Config; @@ -101,14 +140,14 @@ private: peers_.insert(peer); } - int + HashRouterFlags getFlags(void) const { return flags_; } void - setFlags(int flagsToSet) + setFlags(HashRouterFlags flagsToSet) { flags_ |= flagsToSet; } @@ -154,7 +193,7 @@ private: } private: - int flags_ = 0; + HashRouterFlags flags_ = HashRouterFlags::UNDEFINED; std::set peers_; // This could be generalized to a map, if more // than one flag needs to expire independently. 
@@ -190,14 +229,17 @@ public: addSuppressionPeerWithStatus(uint256 const& key, PeerShortID peer); bool - addSuppressionPeer(uint256 const& key, PeerShortID peer, int& flags); + addSuppressionPeer( + uint256 const& key, + PeerShortID peer, + HashRouterFlags& flags); // Add a peer suppression and return whether the entry should be processed bool shouldProcess( uint256 const& key, PeerShortID peer, - int& flags, + HashRouterFlags& flags, std::chrono::seconds tx_interval); /** Set the flags on a hash. @@ -205,9 +247,9 @@ public: @return `true` if the flags were changed. `false` if unchanged. */ bool - setFlags(uint256 const& key, int flags); + setFlags(uint256 const& key, HashRouterFlags flags); - int + HashRouterFlags getFlags(uint256 const& key); /** Determines whether the hashed item should be relayed. diff --git a/src/xrpld/app/misc/NetworkOPs.cpp b/src/xrpld/app/misc/NetworkOPs.cpp index a7ddbe912c..1ac42579ba 100644 --- a/src/xrpld/app/misc/NetworkOPs.cpp +++ b/src/xrpld/app/misc/NetworkOPs.cpp @@ -1207,7 +1207,7 @@ NetworkOPsImp::submitTransaction(std::shared_ptr const& iTrans) auto const txid = trans->getTransactionID(); auto const flags = app_.getHashRouter().getFlags(txid); - if ((flags & SF_BAD) != 0) + if ((flags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED) { JLOG(m_journal.warn()) << "Submitted transaction cached bad"; return; @@ -1251,7 +1251,7 @@ NetworkOPsImp::preProcessTransaction(std::shared_ptr& transaction) { auto const newFlags = app_.getHashRouter().getFlags(transaction->getID()); - if ((newFlags & SF_BAD) != 0) + if ((newFlags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED) { // cached bad JLOG(m_journal.warn()) << transaction->getID() << ": cached bad!\n"; @@ -1270,7 +1270,8 @@ NetworkOPsImp::preProcessTransaction(std::shared_ptr& transaction) { transaction->setStatus(INVALID); transaction->setResult(temINVALID_FLAG); - app_.getHashRouter().setFlags(transaction->getID(), SF_BAD); + app_.getHashRouter().setFlags( + transaction->getID(), HashRouterFlags::BAD); return false; } @@ -1289,7 +1290,8 @@ NetworkOPsImp::preProcessTransaction(std::shared_ptr& transaction) JLOG(m_journal.info()) << "Transaction has bad signature: " << reason; transaction->setStatus(INVALID); transaction->setResult(temBAD_SIGNATURE); - app_.getHashRouter().setFlags(transaction->getID(), SF_BAD); + app_.getHashRouter().setFlags( + transaction->getID(), HashRouterFlags::BAD); return false; } @@ -1412,7 +1414,8 @@ NetworkOPsImp::processTransactionSet(CanonicalTXSet const& set) JLOG(m_journal.trace()) << "Exception checking transaction: " << reason; } - app_.getHashRouter().setFlags(tx->getTransactionID(), SF_BAD); + app_.getHashRouter().setFlags( + tx->getTransactionID(), HashRouterFlags::BAD); continue; } @@ -1538,7 +1541,8 @@ NetworkOPsImp::apply(std::unique_lock& batchLock) e.transaction->setResult(e.result); if (isTemMalformed(e.result)) - app_.getHashRouter().setFlags(e.transaction->getID(), SF_BAD); + app_.getHashRouter().setFlags( + e.transaction->getID(), HashRouterFlags::BAD); #ifdef DEBUG if (e.result != tesSUCCESS) @@ -1626,7 +1630,8 @@ NetworkOPsImp::apply(std::unique_lock& batchLock) // (5) ledgers into the future. (Remember that an // unseated optional compares as less than all seated // values, so it has to be checked explicitly first.) - // 3. The SF_HELD flag is not set on the txID. (setFlags + // 3. The HashRouterFlags::BAD flag is not set on the txID. + // (setFlags // checks before setting. 
If the flag is set, it returns // false, which means it's been held once without one of // the other conditions, so don't hold it again. Time's @@ -1635,7 +1640,7 @@ NetworkOPsImp::apply(std::unique_lock& batchLock) if (e.local || (ledgersLeft && ledgersLeft <= LocalTxs::holdLedgers) || app_.getHashRouter().setFlags( - e.transaction->getID(), SF_HELD)) + e.transaction->getID(), HashRouterFlags::HELD)) { // transaction should be held JLOG(m_journal.debug()) diff --git a/src/xrpld/app/tx/detail/Escrow.cpp b/src/xrpld/app/tx/detail/Escrow.cpp index 8f7005d55c..c62c38c675 100644 --- a/src/xrpld/app/tx/detail/Escrow.cpp +++ b/src/xrpld/app/tx/detail/Escrow.cpp @@ -34,13 +34,13 @@ #include #include +namespace ripple { + // During an EscrowFinish, the transaction must specify both // a condition and a fulfillment. We track whether that // fulfillment matches and validates the condition. -#define SF_CF_INVALID SF_PRIVATE5 -#define SF_CF_VALID SF_PRIVATE6 - -namespace ripple { +constexpr HashRouterFlags SF_CF_INVALID = HashRouterFlags::PRIVATE5; +constexpr HashRouterFlags SF_CF_VALID = HashRouterFlags::PRIVATE6; /* Escrow @@ -663,7 +663,7 @@ EscrowFinish::preflight(PreflightContext const& ctx) // If we haven't checked the condition, check it // now. Whether it passes or not isn't important // in preflight. - if (!(flags & (SF_CF_INVALID | SF_CF_VALID))) + if (!any(flags & (SF_CF_INVALID | SF_CF_VALID))) { if (checkCondition(*fb, *cb)) router.setFlags(id, SF_CF_VALID); @@ -1064,7 +1064,7 @@ EscrowFinish::doApply() // It's unlikely that the results of the check will // expire from the hash router, but if it happens, // simply re-run the check. - if (cb && !(flags & (SF_CF_INVALID | SF_CF_VALID))) + if (cb && !any(flags & (SF_CF_INVALID | SF_CF_VALID))) { auto const fb = ctx_.tx[~sfFulfillment]; @@ -1081,7 +1081,7 @@ EscrowFinish::doApply() // If the check failed, then simply return an error // and don't look at anything else. - if (flags & SF_CF_INVALID) + if (any(flags & SF_CF_INVALID)) return tecCRYPTOCONDITION_ERROR; // Check against condition in the ledger entry: diff --git a/src/xrpld/app/tx/detail/apply.cpp b/src/xrpld/app/tx/detail/apply.cpp index 889a520032..e2e0adae45 100644 --- a/src/xrpld/app/tx/detail/apply.cpp +++ b/src/xrpld/app/tx/detail/apply.cpp @@ -27,11 +27,16 @@ namespace ripple { -// These are the same flags defined as SF_PRIVATE1-4 in HashRouter.h -#define SF_SIGBAD SF_PRIVATE1 // Signature is bad -#define SF_SIGGOOD SF_PRIVATE2 // Signature is good -#define SF_LOCALBAD SF_PRIVATE3 // Local checks failed -#define SF_LOCALGOOD SF_PRIVATE4 // Local checks passed +// These are the same flags defined as HashRouterFlags::PRIVATE1-4 in +// HashRouter.h +constexpr HashRouterFlags SF_SIGBAD = + HashRouterFlags::PRIVATE1; // Signature is bad +constexpr HashRouterFlags SF_SIGGOOD = + HashRouterFlags::PRIVATE2; // Signature is good +constexpr HashRouterFlags SF_LOCALBAD = + HashRouterFlags::PRIVATE3; // Local checks failed +constexpr HashRouterFlags SF_LOCALGOOD = + HashRouterFlags::PRIVATE4; // Local checks passed //------------------------------------------------------------------------------ @@ -66,11 +71,11 @@ checkValidity( return {Validity::Valid, ""}; } - if (flags & SF_SIGBAD) + if (any(flags & SF_SIGBAD)) // Signature is known bad return {Validity::SigBad, "Transaction has bad signature."}; - if (!(flags & SF_SIGGOOD)) + if (!any(flags & SF_SIGGOOD)) { // Don't know signature state. Check it. 
auto const requireCanonicalSig = @@ -88,12 +93,12 @@ checkValidity( } // Signature is now known good - if (flags & SF_LOCALBAD) + if (any(flags & SF_LOCALBAD)) // ...but the local checks // are known bad. return {Validity::SigGoodOnly, "Local checks failed."}; - if (flags & SF_LOCALGOOD) + if (any(flags & SF_LOCALGOOD)) // ...and the local checks // are known good. return {Validity::Valid, ""}; @@ -112,7 +117,7 @@ checkValidity( void forceValidity(HashRouter& router, uint256 const& txid, Validity validity) { - int flags = 0; + HashRouterFlags flags = HashRouterFlags::UNDEFINED; switch (validity) { case Validity::Valid: @@ -125,7 +130,7 @@ forceValidity(HashRouter& router, uint256 const& txid, Validity validity) // would be silly to call directly break; } - if (flags) + if (any(flags)) router.setFlags(txid, flags); } diff --git a/src/xrpld/overlay/detail/PeerImp.cpp b/src/xrpld/overlay/detail/PeerImp.cpp index 1238833d0d..23b4760488 100644 --- a/src/xrpld/overlay/detail/PeerImp.cpp +++ b/src/xrpld/overlay/detail/PeerImp.cpp @@ -1296,13 +1296,13 @@ PeerImp::handleTransaction( } // LCOV_EXCL_STOP - int flags; + HashRouterFlags flags; constexpr std::chrono::seconds tx_interval = 10s; if (!app_.getHashRouter().shouldProcess(txID, id_, flags, tx_interval)) { // we have seen this transaction recently - if (flags & SF_BAD) + if (any(flags & HashRouterFlags::BAD)) { fee_.update(Resource::feeUselessData, "known bad"); JLOG(p_journal_.debug()) << "Ignoring known bad tx " << txID; @@ -1329,7 +1329,7 @@ PeerImp::handleTransaction( { // Skip local checks if a server we trust // put the transaction in its open ledger - flags |= SF_TRUSTED; + flags |= HashRouterFlags::TRUSTED; } // for non-validator nodes only -- localPublicKey is set for @@ -2841,7 +2841,7 @@ PeerImp::doTransactions( void PeerImp::checkTransaction( - int flags, + HashRouterFlags flags, bool checkSignature, std::shared_ptr const& stx, bool batch) @@ -2866,7 +2866,8 @@ PeerImp::checkTransaction( (stx->getFieldU32(sfLastLedgerSequence) < app_.getLedgerMaster().getValidLedgerIndex())) { - app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD); + app_.getHashRouter().setFlags( + stx->getTransactionID(), HashRouterFlags::BAD); charge(Resource::feeUselessData, "expired tx"); return; } @@ -2925,8 +2926,10 @@ PeerImp::checkTransaction( << "Exception checking transaction: " << validReason; } - // Probably not necessary to set SF_BAD, but doesn't hurt. - app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD); + // Probably not necessary to set HashRouterFlags::BAD, but + // doesn't hurt. 
+ app_.getHashRouter().setFlags( + stx->getTransactionID(), HashRouterFlags::BAD); charge( Resource::feeInvalidSignature, "check transaction signature failure"); @@ -2949,12 +2952,13 @@ PeerImp::checkTransaction( JLOG(p_journal_.trace()) << "Exception checking transaction: " << reason; } - app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD); + app_.getHashRouter().setFlags( + stx->getTransactionID(), HashRouterFlags::BAD); charge(Resource::feeInvalidSignature, "tx (impossible)"); return; } - bool const trusted(flags & SF_TRUSTED); + bool const trusted = any(flags & HashRouterFlags::TRUSTED); app_.getOPs().processTransaction( tx, trusted, false, NetworkOPs::FailHard::no); } @@ -2962,7 +2966,8 @@ PeerImp::checkTransaction( { JLOG(p_journal_.warn()) << "Exception in " << __func__ << ": " << ex.what(); - app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD); + app_.getHashRouter().setFlags( + stx->getTransactionID(), HashRouterFlags::BAD); using namespace std::string_literals; charge(Resource::feeInvalidData, "tx "s + ex.what()); } diff --git a/src/xrpld/overlay/detail/PeerImp.h b/src/xrpld/overlay/detail/PeerImp.h index d5f8e4d179..5aa49fd152 100644 --- a/src/xrpld/overlay/detail/PeerImp.h +++ b/src/xrpld/overlay/detail/PeerImp.h @@ -22,6 +22,7 @@ #include #include +#include #include #include #include @@ -612,7 +613,7 @@ private: void checkTransaction( - int flags, + HashRouterFlags flags, bool checkSignature, std::shared_ptr const& stx, bool batch); From faa781b71f8381d8adf19f9edbe0abc8bd2d20cb Mon Sep 17 00:00:00 2001 From: Jingchen Date: Wed, 23 Jul 2025 14:27:41 +0100 Subject: [PATCH 088/244] Remove obsolete owner pays fee feature and XRPL_ABANDON stanza (#5550) If a feature was never voted on then it is safe to remove. --- include/xrpl/protocol/Feature.h | 34 ++++++++------------- include/xrpl/protocol/detail/features.macro | 8 ----- src/libxrpl/protocol/Feature.cpp | 21 ++----------- 3 files changed, 14 insertions(+), 49 deletions(-) diff --git a/include/xrpl/protocol/Feature.h b/include/xrpl/protocol/Feature.h index c55776a5ce..5844a70eb0 100644 --- a/include/xrpl/protocol/Feature.h +++ b/include/xrpl/protocol/Feature.h @@ -53,19 +53,19 @@ * then change the macro parameter in features.macro to * `VoteBehavior::DefaultYes`. The communication process is beyond * the scope of these instructions. + + * 5) If a supported feature (`Supported::yes`) was _ever_ in a released + * version, it can never be changed back to `Supported::no`, because + * it _may_ still become enabled at any time. This would cause newer + * versions of `rippled` to become amendment blocked. + * Instead, to prevent newer versions from voting on the feature, use + * `VoteBehavior::Obsolete`. Obsolete features can not be voted for + * by any versions of `rippled` built with that setting, but will still + * work correctly if they get enabled. If a feature remains obsolete + * for long enough that _all_ clients that could vote for it are + * amendment blocked, the feature can be removed from the code + * as if it was unsupported. * - * 5) A feature marked as Obsolete can mean either: - * 1) It is in the ledger (marked as Supported::yes) and it is on its way to - * become Retired - * 2) The feature is not in the ledger (has always been marked as - * Supported::no) and the code to support it has been removed - * - * If we want to discontinue a feature that we've never fully supported and - * the feature has never been enabled, we should remove all the related - * code, and mark the feature as "abandoned". 
To do this: - * - * 1) Open features.macro, move the feature to the abandoned section and - * change the macro to XRPL_ABANDON * * When a feature has been enabled for several years, the conditional code * may be removed, and the feature "retired". To retire a feature: @@ -99,13 +99,10 @@ namespace detail { #undef XRPL_FIX #pragma push_macro("XRPL_RETIRE") #undef XRPL_RETIRE -#pragma push_macro("XRPL_ABANDON") -#undef XRPL_ABANDON #define XRPL_FEATURE(name, supported, vote) +1 #define XRPL_FIX(name, supported, vote) +1 #define XRPL_RETIRE(name) +1 -#define XRPL_ABANDON(name) +1 // This value SHOULD be equal to the number of amendments registered in // Feature.cpp. Because it's only used to reserve storage, and determine how @@ -122,8 +119,6 @@ static constexpr std::size_t numFeatures = #pragma pop_macro("XRPL_FIX") #undef XRPL_FEATURE #pragma pop_macro("XRPL_FEATURE") -#undef XRPL_ABANDON -#pragma pop_macro("XRPL_ABANDON") /** Amendments that this server supports and the default voting behavior. Whether they are enabled depends on the Rules defined in the validated @@ -365,13 +360,10 @@ foreachFeature(FeatureBitset bs, F&& f) #undef XRPL_FIX #pragma push_macro("XRPL_RETIRE") #undef XRPL_RETIRE -#pragma push_macro("XRPL_ABANDON") -#undef XRPL_ABANDON #define XRPL_FEATURE(name, supported, vote) extern uint256 const feature##name; #define XRPL_FIX(name, supported, vote) extern uint256 const fix##name; #define XRPL_RETIRE(name) -#define XRPL_ABANDON(name) #include @@ -381,8 +373,6 @@ foreachFeature(FeatureBitset bs, F&& f) #pragma pop_macro("XRPL_FIX") #undef XRPL_FEATURE #pragma pop_macro("XRPL_FEATURE") -#undef XRPL_ABANDON -#pragma pop_macro("XRPL_ABANDON") } // namespace ripple diff --git a/include/xrpl/protocol/detail/features.macro b/include/xrpl/protocol/detail/features.macro index 63c1b2258b..c83dacfa73 100644 --- a/include/xrpl/protocol/detail/features.macro +++ b/include/xrpl/protocol/detail/features.macro @@ -26,9 +26,6 @@ #if !defined(XRPL_RETIRE) #error "undefined macro: XRPL_RETIRE" #endif -#if !defined(XRPL_ABANDON) -#error "undefined macro: XRPL_ABANDON" -#endif // Add new amendments to the top of this list. // Keep it sorted in reverse chronological order. @@ -133,11 +130,6 @@ XRPL_FIX (NFTokenDirV1, Supported::yes, VoteBehavior::Obsolete) XRPL_FEATURE(NonFungibleTokensV1, Supported::yes, VoteBehavior::Obsolete) XRPL_FEATURE(CryptoConditionsSuite, Supported::yes, VoteBehavior::Obsolete) -// The following amendments were never supported, never enabled, and -// we've abanded them. These features should never be in the ledger, -// and we've removed all the related code. -XRPL_ABANDON(OwnerPaysFee) - // The following amendments have been active for at least two years. Their // pre-amendment code has been removed and the identifiers are deprecated. // All known amendments and amendments that may appear in a validated diff --git a/src/libxrpl/protocol/Feature.cpp b/src/libxrpl/protocol/Feature.cpp index 478b155387..eeeee1c185 100644 --- a/src/libxrpl/protocol/Feature.cpp +++ b/src/libxrpl/protocol/Feature.cpp @@ -254,7 +254,7 @@ FeatureCollections::registerFeature( { check(!readOnly, "Attempting to register a feature after startup."); check( - support == Supported::yes || vote != VoteBehavior::DefaultYes, + support == Supported::yes || vote == VoteBehavior::DefaultNo, "Invalid feature parameters. 
Must be supported to be up-voted."); Feature const* i = getByName(name); if (!i) @@ -268,7 +268,7 @@ FeatureCollections::registerFeature( features.emplace_back(name, f); auto const getAmendmentSupport = [=]() { - if (vote == VoteBehavior::Obsolete && support == Supported::yes) + if (vote == VoteBehavior::Obsolete) return AmendmentSupport::Retired; return support == Supported::yes ? AmendmentSupport::Supported : AmendmentSupport::Unsupported; @@ -398,14 +398,6 @@ retireFeature(std::string const& name) return registerFeature(name, Supported::yes, VoteBehavior::Obsolete); } -// Abandoned features are not in the ledger and have no code controlled by the -// feature. They were never supported, and cannot be voted on. -uint256 -abandonFeature(std::string const& name) -{ - return registerFeature(name, Supported::no, VoteBehavior::Obsolete); -} - /** Tell FeatureCollections when registration is complete. */ bool registrationIsDone() @@ -440,8 +432,6 @@ featureToName(uint256 const& f) #undef XRPL_FIX #pragma push_macro("XRPL_RETIRE") #undef XRPL_RETIRE -#pragma push_macro("XRPL_ABANDON") -#undef XRPL_ABANDON #define XRPL_FEATURE(name, supported, vote) \ uint256 const feature##name = registerFeature(#name, supported, vote); @@ -453,11 +443,6 @@ featureToName(uint256 const& f) [[deprecated("The referenced amendment has been retired")]] \ [[maybe_unused]] \ uint256 const retired##name = retireFeature(#name); - -#define XRPL_ABANDON(name) \ - [[deprecated("The referenced amendment has been abandoned")]] \ - [[maybe_unused]] \ - uint256 const abandoned##name = abandonFeature(#name); // clang-format on #include @@ -468,8 +453,6 @@ featureToName(uint256 const& f) #pragma pop_macro("XRPL_FIX") #undef XRPL_FEATURE #pragma pop_macro("XRPL_FEATURE") -#undef XRPL_ABANDON -#pragma pop_macro("XRPL_ABANDON") // All of the features should now be registered, since variables in a cpp file // are initialized from top to bottom. From 433eeabfa561751f78873dafc3918f8d066c42a0 Mon Sep 17 00:00:00 2001 From: Vlad <129996061+vvysokikh1@users.noreply.github.com> Date: Wed, 23 Jul 2025 14:57:51 +0100 Subject: [PATCH 089/244] chore: Remove unused code after flow cross retirement (#5575) After the `FlowCross` amendment was retired (#5562), there was still some unused code left. This change removes the remaining remnants. --- src/test/app/Taker_test.cpp | 1394 ----------------------- src/xrpld/app/tx/detail/CreateOffer.cpp | 415 +------ src/xrpld/app/tx/detail/CreateOffer.h | 65 +- src/xrpld/app/tx/detail/Taker.cpp | 863 -------------- src/xrpld/app/tx/detail/Taker.h | 341 ------ 5 files changed, 14 insertions(+), 3064 deletions(-) delete mode 100644 src/test/app/Taker_test.cpp delete mode 100644 src/xrpld/app/tx/detail/Taker.cpp delete mode 100644 src/xrpld/app/tx/detail/Taker.h diff --git a/src/test/app/Taker_test.cpp b/src/test/app/Taker_test.cpp deleted file mode 100644 index 3b3f338625..0000000000 --- a/src/test/app/Taker_test.cpp +++ /dev/null @@ -1,1394 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2013 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. 
- - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#include - -#include - -namespace ripple { - -class Taker_test : public beast::unit_test::suite -{ - static bool const Buy = false; - static bool const Sell = true; - - class TestTaker : public BasicTaker - { - STAmount funds_; - STAmount cross_funds; - - public: - TestTaker( - CrossType cross_type, - Amounts const& amount, - Quality const& quality, - STAmount const& funds, - std::uint32_t flags, - Rate const& rate_in, - Rate const& rate_out) - : BasicTaker( - cross_type, - AccountID(0x4701), - amount, - quality, - flags, - rate_in, - rate_out) - , funds_(funds) - { - } - - void - set_funds(STAmount const& funds) - { - cross_funds = funds; - } - - STAmount - get_funds(AccountID const& owner, STAmount const& funds) const override - { - if (owner == account()) - return funds_; - - return cross_funds; - } - - Amounts - cross(Amounts offer, Quality quality) - { - if (reject(quality)) - return Amounts(offer.in.zeroed(), offer.out.zeroed()); - - // we need to emulate "unfunded offers" behavior - if (get_funds(AccountID(0x4702), offer.out) == beast::zero) - return Amounts(offer.in.zeroed(), offer.out.zeroed()); - - if (done()) - return Amounts(offer.in.zeroed(), offer.out.zeroed()); - - auto result = do_cross(offer, quality, AccountID(0x4702)); - - funds_ -= result.order.in; - - return result.order; - } - - std::pair - cross( - Amounts offer1, - Quality quality1, - Amounts offer2, - Quality quality2) - { - /* check if composed quality should be rejected */ - Quality const quality(composed_quality(quality1, quality2)); - - if (reject(quality)) - return std::make_pair( - Amounts{offer1.in.zeroed(), offer1.out.zeroed()}, - Amounts{offer2.in.zeroed(), offer2.out.zeroed()}); - - if (done()) - return std::make_pair( - Amounts{offer1.in.zeroed(), offer1.out.zeroed()}, - Amounts{offer2.in.zeroed(), offer2.out.zeroed()}); - - auto result = do_cross( - offer1, - quality1, - AccountID(0x4703), - offer2, - quality2, - AccountID(0x4704)); - - return std::make_pair(result.first.order, result.second.order); - } - }; - -private: - Issue const& - usd() const - { - static Issue const issue( - Currency(0x5553440000000000), AccountID(0x4985601)); - return issue; - } - - Issue const& - eur() const - { - static Issue const issue( - Currency(0x4555520000000000), AccountID(0x4985602)); - return issue; - } - - Issue const& - xrp() const - { - static Issue const issue(xrpCurrency(), xrpAccount()); - return issue; - } - - STAmount - parse_amount(std::string const& amount, Issue const& issue) - { - return amountFromString(issue, amount); - } - - Amounts - parse_amounts( - std::string const& amount_in, - Issue const& issue_in, - std::string const& amount_out, - Issue const& issue_out) - { - STAmount const in(parse_amount(amount_in, issue_in)); - STAmount const out(parse_amount(amount_out, issue_out)); - - return {in, out}; - } - - struct cross_attempt_offer - { - cross_attempt_offer(std::string const& in_, std::string const& out_) - : in(in_), 
out(out_) - { - } - - std::string in; - std::string out; - }; - -private: - std::string - format_amount(STAmount const& amount) - { - std::string txt = amount.getText(); - txt += "/"; - txt += to_string(amount.issue().currency); - return txt; - } - - void - attempt( - bool sell, - std::string name, - Quality taker_quality, - cross_attempt_offer const offer, - std::string const funds, - Quality cross_quality, - cross_attempt_offer const cross, - std::string const cross_funds, - cross_attempt_offer const flow, - Issue const& issue_in, - Issue const& issue_out, - Rate rate_in = parityRate, - Rate rate_out = parityRate) - { - Amounts taker_offer( - parse_amounts(offer.in, issue_in, offer.out, issue_out)); - - Amounts cross_offer( - parse_amounts(cross.in, issue_in, cross.out, issue_out)); - - CrossType cross_type; - - if (isXRP(issue_out)) - cross_type = CrossType::IouToXrp; - else if (isXRP(issue_in)) - cross_type = CrossType::XrpToIou; - else - cross_type = CrossType::IouToIou; - - // FIXME: We are always invoking the IOU-to-IOU taker. We should select - // the correct type dynamically. - TestTaker taker( - cross_type, - taker_offer, - taker_quality, - parse_amount(funds, issue_in), - sell ? tfSell : 0, - rate_in, - rate_out); - - taker.set_funds(parse_amount(cross_funds, issue_out)); - - auto result = taker.cross(cross_offer, cross_quality); - - Amounts const expected( - parse_amounts(flow.in, issue_in, flow.out, issue_out)); - - BEAST_EXPECT(expected == result); - - if (expected != result) - { - log << "Expected: " << format_amount(expected.in) << " : " - << format_amount(expected.out) << '\n' - << " Actual: " << format_amount(result.in) << " : " - << format_amount(result.out) << std::endl; - } - } - - Quality - get_quality(std::string in, std::string out) - { - return Quality(parse_amounts(in, xrp(), out, xrp())); - } - -public: - // Notation for clamp scenario descriptions: - // - // IN:OUT (with the last in the list being limiting factor) - // N = Nothing - // T = Taker Offer Balance - // A = Taker Account Balance - // B = Owner Account Balance - // - // (s) = sell semantics: taker wants unlimited output - // (b) = buy semantics: taker wants a limited amount out - - // NIKB TODO: Augment TestTaker so currencies and rates can be specified - // once without need for repetition. 
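    // For example, in the cases below an attempt named "N:B" run with Sell
    // semantics reads as: nothing constrains the taker's input side, and the
    // owner's account balance is the binding limit on the output side.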
- void - test_xrp_to_iou() - { - testcase("XRP Quantization: input"); - - Quality q1 = get_quality("1", "1"); - - for (auto NumberSwitchOver : {false, true}) - { - NumberSO stNumberSO{NumberSwitchOver}; - // TAKER OWNER - // QUAL OFFER FUNDS QUAL OFFER FUNDS - // EXPECTED - // XRP USD - attempt( - Sell, - "N:N", - q1, - {"2", "2"}, - "2", - q1, - {"2", "2"}, - "2", - {"2", "2"}, - xrp(), - usd()); - if (NumberSwitchOver) - { - attempt( - Sell, - "N:B", - q1, - {"2", "2"}, - "2", - q1, - {"2", "2"}, - "1.8", - {"2", "1.8"}, - xrp(), - usd()); - } - else - { - attempt( - Sell, - "N:B", - q1, - {"2", "2"}, - "2", - q1, - {"2", "2"}, - "1.8", - {"1", "1.8"}, - xrp(), - usd()); - } - attempt( - Buy, - "N:T", - q1, - {"1", "1"}, - "2", - q1, - {"2", "2"}, - "2", - {"1", "1"}, - xrp(), - usd()); - attempt( - Buy, - "N:BT", - q1, - {"1", "1"}, - "2", - q1, - {"2", "2"}, - "1.8", - {"1", "1"}, - xrp(), - usd()); - if (NumberSwitchOver) - { - attempt( - Buy, - "N:TB", - q1, - {"1", "1"}, - "2", - q1, - {"2", "2"}, - "0.8", - {"1", "0.8"}, - xrp(), - usd()); - } - else - { - attempt( - Buy, - "N:TB", - q1, - {"1", "1"}, - "2", - q1, - {"2", "2"}, - "0.8", - {"0", "0.8"}, - xrp(), - usd()); - } - attempt( - Sell, - "T:N", - q1, - {"1", "1"}, - "2", - q1, - {"2", "2"}, - "2", - {"1", "1"}, - xrp(), - usd()); - if (NumberSwitchOver) - { - attempt( - Sell, - "T:B", - q1, - {"1", "1"}, - "2", - q1, - {"2", "2"}, - "1.8", - {"1", "1"}, - xrp(), - usd()); - } - else - { - attempt( - Sell, - "T:B", - q1, - {"1", "1"}, - "2", - q1, - {"2", "2"}, - "1.8", - {"1", "1.8"}, - xrp(), - usd()); - } - attempt( - Buy, - "T:T", - q1, - {"1", "1"}, - "2", - q1, - {"2", "2"}, - "2", - {"1", "1"}, - xrp(), - usd()); - attempt( - Buy, - "T:BT", - q1, - {"1", "1"}, - "2", - q1, - {"2", "2"}, - "1.8", - {"1", "1"}, - xrp(), - usd()); - if (NumberSwitchOver) - { - attempt( - Buy, - "T:TB", - q1, - {"1", "1"}, - "2", - q1, - {"2", "2"}, - "0.8", - {"1", "0.8"}, - xrp(), - usd()); - } - else - { - attempt( - Buy, - "T:TB", - q1, - {"1", "1"}, - "2", - q1, - {"2", "2"}, - "0.8", - {"0", "0.8"}, - xrp(), - usd()); - } - - attempt( - Sell, - "A:N", - q1, - {"2", "2"}, - "1", - q1, - {"2", "2"}, - "2", - {"1", "1"}, - xrp(), - usd()); - if (NumberSwitchOver) - { - attempt( - Sell, - "A:B", - q1, - {"2", "2"}, - "1", - q1, - {"2", "2"}, - "1.8", - {"1", "1"}, - xrp(), - usd()); - } - else - { - attempt( - Sell, - "A:B", - q1, - {"2", "2"}, - "1", - q1, - {"2", "2"}, - "1.8", - {"1", "1.8"}, - xrp(), - usd()); - } - attempt( - Buy, - "A:T", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "3", - {"1", "1"}, - xrp(), - usd()); - attempt( - Buy, - "A:BT", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "2.4", - {"1", "1"}, - xrp(), - usd()); - if (NumberSwitchOver) - { - attempt( - Buy, - "A:TB", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "0.8", - {"1", "0.8"}, - xrp(), - usd()); - } - else - { - attempt( - Buy, - "A:TB", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "0.8", - {"0", "0.8"}, - xrp(), - usd()); - } - - attempt( - Sell, - "TA:N", - q1, - {"2", "2"}, - "1", - q1, - {"2", "2"}, - "2", - {"1", "1"}, - xrp(), - usd()); - if (NumberSwitchOver) - { - attempt( - Sell, - "TA:B", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "1.8", - {"1", "1"}, - xrp(), - usd()); - } - else - { - attempt( - Sell, - "TA:B", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "1.8", - {"1", "1.8"}, - xrp(), - usd()); - } - attempt( - Buy, - "TA:T", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "3", - {"1", "1"}, - xrp(), - usd()); - if 
(NumberSwitchOver) - { - attempt( - Buy, - "TA:BT", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "1.8", - {"1", "1"}, - xrp(), - usd()); - attempt( - Buy, - "TA:TB", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "1.8", - {"1", "1"}, - xrp(), - usd()); - } - else - { - attempt( - Buy, - "TA:BT", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "1.8", - {"1", "1.8"}, - xrp(), - usd()); - attempt( - Buy, - "TA:TB", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "1.8", - {"1", "1.8"}, - xrp(), - usd()); - } - - attempt( - Sell, - "AT:N", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "3", - {"1", "1"}, - xrp(), - usd()); - if (NumberSwitchOver) - { - attempt( - Sell, - "AT:B", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "1.8", - {"1", "1"}, - xrp(), - usd()); - } - else - { - attempt( - Sell, - "AT:B", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "1.8", - {"1", "1.8"}, - xrp(), - usd()); - } - attempt( - Buy, - "AT:T", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "3", - {"1", "1"}, - xrp(), - usd()); - if (NumberSwitchOver) - { - attempt( - Buy, - "AT:BT", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "1.8", - {"1", "1"}, - xrp(), - usd()); - attempt( - Buy, - "AT:TB", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "0.8", - {"1", "0.8"}, - xrp(), - usd()); - } - else - { - attempt( - Buy, - "AT:BT", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "1.8", - {"1", "1.8"}, - xrp(), - usd()); - attempt( - Buy, - "AT:TB", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "0.8", - {"0", "0.8"}, - xrp(), - usd()); - } - } - } - - void - test_iou_to_xrp() - { - testcase("XRP Quantization: output"); - - for (auto NumberSwitchOver : {false, true}) - { - NumberSO stNumberSO{NumberSwitchOver}; - Quality q1 = get_quality("1", "1"); - - // TAKER OWNER - // QUAL OFFER FUNDS QUAL OFFER FUNDS - // EXPECTED - // USD XRP - attempt( - Sell, - "N:N", - q1, - {"3", "3"}, - "3", - q1, - {"3", "3"}, - "3", - {"3", "3"}, - usd(), - xrp()); - attempt( - Sell, - "N:B", - q1, - {"3", "3"}, - "3", - q1, - {"3", "3"}, - "2", - {"2", "2"}, - usd(), - xrp()); - if (NumberSwitchOver) - { - attempt( - Buy, - "N:T", - q1, - {"3", "3"}, - "2.5", - q1, - {"5", "5"}, - "5", - {"2.5", "3"}, - usd(), - xrp()); - attempt( - Buy, - "N:BT", - q1, - {"3", "3"}, - "1.5", - q1, - {"5", "5"}, - "4", - {"1.5", "2"}, - usd(), - xrp()); - } - else - { - attempt( - Buy, - "N:T", - q1, - {"3", "3"}, - "2.5", - q1, - {"5", "5"}, - "5", - {"2.5", "2"}, - usd(), - xrp()); - attempt( - Buy, - "N:BT", - q1, - {"3", "3"}, - "1.5", - q1, - {"5", "5"}, - "4", - {"1.5", "1"}, - usd(), - xrp()); - } - attempt( - Buy, - "N:TB", - q1, - {"3", "3"}, - "2.2", - q1, - {"5", "5"}, - "1", - {"1", "1"}, - usd(), - xrp()); - - attempt( - Sell, - "T:N", - q1, - {"1", "1"}, - "2", - q1, - {"2", "2"}, - "2", - {"1", "1"}, - usd(), - xrp()); - attempt( - Sell, - "T:B", - q1, - {"2", "2"}, - "2", - q1, - {"3", "3"}, - "1", - {"1", "1"}, - usd(), - xrp()); - attempt( - Buy, - "T:T", - q1, - {"1", "1"}, - "2", - q1, - {"2", "2"}, - "2", - {"1", "1"}, - usd(), - xrp()); - attempt( - Buy, - "T:BT", - q1, - {"1", "1"}, - "2", - q1, - {"3", "3"}, - "2", - {"1", "1"}, - usd(), - xrp()); - attempt( - Buy, - "T:TB", - q1, - {"2", "2"}, - "2", - q1, - {"3", "3"}, - "1", - {"1", "1"}, - usd(), - xrp()); - - if (NumberSwitchOver) - { - attempt( - Sell, - "A:N", - q1, - {"2", "2"}, - "1.5", - q1, - {"2", "2"}, - "2", - {"1.5", "2"}, - usd(), - xrp()); - attempt( - Sell, - "A:B", - q1, - {"2", "2"}, - "1.8", - q1, - {"3", "3"}, - "2", - {"1.8", "2"}, - 
usd(), - xrp()); - } - else - { - attempt( - Sell, - "A:N", - q1, - {"2", "2"}, - "1.5", - q1, - {"2", "2"}, - "2", - {"1.5", "1"}, - usd(), - xrp()); - attempt( - Sell, - "A:B", - q1, - {"2", "2"}, - "1.8", - q1, - {"3", "3"}, - "2", - {"1.8", "1"}, - usd(), - xrp()); - } - attempt( - Buy, - "A:T", - q1, - {"2", "2"}, - "1.2", - q1, - {"3", "3"}, - "3", - {"1.2", "1"}, - usd(), - xrp()); - if (NumberSwitchOver) - { - attempt( - Buy, - "A:BT", - q1, - {"2", "2"}, - "1.5", - q1, - {"4", "4"}, - "3", - {"1.5", "2"}, - usd(), - xrp()); - } - else - { - attempt( - Buy, - "A:BT", - q1, - {"2", "2"}, - "1.5", - q1, - {"4", "4"}, - "3", - {"1.5", "1"}, - usd(), - xrp()); - } - attempt( - Buy, - "A:TB", - q1, - {"2", "2"}, - "1.5", - q1, - {"4", "4"}, - "1", - {"1", "1"}, - usd(), - xrp()); - - if (NumberSwitchOver) - { - attempt( - Sell, - "TA:N", - q1, - {"2", "2"}, - "1.5", - q1, - {"2", "2"}, - "2", - {"1.5", "2"}, - usd(), - xrp()); - } - else - { - attempt( - Sell, - "TA:N", - q1, - {"2", "2"}, - "1.5", - q1, - {"2", "2"}, - "2", - {"1.5", "1"}, - usd(), - xrp()); - } - attempt( - Sell, - "TA:B", - q1, - {"2", "2"}, - "1.5", - q1, - {"3", "3"}, - "1", - {"1", "1"}, - usd(), - xrp()); - if (NumberSwitchOver) - { - attempt( - Buy, - "TA:T", - q1, - {"2", "2"}, - "1.5", - q1, - {"3", "3"}, - "3", - {"1.5", "2"}, - usd(), - xrp()); - attempt( - Buy, - "TA:BT", - q1, - {"2", "2"}, - "1.8", - q1, - {"4", "4"}, - "3", - {"1.8", "2"}, - usd(), - xrp()); - } - else - { - attempt( - Buy, - "TA:T", - q1, - {"2", "2"}, - "1.5", - q1, - {"3", "3"}, - "3", - {"1.5", "1"}, - usd(), - xrp()); - attempt( - Buy, - "TA:BT", - q1, - {"2", "2"}, - "1.8", - q1, - {"4", "4"}, - "3", - {"1.8", "1"}, - usd(), - xrp()); - } - attempt( - Buy, - "TA:TB", - q1, - {"2", "2"}, - "1.2", - q1, - {"3", "3"}, - "1", - {"1", "1"}, - usd(), - xrp()); - - attempt( - Sell, - "AT:N", - q1, - {"2", "2"}, - "2.5", - q1, - {"4", "4"}, - "4", - {"2", "2"}, - usd(), - xrp()); - attempt( - Sell, - "AT:B", - q1, - {"2", "2"}, - "2.5", - q1, - {"3", "3"}, - "1", - {"1", "1"}, - usd(), - xrp()); - attempt( - Buy, - "AT:T", - q1, - {"2", "2"}, - "2.5", - q1, - {"3", "3"}, - "3", - {"2", "2"}, - usd(), - xrp()); - attempt( - Buy, - "AT:BT", - q1, - {"2", "2"}, - "2.5", - q1, - {"4", "4"}, - "3", - {"2", "2"}, - usd(), - xrp()); - attempt( - Buy, - "AT:TB", - q1, - {"2", "2"}, - "2.5", - q1, - {"3", "3"}, - "1", - {"1", "1"}, - usd(), - xrp()); - } - } - - void - test_iou_to_iou() - { - testcase("IOU to IOU"); - - for (auto NumberSwitchOver : {false, true}) - { - NumberSO stNumberSO{NumberSwitchOver}; - Quality q1 = get_quality("1", "1"); - - // Highly exaggerated 50% transfer rate for the input and output: - Rate const rate{parityRate.value + (parityRate.value / 2)}; - - // TAKER OWNER - // QUAL OFFER FUNDS QUAL OFFER FUNDS - // EXPECTED - // EUR USD - attempt( - Sell, - "N:N", - q1, - {"2", "2"}, - "10", - q1, - {"2", "2"}, - "10", - {"2", "2"}, - eur(), - usd(), - rate, - rate); - if (NumberSwitchOver) - { - attempt( - Sell, - "N:B", - q1, - {"4", "4"}, - "10", - q1, - {"4", "4"}, - "4", - {"2.666666666666667", "2.666666666666667"}, - eur(), - usd(), - rate, - rate); - } - else - { - attempt( - Sell, - "N:B", - q1, - {"4", "4"}, - "10", - q1, - {"4", "4"}, - "4", - {"2.666666666666666", "2.666666666666666"}, - eur(), - usd(), - rate, - rate); - } - attempt( - Buy, - "N:T", - q1, - {"1", "1"}, - "10", - q1, - {"2", "2"}, - "10", - {"1", "1"}, - eur(), - usd(), - rate, - rate); - attempt( - Buy, - "N:BT", - q1, - {"2", "2"}, - "10", - q1, - 
{"6", "6"}, - "5", - {"2", "2"}, - eur(), - usd(), - rate, - rate); - attempt( - Buy, - "N:TB", - q1, - {"2", "2"}, - "2", - q1, - {"6", "6"}, - "1", - {"0.6666666666666667", "0.6666666666666667"}, - eur(), - usd(), - rate, - rate); - if (NumberSwitchOver) - { - attempt( - Sell, - "A:N", - q1, - {"2", "2"}, - "2.5", - q1, - {"2", "2"}, - "10", - {"1.666666666666667", "1.666666666666667"}, - eur(), - usd(), - rate, - rate); - } - else - { - attempt( - Sell, - "A:N", - q1, - {"2", "2"}, - "2.5", - q1, - {"2", "2"}, - "10", - {"1.666666666666666", "1.666666666666666"}, - eur(), - usd(), - rate, - rate); - } - } - } - - void - run() override - { - test_xrp_to_iou(); - test_iou_to_xrp(); - test_iou_to_iou(); - } -}; - -BEAST_DEFINE_TESTSUITE(Taker, tx, ripple); - -} // namespace ripple diff --git a/src/xrpld/app/tx/detail/CreateOffer.cpp b/src/xrpld/app/tx/detail/CreateOffer.cpp index 9543a4fcd9..3cfae92cbd 100644 --- a/src/xrpld/app/tx/detail/CreateOffer.cpp +++ b/src/xrpld/app/tx/detail/CreateOffer.cpp @@ -26,9 +26,9 @@ #include #include #include -#include #include #include +#include #include namespace ripple { @@ -311,374 +311,6 @@ CreateOffer::checkAcceptAsset( return tesSUCCESS; } -bool -CreateOffer::dry_offer(ApplyView& view, Offer const& offer) -{ - if (offer.fully_consumed()) - return true; - auto const amount = accountFunds( - view, - offer.owner(), - offer.amount().out, - fhZERO_IF_FROZEN, - ctx_.app.journal("View")); - return (amount <= beast::zero); -} - -std::pair -CreateOffer::select_path( - bool have_direct, - OfferStream const& direct, - bool have_bridge, - OfferStream const& leg1, - OfferStream const& leg2) -{ - // If we don't have any viable path, why are we here?! - XRPL_ASSERT( - have_direct || have_bridge, - "ripple::CreateOffer::select_path : valid inputs"); - - // If there's no bridged path, the direct is the best by default. - if (!have_bridge) - return std::make_pair(true, direct.tip().quality()); - - Quality const bridged_quality( - composed_quality(leg1.tip().quality(), leg2.tip().quality())); - - if (have_direct) - { - // We compare the quality of the composed quality of the bridged - // offers and compare it against the direct offer to pick the best. - Quality const direct_quality(direct.tip().quality()); - - if (bridged_quality < direct_quality) - return std::make_pair(true, direct_quality); - } - - // Either there was no direct offer, or it didn't have a better quality - // than the bridge. - return std::make_pair(false, bridged_quality); -} - -bool -CreateOffer::reachedOfferCrossingLimit(Taker const& taker) const -{ - auto const crossings = - taker.get_direct_crossings() + (2 * taker.get_bridge_crossings()); - - // The crossing limit is part of the Ripple protocol and - // changing it is a transaction-processing change. 
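    // Note that bridged crossings count twice toward this limit (the
    // 2 * get_bridge_crossings() term above), since each one consumes
    // offers from two order books.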
- return crossings >= 850; -} - -std::pair -CreateOffer::bridged_cross( - Taker& taker, - ApplyView& view, - ApplyView& view_cancel, - NetClock::time_point const when) -{ - auto const& takerAmount = taker.original_offer(); - - XRPL_ASSERT( - !isXRP(takerAmount.in) && !isXRP(takerAmount.out), - "ripple::CreateOffer::bridged_cross : neither is XRP"); - - if (isXRP(takerAmount.in) || isXRP(takerAmount.out)) - Throw("Bridging with XRP and an endpoint."); - - OfferStream offers_direct( - view, - view_cancel, - Book(taker.issue_in(), taker.issue_out(), std::nullopt), - when, - stepCounter_, - j_); - - OfferStream offers_leg1( - view, - view_cancel, - Book(taker.issue_in(), xrpIssue(), std::nullopt), - when, - stepCounter_, - j_); - - OfferStream offers_leg2( - view, - view_cancel, - Book(xrpIssue(), taker.issue_out(), std::nullopt), - when, - stepCounter_, - j_); - - TER cross_result = tesSUCCESS; - - // Note the subtle distinction here: self-offers encountered in the - // bridge are taken, but self-offers encountered in the direct book - // are not. - bool have_bridge = offers_leg1.step() && offers_leg2.step(); - bool have_direct = step_account(offers_direct, taker); - int count = 0; - - auto viewJ = ctx_.app.journal("View"); - - // Modifying the order or logic of the operations in the loop will cause - // a protocol breaking change. - while (have_direct || have_bridge) - { - bool leg1_consumed = false; - bool leg2_consumed = false; - bool direct_consumed = false; - - auto const [use_direct, quality] = select_path( - have_direct, offers_direct, have_bridge, offers_leg1, offers_leg2); - - // We are always looking at the best quality; we are done with - // crossing as soon as we cross the quality boundary. - if (taker.reject(quality)) - break; - - count++; - - if (use_direct) - { - if (auto stream = j_.debug()) - { - stream << count << " Direct:"; - stream << " offer: " << offers_direct.tip(); - stream << " in: " << offers_direct.tip().amount().in; - stream << " out: " << offers_direct.tip().amount().out; - stream << " owner: " << offers_direct.tip().owner(); - stream << " funds: " - << accountFunds( - view, - offers_direct.tip().owner(), - offers_direct.tip().amount().out, - fhIGNORE_FREEZE, - viewJ); - } - - cross_result = taker.cross(offers_direct.tip()); - - JLOG(j_.debug()) << "Direct Result: " << transToken(cross_result); - - if (dry_offer(view, offers_direct.tip())) - { - direct_consumed = true; - have_direct = step_account(offers_direct, taker); - } - } - else - { - if (auto stream = j_.debug()) - { - auto const owner1_funds_before = accountFunds( - view, - offers_leg1.tip().owner(), - offers_leg1.tip().amount().out, - fhIGNORE_FREEZE, - viewJ); - - auto const owner2_funds_before = accountFunds( - view, - offers_leg2.tip().owner(), - offers_leg2.tip().amount().out, - fhIGNORE_FREEZE, - viewJ); - - stream << count << " Bridge:"; - stream << " offer1: " << offers_leg1.tip(); - stream << " in: " << offers_leg1.tip().amount().in; - stream << " out: " << offers_leg1.tip().amount().out; - stream << " owner: " << offers_leg1.tip().owner(); - stream << " funds: " << owner1_funds_before; - stream << " offer2: " << offers_leg2.tip(); - stream << " in: " << offers_leg2.tip().amount().in; - stream << " out: " << offers_leg2.tip().amount().out; - stream << " owner: " << offers_leg2.tip().owner(); - stream << " funds: " << owner2_funds_before; - } - - cross_result = taker.cross(offers_leg1.tip(), offers_leg2.tip()); - - JLOG(j_.debug()) << "Bridge Result: " << transToken(cross_result); - - if 
(view.rules().enabled(fixTakerDryOfferRemoval)) - { - // have_bridge can be true the next time 'round only if - // neither of the OfferStreams are dry. - leg1_consumed = dry_offer(view, offers_leg1.tip()); - if (leg1_consumed) - have_bridge &= offers_leg1.step(); - - leg2_consumed = dry_offer(view, offers_leg2.tip()); - if (leg2_consumed) - have_bridge &= offers_leg2.step(); - } - else - { - // This old behavior may leave an empty offer in the book for - // the second leg. - if (dry_offer(view, offers_leg1.tip())) - { - leg1_consumed = true; - have_bridge = (have_bridge && offers_leg1.step()); - } - if (dry_offer(view, offers_leg2.tip())) - { - leg2_consumed = true; - have_bridge = (have_bridge && offers_leg2.step()); - } - } - } - - if (cross_result != tesSUCCESS) - { - cross_result = tecFAILED_PROCESSING; - break; - } - - if (taker.done()) - { - JLOG(j_.debug()) << "The taker reports he's done during crossing!"; - break; - } - - if (reachedOfferCrossingLimit(taker)) - { - JLOG(j_.debug()) << "The offer crossing limit has been exceeded!"; - break; - } - - // Postcondition: If we aren't done, then we *must* have consumed at - // least one offer fully. - XRPL_ASSERT( - direct_consumed || leg1_consumed || leg2_consumed, - "ripple::CreateOffer::bridged_cross : consumed an offer"); - - if (!direct_consumed && !leg1_consumed && !leg2_consumed) - Throw( - "bridged crossing: nothing was fully consumed."); - } - - return std::make_pair(cross_result, taker.remaining_offer()); -} - -std::pair -CreateOffer::direct_cross( - Taker& taker, - ApplyView& view, - ApplyView& view_cancel, - NetClock::time_point const when) -{ - OfferStream offers( - view, - view_cancel, - Book(taker.issue_in(), taker.issue_out(), std::nullopt), - when, - stepCounter_, - j_); - - TER cross_result(tesSUCCESS); - int count = 0; - - bool have_offer = step_account(offers, taker); - - // Modifying the order or logic of the operations in the loop will cause - // a protocol breaking change. - while (have_offer) - { - bool direct_consumed = false; - auto& offer(offers.tip()); - - // We are done with crossing as soon as we cross the quality boundary - if (taker.reject(offer.quality())) - break; - - count++; - - if (auto stream = j_.debug()) - { - stream << count << " Direct:"; - stream << " offer: " << offer; - stream << " in: " << offer.amount().in; - stream << " out: " << offer.amount().out; - stream << "quality: " << offer.quality(); - stream << " owner: " << offer.owner(); - stream << " funds: " - << accountFunds( - view, - offer.owner(), - offer.amount().out, - fhIGNORE_FREEZE, - ctx_.app.journal("View")); - } - - cross_result = taker.cross(offer); - - JLOG(j_.debug()) << "Direct Result: " << transToken(cross_result); - - if (dry_offer(view, offer)) - { - direct_consumed = true; - have_offer = step_account(offers, taker); - } - - if (cross_result != tesSUCCESS) - { - cross_result = tecFAILED_PROCESSING; - break; - } - - if (taker.done()) - { - JLOG(j_.debug()) << "The taker reports he's done during crossing!"; - break; - } - - if (reachedOfferCrossingLimit(taker)) - { - JLOG(j_.debug()) << "The offer crossing limit has been exceeded!"; - break; - } - - // Postcondition: If we aren't done, then we *must* have consumed the - // offer on the books fully! 
- XRPL_ASSERT( - direct_consumed, - "ripple::CreateOffer::direct_cross : consumed an offer"); - - if (!direct_consumed) - Throw( - "direct crossing: nothing was fully consumed."); - } - - return std::make_pair(cross_result, taker.remaining_offer()); -} - -// Step through the stream for as long as possible, skipping any offers -// that are from the taker or which cross the taker's threshold. -// Return false if the is no offer in the book, true otherwise. -bool -CreateOffer::step_account(OfferStream& stream, Taker const& taker) -{ - while (stream.step()) - { - auto const& offer = stream.tip(); - - // This offer at the tip crosses the taker's threshold. We're done. - if (taker.reject(offer.quality())) - return true; - - // This offer at the tip is not from the taker. We're done. - if (offer.owner() != taker.account()) - return true; - } - - // We ran out of offers. Can't advance. - return false; -} - std::pair CreateOffer::flowCross( PaymentSandbox& psb, @@ -883,21 +515,6 @@ CreateOffer::flowCross( return {tecINTERNAL, takerAmount}; } -std::pair -CreateOffer::cross( - Sandbox& sb, - Sandbox& sbCancel, - Amounts const& takerAmount, - std::optional const& domainID) -{ - PaymentSandbox psbFlow{&sb}; - PaymentSandbox psbCancelFlow{&sbCancel}; - auto const ret = flowCross(psbFlow, psbCancelFlow, takerAmount, domainID); - psbFlow.apply(sb); - psbCancelFlow.apply(sbCancel); - return ret; -} - std::string CreateOffer::format_amount(STAmount const& amount) { @@ -907,20 +524,6 @@ CreateOffer::format_amount(STAmount const& amount) return txt; } -void -CreateOffer::preCompute() -{ - cross_type_ = CrossType::IouToIou; - bool const pays_xrp = ctx_.tx.getFieldAmount(sfTakerPays).native(); - bool const gets_xrp = ctx_.tx.getFieldAmount(sfTakerGets).native(); - if (pays_xrp && !gets_xrp) - cross_type_ = CrossType::IouToXrp; - else if (gets_xrp && !pays_xrp) - cross_type_ = CrossType::XrpToIou; - - return Transactor::preCompute(); -} - TER CreateOffer::applyHybrid( Sandbox& sb, @@ -1084,11 +687,6 @@ CreateOffer::applyGuts(Sandbox& sb, Sandbox& sbCancel) // We reverse pays and gets because during crossing we are taking. Amounts const takerAmount(saTakerGets, saTakerPays); - // The amount of the offer that is unfilled after crossing has been - // performed. It may be equal to the original amount (didn't cross), - // empty (fully crossed), or something in-between. - Amounts place_offer; - JLOG(j_.debug()) << "Attempting cross: " << to_string(takerAmount.in.issue()) << " -> " << to_string(takerAmount.out.issue()); @@ -1101,8 +699,17 @@ CreateOffer::applyGuts(Sandbox& sb, Sandbox& sbCancel) stream << " out: " << format_amount(takerAmount.out); } + // The amount of the offer that is unfilled after crossing has been + // performed. It may be equal to the original amount (didn't cross), + // empty (fully crossed), or something in-between. + Amounts place_offer; + PaymentSandbox psbFlow{&sb}; + PaymentSandbox psbCancelFlow{&sbCancel}; + std::tie(result, place_offer) = - cross(sb, sbCancel, takerAmount, domainID); + flowCross(psbFlow, psbCancelFlow, takerAmount, domainID); + psbFlow.apply(sb); + psbCancelFlow.apply(sbCancel); // We expect the implementation of cross to succeed // or give a tec. 
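With the Taker-based direct_cross and bridged_cross paths deleted above, CreateOffer::applyGuts crosses offers through the payment flow code only. Per the replacement hunk, the resulting call shape is roughly:

    PaymentSandbox psbFlow{&sb};
    PaymentSandbox psbCancelFlow{&sbCancel};
    std::tie(result, place_offer) =
        flowCross(psbFlow, psbCancelFlow, takerAmount, domainID);
    psbFlow.apply(sb);
    psbCancelFlow.apply(sbCancel);

The intermediate PaymentSandbox layers are applied back onto the outer Sandbox views only after flowCross returns, mirroring what the removed cross() wrapper used to do.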
diff --git a/src/xrpld/app/tx/detail/CreateOffer.h b/src/xrpld/app/tx/detail/CreateOffer.h index f995f4a5d6..6e3d6145b1 100644 --- a/src/xrpld/app/tx/detail/CreateOffer.h +++ b/src/xrpld/app/tx/detail/CreateOffer.h @@ -20,10 +20,10 @@ #ifndef RIPPLE_TX_CREATEOFFER_H_INCLUDED #define RIPPLE_TX_CREATEOFFER_H_INCLUDED -#include -#include #include +#include + namespace ripple { class PaymentSandbox; @@ -36,8 +36,7 @@ public: static constexpr ConsequencesFactoryType ConsequencesFactory{Custom}; /** Construct a Transactor subclass that creates an offer in the ledger. */ - explicit CreateOffer(ApplyContext& ctx) - : Transactor(ctx), stepCounter_(1000, j_) + explicit CreateOffer(ApplyContext& ctx) : Transactor(ctx) { } @@ -52,10 +51,6 @@ public: static TER preclaim(PreclaimContext const& ctx); - /** Gather information beyond what the Transactor base class gathers. */ - void - preCompute() override; - /** Precondition: fee collection is likely. Attempt to create the offer. */ TER doApply() override; @@ -73,42 +68,6 @@ private: beast::Journal const j, Issue const& issue); - bool - dry_offer(ApplyView& view, Offer const& offer); - - static std::pair - select_path( - bool have_direct, - OfferStream const& direct, - bool have_bridge, - OfferStream const& leg1, - OfferStream const& leg2); - - std::pair - bridged_cross( - Taker& taker, - ApplyView& view, - ApplyView& view_cancel, - NetClock::time_point const when); - - std::pair - direct_cross( - Taker& taker, - ApplyView& view, - ApplyView& view_cancel, - NetClock::time_point const when); - - // Step through the stream for as long as possible, skipping any offers - // that are from the taker or which cross the taker's threshold. - // Return false if the is no offer in the book, true otherwise. - static bool - step_account(OfferStream& stream, Taker const& taker); - - // True if the number of offers that have been crossed - // exceeds the limit. - bool - reachedOfferCrossingLimit(Taker const& taker) const; - // Use the payment flow code to perform offer crossing. std::pair flowCross( @@ -117,17 +76,6 @@ private: Amounts const& takerAmount, std::optional const& domainID); - // Temporary - // This is a central location that invokes both versions of cross - // so the results can be compared. Eventually this layer will be - // removed once flowCross is determined to be stable. - std::pair - cross( - Sandbox& sb, - Sandbox& sbCancel, - Amounts const& takerAmount, - std::optional const& domainID); - static std::string format_amount(STAmount const& amount); @@ -139,13 +87,6 @@ private: STAmount const& saTakerPays, STAmount const& saTakerGets, std::function)> const& setDir); - -private: - // What kind of offer we are placing - CrossType cross_type_; - - // The number of steps to take through order books while crossing - OfferStream::StepCounter stepCounter_; }; using OfferCreate = CreateOffer; diff --git a/src/xrpld/app/tx/detail/Taker.cpp b/src/xrpld/app/tx/detail/Taker.cpp deleted file mode 100644 index 9bfd6dc1d3..0000000000 --- a/src/xrpld/app/tx/detail/Taker.cpp +++ /dev/null @@ -1,863 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2014 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. 
- - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#include - -#include -#include - -namespace ripple { - -static std::string -format_amount(STAmount const& amount) -{ - std::string txt = amount.getText(); - txt += "/"; - txt += to_string(amount.issue().currency); - return txt; -} - -BasicTaker::BasicTaker( - CrossType cross_type, - AccountID const& account, - Amounts const& amount, - Quality const& quality, - std::uint32_t flags, - Rate const& rate_in, - Rate const& rate_out, - beast::Journal journal) - : account_(account) - , quality_(quality) - , threshold_(quality_) - , sell_(flags & tfSell) - , original_(amount) - , remaining_(amount) - , issue_in_(remaining_.in.issue()) - , issue_out_(remaining_.out.issue()) - , m_rate_in(rate_in) - , m_rate_out(rate_out) - , cross_type_(cross_type) - , journal_(journal) -{ - XRPL_ASSERT( - remaining_.in > beast::zero, - "ripple::BasicTaker::BasicTaker : positive remaining in"); - XRPL_ASSERT( - remaining_.out > beast::zero, - "ripple::BasicTaker::BasicTaker : positive remaining out"); - - XRPL_ASSERT( - m_rate_in.value, "ripple::BasicTaker::BasicTaker : nonzero rate in"); - XRPL_ASSERT( - m_rate_out.value, "ripple::BasicTaker::BasicTaker : nonzero rate out"); - - // If we are dealing with a particular flavor, make sure that it's the - // flavor we expect: - XRPL_ASSERT( - cross_type != CrossType::XrpToIou || - (isXRP(issue_in()) && !isXRP(issue_out())), - "ripple::BasicTaker::BasicTaker : valid cross to IOU"); - - XRPL_ASSERT( - cross_type != CrossType::IouToXrp || - (!isXRP(issue_in()) && isXRP(issue_out())), - "ripple::BasicTaker::BasicTaker : valid cross to XRP"); - - // And make sure we're not crossing XRP for XRP - XRPL_ASSERT( - !isXRP(issue_in()) || !isXRP(issue_out()), - "ripple::BasicTaker::BasicTaker : not crossing XRP for XRP"); - - // If this is a passive order, we adjust the quality so as to prevent offers - // at the same quality level from being consumed. - if (flags & tfPassive) - ++threshold_; -} - -Rate -BasicTaker::effective_rate( - Rate const& rate, - Issue const& issue, - AccountID const& from, - AccountID const& to) -{ - // If there's a transfer rate, the issuer is not involved - // and the sender isn't the same as the recipient, return - // the actual transfer rate. 
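    // For example, an IOU moving directly to or from its issuer, or a
    // transfer where from == to, is charged parityRate even if the issuer
    // has configured a nonzero transfer fee.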
- if (rate != parityRate && from != to && from != issue.account && - to != issue.account) - { - return rate; - } - - return parityRate; -} - -bool -BasicTaker::unfunded() const -{ - if (get_funds(account(), remaining_.in) > beast::zero) - return false; - - JLOG(journal_.debug()) << "Unfunded: taker is out of funds."; - return true; -} - -bool -BasicTaker::done() const -{ - // We are done if we have consumed all the input currency - if (remaining_.in <= beast::zero) - { - JLOG(journal_.debug()) - << "Done: all the input currency has been consumed."; - return true; - } - - // We are done if using buy semantics and we received the - // desired amount of output currency - if (!sell_ && (remaining_.out <= beast::zero)) - { - JLOG(journal_.debug()) << "Done: the desired amount has been received."; - return true; - } - - // We are done if the taker is out of funds - if (unfunded()) - { - JLOG(journal_.debug()) << "Done: taker out of funds."; - return true; - } - - return false; -} - -Amounts -BasicTaker::remaining_offer() const -{ - // If the taker is done, then there's no offer to place. - if (done()) - return Amounts(remaining_.in.zeroed(), remaining_.out.zeroed()); - - // Avoid math altogether if we didn't cross. - if (original_ == remaining_) - return original_; - - if (sell_) - { - XRPL_ASSERT( - remaining_.in > beast::zero, - "ripple::BasicTaker::remaining_offer : positive remaining in"); - - // We scale the output based on the remaining input: - return Amounts( - remaining_.in, - divRound(remaining_.in, quality_.rate(), issue_out_, true)); - } - - XRPL_ASSERT( - remaining_.out > beast::zero, - "ripple::BasicTaker::remaining_offer : positive remaining out"); - - // We scale the input based on the remaining output: - return Amounts( - mulRound(remaining_.out, quality_.rate(), issue_in_, true), - remaining_.out); -} - -Amounts const& -BasicTaker::original_offer() const -{ - return original_; -} - -// TODO: the presence of 'output' is an artifact caused by the fact that -// Amounts carry issue information which should be decoupled. 
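// qual_div and qual_mul below scale an amount through the quality's rate
// and clamp the result so it never exceeds the supplied 'output' bound.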
-static STAmount -qual_div(STAmount const& amount, Quality const& quality, STAmount const& output) -{ - auto result = divide(amount, quality.rate(), output.issue()); - return std::min(result, output); -} - -static STAmount -qual_mul(STAmount const& amount, Quality const& quality, STAmount const& output) -{ - auto result = multiply(amount, quality.rate(), output.issue()); - return std::min(result, output); -} - -void -BasicTaker::log_flow(char const* description, Flow const& flow) -{ - auto stream = journal_.debug(); - if (!stream) - return; - - stream << description; - - if (isXRP(issue_in())) - stream << " order in: " << format_amount(flow.order.in); - else - stream << " order in: " << format_amount(flow.order.in) - << " (issuer: " << format_amount(flow.issuers.in) << ")"; - - if (isXRP(issue_out())) - stream << " order out: " << format_amount(flow.order.out); - else - stream << " order out: " << format_amount(flow.order.out) - << " (issuer: " << format_amount(flow.issuers.out) << ")"; -} - -BasicTaker::Flow -BasicTaker::flow_xrp_to_iou( - Amounts const& order, - Quality quality, - STAmount const& owner_funds, - STAmount const& taker_funds, - Rate const& rate_out) -{ - Flow f; - f.order = order; - f.issuers.out = multiply(f.order.out, rate_out); - - log_flow("flow_xrp_to_iou", f); - - // Clamp on owner balance - if (owner_funds < f.issuers.out) - { - f.issuers.out = owner_funds; - f.order.out = divide(f.issuers.out, rate_out); - f.order.in = qual_mul(f.order.out, quality, f.order.in); - log_flow("(clamped on owner balance)", f); - } - - // Clamp if taker wants to limit the output - if (!sell_ && remaining_.out < f.order.out) - { - f.order.out = remaining_.out; - f.order.in = qual_mul(f.order.out, quality, f.order.in); - f.issuers.out = multiply(f.order.out, rate_out); - log_flow("(clamped on taker output)", f); - } - - // Clamp on the taker's funds - if (taker_funds < f.order.in) - { - f.order.in = taker_funds; - f.order.out = qual_div(f.order.in, quality, f.order.out); - f.issuers.out = multiply(f.order.out, rate_out); - log_flow("(clamped on taker funds)", f); - } - - // Clamp on remaining offer if we are not handling the second leg - // of an autobridge. - if (cross_type_ == CrossType::XrpToIou && (remaining_.in < f.order.in)) - { - f.order.in = remaining_.in; - f.order.out = qual_div(f.order.in, quality, f.order.out); - f.issuers.out = multiply(f.order.out, rate_out); - log_flow("(clamped on taker input)", f); - } - - return f; -} - -BasicTaker::Flow -BasicTaker::flow_iou_to_xrp( - Amounts const& order, - Quality quality, - STAmount const& owner_funds, - STAmount const& taker_funds, - Rate const& rate_in) -{ - Flow f; - f.order = order; - f.issuers.in = multiply(f.order.in, rate_in); - - log_flow("flow_iou_to_xrp", f); - - // Clamp on owner's funds - if (owner_funds < f.order.out) - { - f.order.out = owner_funds; - f.order.in = qual_mul(f.order.out, quality, f.order.in); - f.issuers.in = multiply(f.order.in, rate_in); - log_flow("(clamped on owner funds)", f); - } - - // Clamp if taker wants to limit the output and we are not the - // first leg of an autobridge. 
- if (!sell_ && cross_type_ == CrossType::IouToXrp) - { - if (remaining_.out < f.order.out) - { - f.order.out = remaining_.out; - f.order.in = qual_mul(f.order.out, quality, f.order.in); - f.issuers.in = multiply(f.order.in, rate_in); - log_flow("(clamped on taker output)", f); - } - } - - // Clamp on the taker's input offer - if (remaining_.in < f.order.in) - { - f.order.in = remaining_.in; - f.issuers.in = multiply(f.order.in, rate_in); - f.order.out = qual_div(f.order.in, quality, f.order.out); - log_flow("(clamped on taker input)", f); - } - - // Clamp on the taker's input balance - if (taker_funds < f.issuers.in) - { - f.issuers.in = taker_funds; - f.order.in = divide(f.issuers.in, rate_in); - f.order.out = qual_div(f.order.in, quality, f.order.out); - log_flow("(clamped on taker funds)", f); - } - - return f; -} - -BasicTaker::Flow -BasicTaker::flow_iou_to_iou( - Amounts const& order, - Quality quality, - STAmount const& owner_funds, - STAmount const& taker_funds, - Rate const& rate_in, - Rate const& rate_out) -{ - Flow f; - f.order = order; - f.issuers.in = multiply(f.order.in, rate_in); - f.issuers.out = multiply(f.order.out, rate_out); - - log_flow("flow_iou_to_iou", f); - - // Clamp on owner balance - if (owner_funds < f.issuers.out) - { - f.issuers.out = owner_funds; - f.order.out = divide(f.issuers.out, rate_out); - f.order.in = qual_mul(f.order.out, quality, f.order.in); - f.issuers.in = multiply(f.order.in, rate_in); - log_flow("(clamped on owner funds)", f); - } - - // Clamp on taker's offer - if (!sell_ && remaining_.out < f.order.out) - { - f.order.out = remaining_.out; - f.order.in = qual_mul(f.order.out, quality, f.order.in); - f.issuers.out = multiply(f.order.out, rate_out); - f.issuers.in = multiply(f.order.in, rate_in); - log_flow("(clamped on taker output)", f); - } - - // Clamp on the taker's input offer - if (remaining_.in < f.order.in) - { - f.order.in = remaining_.in; - f.issuers.in = multiply(f.order.in, rate_in); - f.order.out = qual_div(f.order.in, quality, f.order.out); - f.issuers.out = multiply(f.order.out, rate_out); - log_flow("(clamped on taker input)", f); - } - - // Clamp on the taker's input balance - if (taker_funds < f.issuers.in) - { - f.issuers.in = taker_funds; - f.order.in = divide(f.issuers.in, rate_in); - f.order.out = qual_div(f.order.in, quality, f.order.out); - f.issuers.out = multiply(f.order.out, rate_out); - log_flow("(clamped on taker funds)", f); - } - - return f; -} - -// Calculates the direct flow through the specified offer -BasicTaker::Flow -BasicTaker::do_cross(Amounts offer, Quality quality, AccountID const& owner) -{ - auto const owner_funds = get_funds(owner, offer.out); - auto const taker_funds = get_funds(account(), offer.in); - - Flow result; - - if (cross_type_ == CrossType::XrpToIou) - { - result = flow_xrp_to_iou( - offer, - quality, - owner_funds, - taker_funds, - out_rate(owner, account())); - } - else if (cross_type_ == CrossType::IouToXrp) - { - result = flow_iou_to_xrp( - offer, - quality, - owner_funds, - taker_funds, - in_rate(owner, account())); - } - else - { - result = flow_iou_to_iou( - offer, - quality, - owner_funds, - taker_funds, - in_rate(owner, account()), - out_rate(owner, account())); - } - - if (!result.sanity_check()) - Throw("Computed flow fails sanity check."); - - remaining_.out -= result.order.out; - remaining_.in -= result.order.in; - - XRPL_ASSERT( - remaining_.in >= beast::zero, - "ripple::BasicTaker::do_cross : minimum remaining in"); - - return result; -} - -// Calculates the bridged flow 
through the specified offers -std::pair -BasicTaker::do_cross( - Amounts offer1, - Quality quality1, - AccountID const& owner1, - Amounts offer2, - Quality quality2, - AccountID const& owner2) -{ - XRPL_ASSERT( - !offer1.in.native(), - "ripple::BasicTaker::do_cross : offer1 in is not XRP"); - XRPL_ASSERT( - offer1.out.native(), - "ripple::BasicTaker::do_cross : offer1 out is XRP"); - XRPL_ASSERT( - offer2.in.native(), "ripple::BasicTaker::do_cross : offer2 in is XRP"); - XRPL_ASSERT( - !offer2.out.native(), - "ripple::BasicTaker::do_cross : offer2 out is not XRP"); - - // If the taker owns the first leg of the offer, then the taker's available - // funds aren't the limiting factor for the input - the offer itself is. - auto leg1_in_funds = get_funds(account(), offer1.in); - - if (account() == owner1) - { - JLOG(journal_.trace()) << "The taker owns the first leg of a bridge."; - leg1_in_funds = std::max(leg1_in_funds, offer1.in); - } - - // If the taker owns the second leg of the offer, then the taker's available - // funds are not the limiting factor for the output - the offer itself is. - auto leg2_out_funds = get_funds(owner2, offer2.out); - - if (account() == owner2) - { - JLOG(journal_.trace()) << "The taker owns the second leg of a bridge."; - leg2_out_funds = std::max(leg2_out_funds, offer2.out); - } - - // The amount available to flow via XRP is the amount that the owner of the - // first leg of the bridge has, up to the first leg's output. - // - // But, when both legs of a bridge are owned by the same person, the amount - // of XRP that can flow between the two legs is, essentially, infinite - // since all the owner is doing is taking out XRP of his left pocket - // and putting it in his right pocket. In that case, we set the available - // XRP to the largest of the two offers. - auto xrp_funds = get_funds(owner1, offer1.out); - - if (owner1 == owner2) - { - JLOG(journal_.trace()) - << "The bridge endpoints are owned by the same account."; - xrp_funds = std::max(offer1.out, offer2.in); - } - - if (auto stream = journal_.debug()) - { - stream << "Available bridge funds:"; - stream << " leg1 in: " << format_amount(leg1_in_funds); - stream << " leg2 out: " << format_amount(leg2_out_funds); - stream << " xrp: " << format_amount(xrp_funds); - } - - auto const leg1_rate = in_rate(owner1, account()); - auto const leg2_rate = out_rate(owner2, account()); - - // Attempt to determine the maximal flow that can be achieved across each - // leg independent of the other. - auto flow1 = - flow_iou_to_xrp(offer1, quality1, xrp_funds, leg1_in_funds, leg1_rate); - - if (!flow1.sanity_check()) - Throw("Computed flow1 fails sanity check."); - - auto flow2 = - flow_xrp_to_iou(offer2, quality2, leg2_out_funds, xrp_funds, leg2_rate); - - if (!flow2.sanity_check()) - Throw("Computed flow2 fails sanity check."); - - // We now have the maximal flows across each leg individually. We need to - // equalize them, so that the amount of XRP that flows out of the first leg - // is the same as the amount of XRP that flows into the second leg. We take - // the side which is the limiting factor (if any) and adjust the other. 
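    // Illustration (hypothetical figures): if leg1 can emit 100 XRP while
    // leg2 can only pass 60 XRP along, the first leg is scaled back so that
    // exactly 60 XRP crosses the bridge; if leg1 is the smaller side, the
    // second leg is scaled back instead.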
- if (flow1.order.out < flow2.order.in) - { - // Adjust the second leg of the offer down: - flow2.order.in = flow1.order.out; - flow2.order.out = qual_div(flow2.order.in, quality2, flow2.order.out); - flow2.issuers.out = multiply(flow2.order.out, leg2_rate); - log_flow("Balancing: adjusted second leg down", flow2); - } - else if (flow1.order.out > flow2.order.in) - { - // Adjust the first leg of the offer down: - flow1.order.out = flow2.order.in; - flow1.order.in = qual_mul(flow1.order.out, quality1, flow1.order.in); - flow1.issuers.in = multiply(flow1.order.in, leg1_rate); - log_flow("Balancing: adjusted first leg down", flow2); - } - - if (flow1.order.out != flow2.order.in) - Throw("Bridged flow is out of balance."); - - remaining_.out -= flow2.order.out; - remaining_.in -= flow1.order.in; - - return std::make_pair(flow1, flow2); -} - -//============================================================================== - -Taker::Taker( - CrossType cross_type, - ApplyView& view, - AccountID const& account, - Amounts const& offer, - std::uint32_t flags, - beast::Journal journal) - : BasicTaker( - cross_type, - account, - offer, - Quality(offer), - flags, - calculateRate(view, offer.in.getIssuer(), account), - calculateRate(view, offer.out.getIssuer(), account), - journal) - , view_(view) - , xrp_flow_(0) - , direct_crossings_(0) - , bridge_crossings_(0) -{ - XRPL_ASSERT( - issue_in() == offer.in.issue(), - "ripple::Taker::Taker : issue in is a match"); - XRPL_ASSERT( - issue_out() == offer.out.issue(), - "ripple::Taker::Taker : issue out is a match"); - - if (auto stream = journal_.debug()) - { - stream << "Crossing as: " << to_string(account); - - if (isXRP(issue_in())) - stream << " Offer in: " << format_amount(offer.in); - else - stream << " Offer in: " << format_amount(offer.in) - << " (issuer: " << issue_in().account << ")"; - - if (isXRP(issue_out())) - stream << " Offer out: " << format_amount(offer.out); - else - stream << " Offer out: " << format_amount(offer.out) - << " (issuer: " << issue_out().account << ")"; - - stream << " Balance: " - << format_amount(get_funds(account, offer.in)); - } -} - -void -Taker::consume_offer(Offer& offer, Amounts const& order) -{ - if (order.in < beast::zero) - Throw("flow with negative input."); - - if (order.out < beast::zero) - Throw("flow with negative output."); - - JLOG(journal_.debug()) << "Consuming from offer " << offer; - - if (auto stream = journal_.trace()) - { - auto const& available = offer.amount(); - - stream << " in:" << format_amount(available.in); - stream << " out:" << format_amount(available.out); - } - - offer.consume(view_, order); -} - -STAmount -Taker::get_funds(AccountID const& account, STAmount const& amount) const -{ - return accountFunds(view_, account, amount, fhZERO_IF_FROZEN, journal_); -} - -TER -Taker::transferXRP( - AccountID const& from, - AccountID const& to, - STAmount const& amount) -{ - if (!isXRP(amount)) - Throw("Using transferXRP with IOU"); - - if (from == to) - return tesSUCCESS; - - // Transferring zero is equivalent to not doing a transfer - if (amount == beast::zero) - return tesSUCCESS; - - return ripple::transferXRP(view_, from, to, amount, journal_); -} - -TER -Taker::redeemIOU( - AccountID const& account, - STAmount const& amount, - Issue const& issue) -{ - if (isXRP(amount)) - Throw("Using redeemIOU with XRP"); - - if (account == issue.account) - return tesSUCCESS; - - // Transferring zero is equivalent to not doing a transfer - if (amount == beast::zero) - return tesSUCCESS; - - // If we are trying 
to redeem some amount, then the account - // must have a credit balance. - if (get_funds(account, amount) <= beast::zero) - Throw("redeemIOU has no funds to redeem"); - - auto ret = ripple::redeemIOU(view_, account, amount, issue, journal_); - - if (get_funds(account, amount) < beast::zero) - Throw("redeemIOU redeemed more funds than available"); - - return ret; -} - -TER -Taker::issueIOU( - AccountID const& account, - STAmount const& amount, - Issue const& issue) -{ - if (isXRP(amount)) - Throw("Using issueIOU with XRP"); - - if (account == issue.account) - return tesSUCCESS; - - // Transferring zero is equivalent to not doing a transfer - if (amount == beast::zero) - return tesSUCCESS; - - return ripple::issueIOU(view_, account, amount, issue, journal_); -} - -// Performs funds transfers to fill the given offer and adjusts offer. -TER -Taker::fill(BasicTaker::Flow const& flow, Offer& offer) -{ - // adjust offer - consume_offer(offer, flow.order); - - TER result = tesSUCCESS; - - if (cross_type() != CrossType::XrpToIou) - { - XRPL_ASSERT( - !isXRP(flow.order.in), "ripple::Taker::fill : order in is not XRP"); - - if (result == tesSUCCESS) - result = - redeemIOU(account(), flow.issuers.in, flow.issuers.in.issue()); - - if (result == tesSUCCESS) - result = - issueIOU(offer.owner(), flow.order.in, flow.order.in.issue()); - } - else - { - XRPL_ASSERT( - isXRP(flow.order.in), "ripple::Taker::fill : order in is XRP"); - - if (result == tesSUCCESS) - result = transferXRP(account(), offer.owner(), flow.order.in); - } - - // Now send funds from the account whose offer we're taking - if (cross_type() != CrossType::IouToXrp) - { - XRPL_ASSERT( - !isXRP(flow.order.out), - "ripple::Taker::fill : order out is not XRP"); - - if (result == tesSUCCESS) - result = redeemIOU( - offer.owner(), flow.issuers.out, flow.issuers.out.issue()); - - if (result == tesSUCCESS) - result = - issueIOU(account(), flow.order.out, flow.order.out.issue()); - } - else - { - XRPL_ASSERT( - isXRP(flow.order.out), "ripple::Taker::fill : order out is XRP"); - - if (result == tesSUCCESS) - result = transferXRP(offer.owner(), account(), flow.order.out); - } - - if (result == tesSUCCESS) - direct_crossings_++; - - return result; -} - -// Performs bridged funds transfers to fill the given offers and adjusts offers. -TER -Taker::fill( - BasicTaker::Flow const& flow1, - Offer& leg1, - BasicTaker::Flow const& flow2, - Offer& leg2) -{ - // Adjust offers accordingly - consume_offer(leg1, flow1.order); - consume_offer(leg2, flow2.order); - - TER result = tesSUCCESS; - - // Taker to leg1: IOU - if (leg1.owner() != account()) - { - if (result == tesSUCCESS) - result = redeemIOU( - account(), flow1.issuers.in, flow1.issuers.in.issue()); - - if (result == tesSUCCESS) - result = - issueIOU(leg1.owner(), flow1.order.in, flow1.order.in.issue()); - } - - // leg1 to leg2: bridging over XRP - if (result == tesSUCCESS) - result = transferXRP(leg1.owner(), leg2.owner(), flow1.order.out); - - // leg2 to Taker: IOU - if (leg2.owner() != account()) - { - if (result == tesSUCCESS) - result = redeemIOU( - leg2.owner(), flow2.issuers.out, flow2.issuers.out.issue()); - - if (result == tesSUCCESS) - result = - issueIOU(account(), flow2.order.out, flow2.order.out.issue()); - } - - if (result == tesSUCCESS) - { - bridge_crossings_++; - xrp_flow_ += flow1.order.out; - } - - return result; -} - -TER -Taker::cross(Offer& offer) -{ - // In direct crossings, at least one leg must not be XRP. 
- if (isXRP(offer.amount().in) && isXRP(offer.amount().out)) - return tefINTERNAL; - - auto const amount = - do_cross(offer.amount(), offer.quality(), offer.owner()); - - return fill(amount, offer); -} - -TER -Taker::cross(Offer& leg1, Offer& leg2) -{ - // In bridged crossings, XRP must can't be the input to the first leg - // or the output of the second leg. - if (isXRP(leg1.amount().in) || isXRP(leg2.amount().out)) - return tefINTERNAL; - - auto ret = do_cross( - leg1.amount(), - leg1.quality(), - leg1.owner(), - leg2.amount(), - leg2.quality(), - leg2.owner()); - - return fill(ret.first, leg1, ret.second, leg2); -} - -Rate -Taker::calculateRate( - ApplyView const& view, - AccountID const& issuer, - AccountID const& account) -{ - return isXRP(issuer) || (account == issuer) ? parityRate - : transferRate(view, issuer); -} - -} // namespace ripple diff --git a/src/xrpld/app/tx/detail/Taker.h b/src/xrpld/app/tx/detail/Taker.h deleted file mode 100644 index 3702a30deb..0000000000 --- a/src/xrpld/app/tx/detail/Taker.h +++ /dev/null @@ -1,341 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2014 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_APP_BOOK_TAKER_H_INCLUDED -#define RIPPLE_APP_BOOK_TAKER_H_INCLUDED - -#include -#include - -#include -#include -#include -#include -#include - -namespace ripple { - -/** The flavor of an offer crossing */ -enum class CrossType { XrpToIou, IouToXrp, IouToIou }; - -/** State for the active party during order book or payment operations. */ -class BasicTaker -{ -private: - AccountID account_; - Quality quality_; - Quality threshold_; - - bool sell_; - - // The original in and out quantities. - Amounts const original_; - - // The amounts still left over for us to try and take. 
- Amounts remaining_; - - // The issuers for the input and output - Issue const& issue_in_; - Issue const& issue_out_; - - // The rates that will be paid when the input and output currencies are - // transfered and the currency issuer isn't involved: - Rate const m_rate_in; - Rate const m_rate_out; - - // The type of crossing that we are performing - CrossType cross_type_; - -protected: - beast::Journal const journal_; - - struct Flow - { - explicit Flow() = default; - - Amounts order; - Amounts issuers; - - bool - sanity_check() const - { - using beast::zero; - - if (isXRP(order.in) && isXRP(order.out)) - return false; - - return order.in >= zero && order.out >= zero && - issuers.in >= zero && issuers.out >= zero; - } - }; - -private: - void - log_flow(char const* description, Flow const& flow); - - Flow - flow_xrp_to_iou( - Amounts const& offer, - Quality quality, - STAmount const& owner_funds, - STAmount const& taker_funds, - Rate const& rate_out); - - Flow - flow_iou_to_xrp( - Amounts const& offer, - Quality quality, - STAmount const& owner_funds, - STAmount const& taker_funds, - Rate const& rate_in); - - Flow - flow_iou_to_iou( - Amounts const& offer, - Quality quality, - STAmount const& owner_funds, - STAmount const& taker_funds, - Rate const& rate_in, - Rate const& rate_out); - - // Calculates the transfer rate that we should use when calculating - // flows for a particular issue between two accounts. - static Rate - effective_rate( - Rate const& rate, - Issue const& issue, - AccountID const& from, - AccountID const& to); - - // The transfer rate for the input currency between the given accounts - Rate - in_rate(AccountID const& from, AccountID const& to) const - { - return effective_rate(m_rate_in, original_.in.issue(), from, to); - } - - // The transfer rate for the output currency between the given accounts - Rate - out_rate(AccountID const& from, AccountID const& to) const - { - return effective_rate(m_rate_out, original_.out.issue(), from, to); - } - -public: - BasicTaker() = delete; - BasicTaker(BasicTaker const&) = delete; - - BasicTaker( - CrossType cross_type, - AccountID const& account, - Amounts const& amount, - Quality const& quality, - std::uint32_t flags, - Rate const& rate_in, - Rate const& rate_out, - beast::Journal journal = beast::Journal{beast::Journal::getNullSink()}); - - virtual ~BasicTaker() = default; - - /** Returns the amount remaining on the offer. - This is the amount at which the offer should be placed. It may either - be for the full amount when there were no crossing offers, or for zero - when the offer fully crossed, or any amount in between. - It is always at the original offer quality (quality_) - */ - Amounts - remaining_offer() const; - - /** Returns the amount that the offer was originally placed at. */ - Amounts const& - original_offer() const; - - /** Returns the account identifier of the taker. */ - AccountID const& - account() const noexcept - { - return account_; - } - - /** Returns `true` if the quality does not meet the taker's requirements. 
*/ - bool - reject(Quality const& quality) const noexcept - { - return quality < threshold_; - } - - /** Returns the type of crossing that is being performed */ - CrossType - cross_type() const - { - return cross_type_; - } - - /** Returns the Issue associated with the input of the offer */ - Issue const& - issue_in() const - { - return issue_in_; - } - - /** Returns the Issue associated with the output of the offer */ - Issue const& - issue_out() const - { - return issue_out_; - } - - /** Returns `true` if the taker has run out of funds. */ - bool - unfunded() const; - - /** Returns `true` if order crossing should not continue. - Order processing is stopped if the taker's order quantities have - been reached, or if the taker has run out of input funds. - */ - bool - done() const; - - /** Perform direct crossing through given offer. - @return an `Amounts` describing the flow achieved during cross - */ - BasicTaker::Flow - do_cross(Amounts offer, Quality quality, AccountID const& owner); - - /** Perform bridged crossing through given offers. - @return a pair of `Amounts` describing the flow achieved during cross - */ - std::pair - do_cross( - Amounts offer1, - Quality quality1, - AccountID const& owner1, - Amounts offer2, - Quality quality2, - AccountID const& owner2); - - virtual STAmount - get_funds(AccountID const& account, STAmount const& funds) const = 0; -}; - -//------------------------------------------------------------------------------ - -class Taker : public BasicTaker -{ -public: - Taker() = delete; - Taker(Taker const&) = delete; - - Taker( - CrossType cross_type, - ApplyView& view, - AccountID const& account, - Amounts const& offer, - std::uint32_t flags, - beast::Journal journal); - ~Taker() = default; - - void - consume_offer(Offer& offer, Amounts const& order); - - STAmount - get_funds(AccountID const& account, STAmount const& funds) const override; - - STAmount const& - get_xrp_flow() const - { - return xrp_flow_; - } - - std::uint32_t - get_direct_crossings() const - { - return direct_crossings_; - } - - std::uint32_t - get_bridge_crossings() const - { - return bridge_crossings_; - } - - /** Perform a direct or bridged offer crossing as appropriate. - Funds will be transferred accordingly, and offers will be adjusted. - @return tesSUCCESS if successful, or an error code otherwise. 
- */ - /** @{ */ - TER - cross(Offer& offer); - - TER - cross(Offer& leg1, Offer& leg2); - /** @} */ - -private: - static Rate - calculateRate( - ApplyView const& view, - AccountID const& issuer, - AccountID const& account); - - TER - fill(BasicTaker::Flow const& flow, Offer& offer); - - TER - fill( - BasicTaker::Flow const& flow1, - Offer& leg1, - BasicTaker::Flow const& flow2, - Offer& leg2); - - TER - transferXRP( - AccountID const& from, - AccountID const& to, - STAmount const& amount); - - TER - redeemIOU( - AccountID const& account, - STAmount const& amount, - Issue const& issue); - - TER - issueIOU( - AccountID const& account, - STAmount const& amount, - Issue const& issue); - -private: - // The underlying ledger entry we are dealing with - ApplyView& view_; - - // The amount of XRP that flowed if we were autobridging - STAmount xrp_flow_; - - // The number direct crossings that we performed - std::uint32_t direct_crossings_; - - // The number autobridged crossings that we performed - std::uint32_t bridge_crossings_; -}; - -} // namespace ripple - -#endif From 80d82c5b2b098fb11fb2f98dbcc3b6d5b8d9caf5 Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Wed, 23 Jul 2025 18:21:30 +0100 Subject: [PATCH 090/244] Add support for `DomainID` in `MPTokenIssuance` transactions (#5509) This change adds support for `DomainID` to existing transactions `MPTokenIssuanceCreate` and `MPTokenIssuanceSet`. In #5224 `DomainID` was added as an access control mechanism for `SingleAssetVault`. The actual implementation of this feature lies in `MPToken` and `MPTokenIssuance`, hence it makes sense to enable the use of `DomainID` also in `MPTokenIssuanceCreate` and `MPTokenIssuanceSet`, following same rules as in Vault: * `MPTokenIssuanceCreate` and `MPTokenIssuanceSet` can only set `DomainID` if flag `MPTRequireAuth` is set. * `MPTokenIssuanceCreate` requires that `DomainID` be a non-zero, uint256 number. * `MPTokenIssuanceSet` allows `DomainID` to be zero (or empty) in which case it will remove `DomainID` from the `MPTokenIssuance` object. The change is amendment-gated by `SingleAssetVault`. This is a non-breaking change because `SingleAssetVault` amendment is `Supported::no`, i.e. at this moment considered a work in progress, which cannot be enabled on the network. --- .../xrpl/protocol/detail/ledger_entries.macro | 3 +- .../xrpl/protocol/detail/transactions.macro | 6 +- src/test/app/MPToken_test.cpp | 577 ++++++++++++++++-- src/test/jtx/impl/mpt.cpp | 15 + src/test/jtx/mpt.h | 5 + .../app/tx/detail/MPTokenIssuanceCreate.cpp | 16 + .../app/tx/detail/MPTokenIssuanceSet.cpp | 60 +- 7 files changed, 615 insertions(+), 67 deletions(-) diff --git a/include/xrpl/protocol/detail/ledger_entries.macro b/include/xrpl/protocol/detail/ledger_entries.macro index 46c6e60db3..11306ee0f5 100644 --- a/include/xrpl/protocol/detail/ledger_entries.macro +++ b/include/xrpl/protocol/detail/ledger_entries.macro @@ -482,8 +482,7 @@ LEDGER_ENTRY(ltDELEGATE, 0x0083, Delegate, delegate, ({ })) /** A ledger object representing a single asset vault. 
- - \sa keylet::mptoken + \sa keylet::vault */ LEDGER_ENTRY(ltVAULT, 0x0084, Vault, vault, ({ {sfPreviousTxnID, soeREQUIRED}, diff --git a/include/xrpl/protocol/detail/transactions.macro b/include/xrpl/protocol/detail/transactions.macro index 1d59e71850..38665296cd 100644 --- a/include/xrpl/protocol/detail/transactions.macro +++ b/include/xrpl/protocol/detail/transactions.macro @@ -409,6 +409,7 @@ TRANSACTION(ttMPTOKEN_ISSUANCE_CREATE, 54, MPTokenIssuanceCreate, Delegation::de {sfTransferFee, soeOPTIONAL}, {sfMaximumAmount, soeOPTIONAL}, {sfMPTokenMetadata, soeOPTIONAL}, + {sfDomainID, soeOPTIONAL}, })) /** This transaction type destroys a MPTokensIssuance instance */ @@ -420,6 +421,7 @@ TRANSACTION(ttMPTOKEN_ISSUANCE_DESTROY, 55, MPTokenIssuanceDestroy, Delegation:: TRANSACTION(ttMPTOKEN_ISSUANCE_SET, 56, MPTokenIssuanceSet, Delegation::delegatable, ({ {sfMPTokenIssuanceID, soeREQUIRED}, {sfHolder, soeOPTIONAL}, + {sfDomainID, soeOPTIONAL}, })) /** This transaction type authorizes a MPToken instance */ @@ -478,7 +480,7 @@ TRANSACTION(ttVAULT_CREATE, 65, VaultCreate, Delegation::delegatable, ({ {sfAsset, soeREQUIRED, soeMPTSupported}, {sfAssetsMaximum, soeOPTIONAL}, {sfMPTokenMetadata, soeOPTIONAL}, - {sfDomainID, soeOPTIONAL}, // PermissionedDomainID + {sfDomainID, soeOPTIONAL}, {sfWithdrawalPolicy, soeOPTIONAL}, {sfData, soeOPTIONAL}, })) @@ -487,7 +489,7 @@ TRANSACTION(ttVAULT_CREATE, 65, VaultCreate, Delegation::delegatable, ({ TRANSACTION(ttVAULT_SET, 66, VaultSet, Delegation::delegatable, ({ {sfVaultID, soeREQUIRED}, {sfAssetsMaximum, soeOPTIONAL}, - {sfDomainID, soeOPTIONAL}, // PermissionedDomainID + {sfDomainID, soeOPTIONAL}, {sfData, soeOPTIONAL}, })) diff --git a/src/test/app/MPToken_test.cpp b/src/test/app/MPToken_test.cpp index 46b64e40f2..2cb47780ba 100644 --- a/src/test/app/MPToken_test.cpp +++ b/src/test/app/MPToken_test.cpp @@ -18,10 +18,16 @@ //============================================================================== #include +#include +#include #include #include +#include +#include #include +#include +#include #include namespace ripple { @@ -61,6 +67,48 @@ class MPToken_test : public beast::unit_test::suite .metadata = "test", .err = temMALFORMED}); + if (!features[featureSingleAssetVault]) + { + // tries to set DomainID when SAV is disabled + mptAlice.create( + {.maxAmt = 100, + .assetScale = 0, + .metadata = "test", + .flags = tfMPTRequireAuth, + .domainID = uint256(42), + .err = temDISABLED}); + } + else if (!features[featurePermissionedDomains]) + { + // tries to set DomainID when PD is disabled + mptAlice.create( + {.maxAmt = 100, + .assetScale = 0, + .metadata = "test", + .flags = tfMPTRequireAuth, + .domainID = uint256(42), + .err = temDISABLED}); + } + else + { + // tries to set DomainID when RequireAuth is not set + mptAlice.create( + {.maxAmt = 100, + .assetScale = 0, + .metadata = "test", + .domainID = uint256(42), + .err = temMALFORMED}); + + // tries to set zero DomainID + mptAlice.create( + {.maxAmt = 100, + .assetScale = 0, + .metadata = "test", + .flags = tfMPTRequireAuth, + .domainID = beast::zero, + .err = temMALFORMED}); + } + // tries to set a txfee greater than max mptAlice.create( {.maxAmt = 100, @@ -140,6 +188,48 @@ class MPToken_test : public beast::unit_test::suite BEAST_EXPECT( result[sfMaximumAmount.getJsonName()] == "9223372036854775807"); } + + if (features[featureSingleAssetVault]) + { + // Add permissioned domain + Account const credIssuer1{"credIssuer1"}; + std::string const credType = "credential"; + + pdomain::Credentials const 
credentials1{ + {.issuer = credIssuer1, .credType = credType}}; + + { + Env env{*this, features}; + env.fund(XRP(1000), credIssuer1); + + env(pdomain::setTx(credIssuer1, credentials1)); + auto const domainId1 = [&]() { + auto tx = env.tx()->getJson(JsonOptions::none); + return pdomain::getNewDomain(env.meta()); + }(); + + MPTTester mptAlice(env, alice); + mptAlice.create({ + .maxAmt = maxMPTokenAmount, // 9'223'372'036'854'775'807 + .assetScale = 1, + .transferFee = 10, + .metadata = "123", + .ownerCount = 1, + .flags = tfMPTCanLock | tfMPTRequireAuth | tfMPTCanEscrow | + tfMPTCanTrade | tfMPTCanTransfer | tfMPTCanClawback, + .domainID = domainId1, + }); + + // Get the hash for the most recent transaction. + std::string const txHash{ + env.tx()->getJson(JsonOptions::none)[jss::hash].asString()}; + + Json::Value const result = env.rpc("tx", txHash)[jss::result]; + BEAST_EXPECT( + result[sfMaximumAmount.getJsonName()] == + "9223372036854775807"); + } + } } void @@ -499,6 +589,59 @@ class MPToken_test : public beast::unit_test::suite .flags = 0x00000008, .err = temINVALID_FLAG}); + if (!features[featureSingleAssetVault]) + { + // test invalid flags - nothing is being changed + mptAlice.set( + {.account = alice, + .flags = 0x00000000, + .err = tecNO_PERMISSION}); + + mptAlice.set( + {.account = alice, + .holder = bob, + .flags = 0x00000000, + .err = tecNO_PERMISSION}); + + // cannot set DomainID since SAV is not enabled + mptAlice.set( + {.account = alice, + .domainID = uint256(42), + .err = temDISABLED}); + } + else + { + // test invalid flags - nothing is being changed + mptAlice.set( + {.account = alice, + .flags = 0x00000000, + .err = temMALFORMED}); + + mptAlice.set( + {.account = alice, + .holder = bob, + .flags = 0x00000000, + .err = temMALFORMED}); + + if (!features[featurePermissionedDomains]) + { + // cannot set DomainID since PD is not enabled + mptAlice.set( + {.account = alice, + .domainID = uint256(42), + .err = temDISABLED}); + } + else + { + // cannot set DomainID since Holder is set + mptAlice.set( + {.account = alice, + .holder = bob, + .domainID = uint256(42), + .err = temMALFORMED}); + } + } + // set both lock and unlock flags at the same time will fail mptAlice.set( {.account = alice, @@ -582,6 +725,53 @@ class MPToken_test : public beast::unit_test::suite mptAlice.set( {.holder = cindy, .flags = tfMPTLock, .err = tecNO_DST}); } + + if (features[featureSingleAssetVault] && + features[featurePermissionedDomains]) + { + // Add permissioned domain + Account const credIssuer1{"credIssuer1"}; + std::string const credType = "credential"; + + pdomain::Credentials const credentials1{ + {.issuer = credIssuer1, .credType = credType}}; + + { + Env env{*this, features}; + + MPTTester mptAlice(env, alice); + mptAlice.create({}); + + // Trying to set DomainID on a public MPTokenIssuance + mptAlice.set( + {.domainID = uint256(42), .err = tecNO_PERMISSION}); + + mptAlice.set( + {.domainID = beast::zero, .err = tecNO_PERMISSION}); + } + + { + Env env{*this, features}; + + MPTTester mptAlice(env, alice); + mptAlice.create({.flags = tfMPTRequireAuth}); + + // Trying to set non-existing DomainID + mptAlice.set( + {.domainID = uint256(42), .err = tecOBJECT_NOT_FOUND}); + + // Trying to lock but locking is disabled + mptAlice.set( + {.flags = tfMPTUnlock, + .domainID = uint256(42), + .err = tecNO_PERMISSION}); + + mptAlice.set( + {.flags = tfMPTUnlock, + .domainID = beast::zero, + .err = tecNO_PERMISSION}); + } + } } void @@ -590,71 +780,136 @@ class MPToken_test : public 
beast::unit_test::suite testcase("Enabled set transaction"); using namespace test::jtx; - - // Test locking and unlocking - Env env{*this, features}; Account const alice("alice"); // issuer Account const bob("bob"); // holder - MPTTester mptAlice(env, alice, {.holders = {bob}}); - - // create a mptokenissuance with locking - mptAlice.create( - {.ownerCount = 1, .holderCount = 0, .flags = tfMPTCanLock}); - - mptAlice.authorize({.account = bob, .holderCount = 1}); - - // locks bob's mptoken - mptAlice.set({.account = alice, .holder = bob, .flags = tfMPTLock}); - - // trying to lock bob's mptoken again will still succeed - // but no changes to the objects - mptAlice.set({.account = alice, .holder = bob, .flags = tfMPTLock}); - - // alice locks the mptissuance - mptAlice.set({.account = alice, .flags = tfMPTLock}); - - // alice tries to lock up both mptissuance and mptoken again - // it will not change the flags and both will remain locked. - mptAlice.set({.account = alice, .flags = tfMPTLock}); - mptAlice.set({.account = alice, .holder = bob, .flags = tfMPTLock}); - - // alice unlocks bob's mptoken - mptAlice.set({.account = alice, .holder = bob, .flags = tfMPTUnlock}); - - // locks up bob's mptoken again - mptAlice.set({.account = alice, .holder = bob, .flags = tfMPTLock}); - if (!features[featureSingleAssetVault]) { - // Delete bobs' mptoken even though it is locked - mptAlice.authorize({.account = bob, .flags = tfMPTUnauthorize}); + // Test locking and unlocking + Env env{*this, features}; + MPTTester mptAlice(env, alice, {.holders = {bob}}); + + // create a mptokenissuance with locking + mptAlice.create( + {.ownerCount = 1, .holderCount = 0, .flags = tfMPTCanLock}); + + mptAlice.authorize({.account = bob, .holderCount = 1}); + + // locks bob's mptoken + mptAlice.set({.account = alice, .holder = bob, .flags = tfMPTLock}); + + // trying to lock bob's mptoken again will still succeed + // but no changes to the objects + mptAlice.set({.account = alice, .holder = bob, .flags = tfMPTLock}); + + // alice locks the mptissuance + mptAlice.set({.account = alice, .flags = tfMPTLock}); + + // alice tries to lock up both mptissuance and mptoken again + // it will not change the flags and both will remain locked. + mptAlice.set({.account = alice, .flags = tfMPTLock}); + mptAlice.set({.account = alice, .holder = bob, .flags = tfMPTLock}); + + // alice unlocks bob's mptoken mptAlice.set( - {.account = alice, - .holder = bob, - .flags = tfMPTUnlock, - .err = tecOBJECT_NOT_FOUND}); + {.account = alice, .holder = bob, .flags = tfMPTUnlock}); - return; + // locks up bob's mptoken again + mptAlice.set({.account = alice, .holder = bob, .flags = tfMPTLock}); + if (!features[featureSingleAssetVault]) + { + // Delete bobs' mptoken even though it is locked + mptAlice.authorize({.account = bob, .flags = tfMPTUnauthorize}); + + mptAlice.set( + {.account = alice, + .holder = bob, + .flags = tfMPTUnlock, + .err = tecOBJECT_NOT_FOUND}); + + return; + } + + // Cannot delete locked MPToken + mptAlice.authorize( + {.account = bob, + .flags = tfMPTUnauthorize, + .err = tecNO_PERMISSION}); + + // alice unlocks mptissuance + mptAlice.set({.account = alice, .flags = tfMPTUnlock}); + + // alice unlocks bob's mptoken + mptAlice.set( + {.account = alice, .holder = bob, .flags = tfMPTUnlock}); + + // alice unlocks mptissuance and bob's mptoken again despite that + // they are already unlocked. 
Make sure this will not change the + // flags + mptAlice.set( + {.account = alice, .holder = bob, .flags = tfMPTUnlock}); + mptAlice.set({.account = alice, .flags = tfMPTUnlock}); } - // Cannot delete locked MPToken - mptAlice.authorize( - {.account = bob, - .flags = tfMPTUnauthorize, - .err = tecNO_PERMISSION}); + if (features[featureSingleAssetVault]) + { + // Add permissioned domain + std::string const credType = "credential"; - // alice unlocks mptissuance - mptAlice.set({.account = alice, .flags = tfMPTUnlock}); + // Test setting and resetting domain ID + Env env{*this, features}; - // alice unlocks bob's mptoken - mptAlice.set({.account = alice, .holder = bob, .flags = tfMPTUnlock}); + auto const domainId1 = [&]() { + Account const credIssuer1{"credIssuer1"}; + env.fund(XRP(1000), credIssuer1); - // alice unlocks mptissuance and bob's mptoken again despite that - // they are already unlocked. Make sure this will not change the - // flags - mptAlice.set({.account = alice, .holder = bob, .flags = tfMPTUnlock}); - mptAlice.set({.account = alice, .flags = tfMPTUnlock}); + pdomain::Credentials const credentials1{ + {.issuer = credIssuer1, .credType = credType}}; + + env(pdomain::setTx(credIssuer1, credentials1)); + return [&]() { + auto tx = env.tx()->getJson(JsonOptions::none); + return pdomain::getNewDomain(env.meta()); + }(); + }(); + + auto const domainId2 = [&]() { + Account const credIssuer2{"credIssuer2"}; + env.fund(XRP(1000), credIssuer2); + + pdomain::Credentials const credentials2{ + {.issuer = credIssuer2, .credType = credType}}; + + env(pdomain::setTx(credIssuer2, credentials2)); + return [&]() { + auto tx = env.tx()->getJson(JsonOptions::none); + return pdomain::getNewDomain(env.meta()); + }(); + }(); + + MPTTester mptAlice(env, alice, {.holders = {bob}}); + + // create a mptokenissuance with auth. + mptAlice.create( + {.ownerCount = 1, .holderCount = 0, .flags = tfMPTRequireAuth}); + BEAST_EXPECT(mptAlice.checkDomainID(std::nullopt)); + + // reset "domain not set" to "domain not set", i.e. 
no change + mptAlice.set({.domainID = beast::zero}); + BEAST_EXPECT(mptAlice.checkDomainID(std::nullopt)); + + // reset "domain not set" to domain1 + mptAlice.set({.domainID = domainId1}); + BEAST_EXPECT(mptAlice.checkDomainID(domainId1)); + + // reset domain1 to domain2 + mptAlice.set({.domainID = domainId2}); + BEAST_EXPECT(mptAlice.checkDomainID(domainId2)); + + // reset domain to "domain not set" + mptAlice.set({.domainID = beast::zero}); + BEAST_EXPECT(mptAlice.checkDomainID(std::nullopt)); + } } void @@ -889,6 +1144,200 @@ class MPToken_test : public beast::unit_test::suite mptAlice.pay(bob, alice, 100, tecNO_AUTH); } + if (features[featureSingleAssetVault] && + features[featurePermissionedDomains]) + { + // If RequireAuth is enabled and domain is a match, payment succeeds + { + Env env{*this, features}; + std::string const credType = "credential"; + Account const credIssuer1{"credIssuer1"}; + env.fund(XRP(1000), credIssuer1, bob); + + auto const domainId1 = [&]() { + pdomain::Credentials const credentials1{ + {.issuer = credIssuer1, .credType = credType}}; + + env(pdomain::setTx(credIssuer1, credentials1)); + return [&]() { + auto tx = env.tx()->getJson(JsonOptions::none); + return pdomain::getNewDomain(env.meta()); + }(); + }(); + // bob is authorized via domain + env(credentials::create(bob, credIssuer1, credType)); + env(credentials::accept(bob, credIssuer1, credType)); + env.close(); + + MPTTester mptAlice(env, alice, {}); + env.close(); + + mptAlice.create({ + .ownerCount = 1, + .holderCount = 0, + .flags = tfMPTRequireAuth | tfMPTCanTransfer, + .domainID = domainId1, + }); + + mptAlice.authorize({.account = bob}); + env.close(); + + // bob is authorized via domain + mptAlice.pay(alice, bob, 100); + mptAlice.set({.domainID = beast::zero}); + + // bob is no longer authorized + mptAlice.pay(alice, bob, 100, tecNO_AUTH); + } + + { + Env env{*this, features}; + std::string const credType = "credential"; + Account const credIssuer1{"credIssuer1"}; + env.fund(XRP(1000), credIssuer1, bob); + + auto const domainId1 = [&]() { + pdomain::Credentials const credentials1{ + {.issuer = credIssuer1, .credType = credType}}; + + env(pdomain::setTx(credIssuer1, credentials1)); + return [&]() { + auto tx = env.tx()->getJson(JsonOptions::none); + return pdomain::getNewDomain(env.meta()); + }(); + }(); + // bob is authorized via domain + env(credentials::create(bob, credIssuer1, credType)); + env(credentials::accept(bob, credIssuer1, credType)); + env.close(); + + MPTTester mptAlice(env, alice, {}); + env.close(); + + mptAlice.create({ + .ownerCount = 1, + .holderCount = 0, + .flags = tfMPTRequireAuth | tfMPTCanTransfer, + .domainID = domainId1, + }); + + // bob creates an empty MPToken + mptAlice.authorize({.account = bob}); + + // alice authorizes bob to hold funds + mptAlice.authorize({.account = alice, .holder = bob}); + + // alice sends 100 MPT to bob + mptAlice.pay(alice, bob, 100); + + // alice UNAUTHORIZES bob + mptAlice.authorize( + {.account = alice, + .holder = bob, + .flags = tfMPTUnauthorize}); + + // bob is still authorized, via domain + mptAlice.pay(bob, alice, 10); + + mptAlice.set({.domainID = beast::zero}); + + // bob fails to send back to alice because he is no longer + // authorize to move his funds! 
+ mptAlice.pay(bob, alice, 10, tecNO_AUTH); + } + + { + Env env{*this, features}; + std::string const credType = "credential"; + // credIssuer1 is the owner of domainId1 and a credential issuer + Account const credIssuer1{"credIssuer1"}; + // credIssuer2 is the owner of domainId2 and a credential issuer + // Note, domainId2 also lists credentials issued by credIssuer1 + Account const credIssuer2{"credIssuer2"}; + env.fund(XRP(1000), credIssuer1, credIssuer2, bob, carol); + + auto const domainId1 = [&]() { + pdomain::Credentials const credentials{ + {.issuer = credIssuer1, .credType = credType}}; + + env(pdomain::setTx(credIssuer1, credentials)); + return [&]() { + auto tx = env.tx()->getJson(JsonOptions::none); + return pdomain::getNewDomain(env.meta()); + }(); + }(); + + auto const domainId2 = [&]() { + pdomain::Credentials const credentials{ + {.issuer = credIssuer1, .credType = credType}, + {.issuer = credIssuer2, .credType = credType}}; + + env(pdomain::setTx(credIssuer2, credentials)); + return [&]() { + auto tx = env.tx()->getJson(JsonOptions::none); + return pdomain::getNewDomain(env.meta()); + }(); + }(); + + // bob is authorized via credIssuer1 which is recognized by both + // domainId1 and domainId2 + env(credentials::create(bob, credIssuer1, credType)); + env(credentials::accept(bob, credIssuer1, credType)); + env.close(); + + // carol is authorized via credIssuer2, only recognized by + // domainId2 + env(credentials::create(carol, credIssuer2, credType)); + env(credentials::accept(carol, credIssuer2, credType)); + env.close(); + + MPTTester mptAlice(env, alice, {}); + env.close(); + + mptAlice.create({ + .ownerCount = 1, + .holderCount = 0, + .flags = tfMPTRequireAuth | tfMPTCanTransfer, + .domainID = domainId1, + }); + + // bob and carol create an empty MPToken + mptAlice.authorize({.account = bob}); + mptAlice.authorize({.account = carol}); + env.close(); + + // alice sends 50 MPT to bob but cannot send to carol + mptAlice.pay(alice, bob, 50); + mptAlice.pay(alice, carol, 50, tecNO_AUTH); + env.close(); + + // bob cannot send to carol because they are not on the same + // domain (since credIssuer2 is not recognized by domainId1) + mptAlice.pay(bob, carol, 10, tecNO_AUTH); + env.close(); + + // alice updates domainID to domainId2 which recognizes both + // credIssuer1 and credIssuer2 + mptAlice.set({.domainID = domainId2}); + // alice can now send to carol + mptAlice.pay(alice, carol, 10); + env.close(); + + // bob can now send to carol because both are in the same + // domain + mptAlice.pay(bob, carol, 10); + env.close(); + + // bob loses his authorization and can no longer send MPT + env(credentials::deleteCred( + credIssuer1, bob, credIssuer1, credType)); + env.close(); + + mptAlice.pay(bob, carol, 10, tecNO_AUTH); + mptAlice.pay(bob, alice, 10, tecNO_AUTH); + } + } + // Non-issuer cannot send to each other if MPTCanTransfer isn't set { Env env(*this, features); @@ -1340,10 +1789,8 @@ class MPToken_test : public beast::unit_test::suite } void - testDepositPreauth() + testDepositPreauth(FeatureBitset features) { - testcase("DepositPreauth"); - using namespace test::jtx; Account const alice("alice"); // issuer Account const bob("bob"); // holder @@ -1352,8 +1799,11 @@ class MPToken_test : public beast::unit_test::suite char const credType[] = "abcde"; + if (features[featureCredentials]) { - Env env(*this); + testcase("DepositPreauth"); + + Env env(*this, features); env.fund(XRP(50000), diana, dpIssuer); env.close(); @@ -2297,6 +2747,8 @@ public: // MPTokenIssuanceCreate 
testCreateValidation(all - featureSingleAssetVault); + testCreateValidation( + (all | featureSingleAssetVault) - featurePermissionedDomains); testCreateValidation(all | featureSingleAssetVault); testCreateEnabled(all - featureSingleAssetVault); testCreateEnabled(all | featureSingleAssetVault); @@ -2314,7 +2766,11 @@ public: testAuthorizeEnabled(all | featureSingleAssetVault); // MPTokenIssuanceSet - testSetValidation(all); + testSetValidation(all - featureSingleAssetVault); + testSetValidation( + (all | featureSingleAssetVault) - featurePermissionedDomains); + testSetValidation(all | featureSingleAssetVault); + testSetEnabled(all - featureSingleAssetVault); testSetEnabled(all | featureSingleAssetVault); @@ -2323,8 +2779,9 @@ public: testClawback(all); // Test Direct Payment - testPayment(all); - testDepositPreauth(); + testPayment(all | featureSingleAssetVault); + testDepositPreauth(all); + testDepositPreauth(all - featureCredentials); // Test MPT Amount is invalid in Tx, which don't support MPT testMPTInvalidInTx(all); diff --git a/src/test/jtx/impl/mpt.cpp b/src/test/jtx/impl/mpt.cpp index d33432d316..9f7a611feb 100644 --- a/src/test/jtx/impl/mpt.cpp +++ b/src/test/jtx/impl/mpt.cpp @@ -19,6 +19,7 @@ #include +#include #include namespace ripple { @@ -99,6 +100,8 @@ MPTTester::create(MPTCreate const& arg) jv[sfMPTokenMetadata] = strHex(*arg.metadata); if (arg.maxAmt) jv[sfMaximumAmount] = std::to_string(*arg.maxAmt); + if (arg.domainID) + jv[sfDomainID] = to_string(*arg.domainID); if (submit(arg, jv) != tesSUCCESS) { // Verify issuance doesn't exist @@ -235,6 +238,8 @@ MPTTester::set(MPTSet const& arg) jv[sfHolder] = arg.holder->human(); if (arg.delegate) jv[sfDelegate] = arg.delegate->human(); + if (arg.domainID) + jv[sfDomainID] = to_string(*arg.domainID); if (submit(arg, jv) == tesSUCCESS && arg.flags.value_or(0)) { auto require = [&](std::optional const& holder, @@ -272,6 +277,16 @@ MPTTester::forObject( return false; } +[[nodiscard]] bool +MPTTester::checkDomainID(std::optional expected) const +{ + return forObject([&](SLEP const& sle) -> bool { + if (sle->isFieldPresent(sfDomainID)) + return expected == sle->getFieldH256(sfDomainID); + return (!expected.has_value()); + }); +} + [[nodiscard]] bool MPTTester::checkMPTokenAmount( Account const& holder_, diff --git a/src/test/jtx/mpt.h b/src/test/jtx/mpt.h index 64eaa452f5..4756ca723d 100644 --- a/src/test/jtx/mpt.h +++ b/src/test/jtx/mpt.h @@ -106,6 +106,7 @@ struct MPTCreate std::optional holderCount = std::nullopt; bool fund = true; std::optional flags = {0}; + std::optional domainID = std::nullopt; std::optional err = std::nullopt; }; @@ -139,6 +140,7 @@ struct MPTSet std::optional holderCount = std::nullopt; std::optional flags = std::nullopt; std::optional delegate = std::nullopt; + std::optional domainID = std::nullopt; std::optional err = std::nullopt; }; @@ -165,6 +167,9 @@ public: void set(MPTSet const& set = {}); + [[nodiscard]] bool + checkDomainID(std::optional expected) const; + [[nodiscard]] bool checkMPTokenAmount(Account const& holder, std::int64_t expectedAmount) const; diff --git a/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.cpp b/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.cpp index 1b96b27f24..da3b57c8fe 100644 --- a/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.cpp +++ b/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.cpp @@ -31,6 +31,11 @@ MPTokenIssuanceCreate::preflight(PreflightContext const& ctx) if (!ctx.rules.enabled(featureMPTokensV1)) return temDISABLED; + if (ctx.tx.isFieldPresent(sfDomainID) && + 
!(ctx.rules.enabled(featurePermissionedDomains) && + ctx.rules.enabled(featureSingleAssetVault))) + return temDISABLED; + if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) return ret; @@ -48,6 +53,16 @@ MPTokenIssuanceCreate::preflight(PreflightContext const& ctx) return temMALFORMED; } + if (auto const domain = ctx.tx[~sfDomainID]) + { + if (*domain == beast::zero) + return temMALFORMED; + + // Domain present implies that MPTokenIssuance is not public + if ((ctx.tx.getFlags() & tfMPTRequireAuth) == 0) + return temMALFORMED; + } + if (auto const metadata = ctx.tx[~sfMPTokenMetadata]) { if (metadata->length() == 0 || @@ -142,6 +157,7 @@ MPTokenIssuanceCreate::doApply() .assetScale = tx[~sfAssetScale], .transferFee = tx[~sfTransferFee], .metadata = tx[~sfMPTokenMetadata], + .domainId = tx[~sfDomainID], }); return result ? tesSUCCESS : result.error(); } diff --git a/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp b/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp index 06ea089526..e05862af37 100644 --- a/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp +++ b/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp @@ -21,6 +21,7 @@ #include #include +#include #include namespace ripple { @@ -31,6 +32,14 @@ MPTokenIssuanceSet::preflight(PreflightContext const& ctx) if (!ctx.rules.enabled(featureMPTokensV1)) return temDISABLED; + if (ctx.tx.isFieldPresent(sfDomainID) && + !(ctx.rules.enabled(featurePermissionedDomains) && + ctx.rules.enabled(featureSingleAssetVault))) + return temDISABLED; + + if (ctx.tx.isFieldPresent(sfDomainID) && ctx.tx.isFieldPresent(sfHolder)) + return temMALFORMED; + if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) return ret; @@ -48,6 +57,13 @@ MPTokenIssuanceSet::preflight(PreflightContext const& ctx) if (holderID && accountID == holderID) return temMALFORMED; + if (ctx.rules.enabled(featureSingleAssetVault)) + { + // Is this transaction actually changing anything ? + if (txFlags == 0 && !ctx.tx.isFieldPresent(sfDomainID)) + return temMALFORMED; + } + return preflight2(ctx); } @@ -97,9 +113,14 @@ MPTokenIssuanceSet::preclaim(PreclaimContext const& ctx) if (!sleMptIssuance) return tecOBJECT_NOT_FOUND; - // if the mpt has disabled locking - if (!((*sleMptIssuance)[sfFlags] & lsfMPTCanLock)) - return tecNO_PERMISSION; + if (!sleMptIssuance->isFlag(lsfMPTCanLock)) + { + // For readability two separate `if` rather than `||` of two conditions + if (!ctx.view.rules().enabled(featureSingleAssetVault)) + return tecNO_PERMISSION; + else if (ctx.tx.isFlag(tfMPTLock) || ctx.tx.isFlag(tfMPTUnlock)) + return tecNO_PERMISSION; + } // ensure it is issued by the tx submitter if ((*sleMptIssuance)[sfIssuer] != ctx.tx[sfAccount]) @@ -117,6 +138,20 @@ MPTokenIssuanceSet::preclaim(PreclaimContext const& ctx) return tecOBJECT_NOT_FOUND; } + if (auto const domain = ctx.tx[~sfDomainID]) + { + if (not sleMptIssuance->isFlag(lsfMPTRequireAuth)) + return tecNO_PERMISSION; + + if (*domain != beast::zero) + { + auto const sleDomain = + ctx.view.read(keylet::permissionedDomain(*domain)); + if (!sleDomain) + return tecOBJECT_NOT_FOUND; + } + } + return tesSUCCESS; } @@ -126,6 +161,7 @@ MPTokenIssuanceSet::doApply() auto const mptIssuanceID = ctx_.tx[sfMPTokenIssuanceID]; auto const txFlags = ctx_.tx.getFlags(); auto const holderID = ctx_.tx[~sfHolder]; + auto const domainID = ctx_.tx[~sfDomainID]; std::shared_ptr sle; if (holderID) @@ -147,6 +183,24 @@ MPTokenIssuanceSet::doApply() if (flagsIn != flagsOut) sle->setFieldU32(sfFlags, flagsOut); + if (domainID) + { + // This is enforced in preflight. 
+ XRPL_ASSERT( + sle->getType() == ltMPTOKEN_ISSUANCE, + "MPTokenIssuanceSet::doApply : modifying MPTokenIssuance"); + + if (*domainID != beast::zero) + { + sle->setFieldH256(sfDomainID, *domainID); + } + else + { + if (sle->isFieldPresent(sfDomainID)) + sle->makeFieldAbsent(sfDomainID); + } + } + view().update(sle); return tesSUCCESS; From 60e340d35628640a14ced0fdbbeef43393625353 Mon Sep 17 00:00:00 2001 From: Chenna Keshava B S <21219765+ckeshava@users.noreply.github.com> Date: Wed, 23 Jul 2025 10:53:18 -0700 Subject: [PATCH 091/244] Include `network_id` in validations and subscription stream responses (#5579) This change includes `network_id` data in the validations and ledger subscription stream responses, as well as unit tests to validate the response fields. Fixes #4783 --- src/test/rpc/Subscribe_test.cpp | 15 +++++++++++++-- src/xrpld/app/misc/NetworkOPs.cpp | 4 ++++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/src/test/rpc/Subscribe_test.cpp b/src/test/rpc/Subscribe_test.cpp index 989afc0acc..e0db79bf53 100644 --- a/src/test/rpc/Subscribe_test.cpp +++ b/src/test/rpc/Subscribe_test.cpp @@ -131,6 +131,9 @@ public: BEAST_EXPECT(jv.isMember(jss::id) && jv[jss::id] == 5); } BEAST_EXPECT(jv[jss::result][jss::ledger_index] == 2); + BEAST_EXPECT( + jv[jss::result][jss::network_id] == + env.app().config().NETWORK_ID); } { @@ -139,7 +142,8 @@ public: // Check stream update BEAST_EXPECT(wsc->findMsg(5s, [&](auto const& jv) { - return jv[jss::ledger_index] == 3; + return jv[jss::ledger_index] == 3 && + jv[jss::network_id] == env.app().config().NETWORK_ID; })); } @@ -149,7 +153,8 @@ public: // Check stream update BEAST_EXPECT(wsc->findMsg(5s, [&](auto const& jv) { - return jv[jss::ledger_index] == 4; + return jv[jss::ledger_index] == 4 && + jv[jss::network_id] == env.app().config().NETWORK_ID; })); } @@ -509,6 +514,11 @@ public: if (!jv.isMember(jss::validated_hash)) return false; + uint32_t netID = env.app().config().NETWORK_ID; + if (!jv.isMember(jss::network_id) || + jv[jss::network_id] != netID) + return false; + // Certain fields are only added on a flag ledger. 
bool const isFlagLedger = (env.closed()->info().seq + 1) % 256 == 0; @@ -567,6 +577,7 @@ public: jv[jss::streams][0u] = "ledger"; jr = env.rpc("json", "subscribe", to_string(jv))[jss::result]; BEAST_EXPECT(jr[jss::status] == "success"); + BEAST_EXPECT(jr[jss::network_id] == env.app().config().NETWORK_ID); jr = env.rpc("json", "unsubscribe", to_string(jv))[jss::result]; BEAST_EXPECT(jr[jss::status] == "success"); diff --git a/src/xrpld/app/misc/NetworkOPs.cpp b/src/xrpld/app/misc/NetworkOPs.cpp index 1ac42579ba..3220ce99fc 100644 --- a/src/xrpld/app/misc/NetworkOPs.cpp +++ b/src/xrpld/app/misc/NetworkOPs.cpp @@ -2415,6 +2415,7 @@ NetworkOPsImp::pubValidation(std::shared_ptr const& val) jvObj[jss::flags] = val->getFlags(); jvObj[jss::signing_time] = *(*val)[~sfSigningTime]; jvObj[jss::data] = strHex(val->getSerializer().slice()); + jvObj[jss::network_id] = app_.config().NETWORK_ID; if (auto version = (*val)[~sfServerVersion]) jvObj[jss::server_version] = std::to_string(*version); @@ -3119,6 +3120,8 @@ NetworkOPsImp::pubLedger(std::shared_ptr const& lpAccepted) jvObj[jss::ledger_time] = Json::Value::UInt( lpAccepted->info().closeTime.time_since_epoch().count()); + jvObj[jss::network_id] = app_.config().NETWORK_ID; + if (!lpAccepted->rules().enabled(featureXRPFees)) jvObj[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED; jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped(); @@ -4177,6 +4180,7 @@ NetworkOPsImp::subLedger(InfoSub::ref isrListener, Json::Value& jvResult) jvResult[jss::reserve_base] = lpClosed->fees().accountReserve(0).jsonClipped(); jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped(); + jvResult[jss::network_id] = app_.config().NETWORK_ID; } if ((mMode >= OperatingMode::SYNCING) && !isNeedNetworkLedger()) From 5713f9782ab2ccf59db37d31e14a600fb9466556 Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Thu, 24 Jul 2025 11:35:47 +0100 Subject: [PATCH 092/244] chore: Rename conan profile to `default` (#5599) This change renames the `libxrpl` profile to `default` to make it more usable. 
--- .github/workflows/macos.yml | 2 -- .github/workflows/nix.yml | 2 -- .github/workflows/windows.yml | 2 -- conan/profiles/{libxrpl => default} | 18 ++++++++++++++++++ conanfile.py | 2 -- 5 files changed, 18 insertions(+), 8 deletions(-) rename conan/profiles/{libxrpl => default} (50%) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 8acd90eeff..3c47a8bd53 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -24,8 +24,6 @@ env: CONAN_GLOBAL_CONF: | core.download:parallel={{os.cpu_count()}} core.upload:parallel={{os.cpu_count()}} - core:default_build_profile=libxrpl - core:default_profile=libxrpl tools.build:jobs={{ (os.cpu_count() * 4/5) | int }} tools.build:verbosity=verbose tools.compilation:verbosity=verbose diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml index 8218dcc276..9ff96035b2 100644 --- a/.github/workflows/nix.yml +++ b/.github/workflows/nix.yml @@ -25,8 +25,6 @@ env: CONAN_GLOBAL_CONF: | core.download:parallel={{ os.cpu_count() }} core.upload:parallel={{ os.cpu_count() }} - core:default_build_profile=libxrpl - core:default_profile=libxrpl tools.build:jobs={{ (os.cpu_count() * 4/5) | int }} tools.build:verbosity=verbose tools.compilation:verbosity=verbose diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 254850f26a..1479c47600 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -27,8 +27,6 @@ env: CONAN_GLOBAL_CONF: | core.download:parallel={{os.cpu_count()}} core.upload:parallel={{os.cpu_count()}} - core:default_build_profile=libxrpl - core:default_profile=libxrpl tools.build:jobs=24 tools.build:verbosity=verbose tools.compilation:verbosity=verbose diff --git a/conan/profiles/libxrpl b/conan/profiles/default similarity index 50% rename from conan/profiles/libxrpl rename to conan/profiles/default index b037b8c4a2..0417704f8a 100644 --- a/conan/profiles/libxrpl +++ b/conan/profiles/default @@ -9,6 +9,7 @@ [settings] os={{ os }} arch={{ arch }} +build_type=Debug compiler={{compiler}} compiler.version={{ compiler_version }} compiler.cppstd=20 @@ -17,3 +18,20 @@ compiler.runtime=static {% else %} compiler.libcxx={{detect_api.detect_libcxx(compiler, version, compiler_exe)}} {% endif %} + +[conf] +{% if compiler == "clang" and compiler_version >= 19 %} +tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw'] +{% endif %} +{% if compiler == "apple-clang" and compiler_version >= 17 %} +tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw'] +{% endif %} +{% if compiler == "clang" and compiler_version == 16 %} +tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS'] +{% endif %} +{% if compiler == "gcc" and compiler_version < 13 %} +tools.build:cxxflags=['-Wno-restrict'] +{% endif %} + +[tool_requires] +!cmake/*: cmake/[>=3 <4] diff --git a/conanfile.py b/conanfile.py index d79b47bc6f..bb65969288 100644 --- a/conanfile.py +++ b/conanfile.py @@ -143,8 +143,6 @@ class Xrpl(ConanFile): tc.variables['static'] = self.options.static tc.variables['unity'] = self.options.unity tc.variables['xrpld'] = self.options.xrpld - if self.settings.compiler == 'clang' and self.settings.compiler.version == 16: - tc.extra_cxxflags = ["-DBOOST_ASIO_DISABLE_CONCEPTS"] tc.generate() def build(self): From b2960b9e7f713ea40a997712105eadb85770f918 Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Thu, 24 Jul 2025 14:20:50 +0100 Subject: [PATCH 093/244] Switch instrumentation workflow to use dependencies (#5607) Before `XRPLF/ci` images, we did 
not have a `dependencies:` job for clang-16, so `instrumentation:` had to build its own dependencies. Now we have clang-16 Conan dependencies built in a separate job that can be used. --- .github/workflows/nix.yml | 53 +++++++++++++++++++++------------------ 1 file changed, 29 insertions(+), 24 deletions(-) diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml index 9ff96035b2..c3560dbab7 100644 --- a/.github/workflows/nix.yml +++ b/.github/workflows/nix.yml @@ -357,39 +357,44 @@ jobs: cmake --build . ./example | grep '^[[:digit:]]\+\.[[:digit:]]\+\.[[:digit:]]\+' - # NOTE we are not using dependencies built above because it lags with - # compiler versions. Instrumentation requires clang version 16 or - # later - instrumentation-build: - if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }} - env: - CLANG_RELEASE: 16 + needs: dependencies runs-on: [self-hosted, heavy] container: ghcr.io/xrplf/ci/debian-bookworm:clang-16 - + env: + build_dir: .build steps: + - name: download cache + uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 + with: + name: linux-clang-Debug + + - name: extract cache + run: | + mkdir -p ${CONAN_HOME} + tar -xzf conan.tar.gz -C ${CONAN_HOME} + + - name: check environment + run: | + echo ${PATH} | tr ':' '\n' + conan --version + cmake --version + env | sort + ls ${CONAN_HOME} + - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + - name: dependencies + uses: ./.github/actions/dependencies + with: + configuration: Debug + - name: prepare environment run: | - mkdir ${GITHUB_WORKSPACE}/.build - echo "SOURCE_DIR=$GITHUB_WORKSPACE" >> $GITHUB_ENV - echo "BUILD_DIR=$GITHUB_WORKSPACE/.build" >> $GITHUB_ENV - - - name: configure Conan - run: | - echo "${CONAN_GLOBAL_CONF}" >> $(conan config home)/global.conf - conan config install conan/profiles/ -tf $(conan config home)/profiles/ - conan profile show - - name: build dependencies - run: | - cd ${BUILD_DIR} - conan install ${SOURCE_DIR} \ - --output-folder ${BUILD_DIR} \ - --build missing \ - --settings:all build_type=Debug + mkdir -p ${build_dir} + echo "SOURCE_DIR=$(pwd)" >> $GITHUB_ENV + echo "BUILD_DIR=$(pwd)/${build_dir}" >> $GITHUB_ENV - name: build with instrumentation run: | From 5c2a3a27798d1c17d32c265d3b003ce420082e6a Mon Sep 17 00:00:00 2001 From: Bart Date: Thu, 24 Jul 2025 14:53:14 -0400 Subject: [PATCH 094/244] refactor: Update rocksdb (#5568) This change updates RocksDB to its latest version. RocksDB is backward-compatible, so even though this is a major version bump, databases created with previous versions will continue to function. The external RocksDB folder is removed, as the latest version available via Conan Center no longer needs custom patches. 
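As an illustration of the backward-compatibility claim above (not part of this patch): a minimal sketch that reopens a database written by an earlier RocksDB release using the upgraded library, assuming the stock RocksDB C++ API; the database path is a hypothetical placeholder.

    // Illustrative sketch only: reopen a database created by an older RocksDB.
    // The path below is a placeholder, not a real rippled path.
    #include <rocksdb/db.h>
    #include <rocksdb/options.h>

    #include <cstdio>
    #include <memory>

    int main()
    {
        rocksdb::Options options;
        options.create_if_missing = false;  // only open a pre-existing database

        rocksdb::DB* raw = nullptr;
        rocksdb::Status const status =
            rocksdb::DB::Open(options, "/tmp/example-rocksdb", &raw);
        std::unique_ptr<rocksdb::DB> db(raw);

        // A database written by RocksDB 9.x is expected to open cleanly here.
        std::puts(status.ok() ? "opened" : status.ToString().c_str());
        return status.ok() ? 0 : 1;
    }
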
--- .github/actions/dependencies/action.yml | 3 - .github/workflows/macos.yml | 1 - .github/workflows/nix.yml | 1 - .github/workflows/windows.yml | 1 - BUILD.md | 8 - conanfile.py | 2 +- external/rocksdb/conandata.yml | 12 - external/rocksdb/conanfile.py | 235 ------------- .../patches/9.7.3-0001-memory-leak.patch | 319 ------------------ .../9.x.x-0001-exclude-thirdparty.patch | 30 -- 10 files changed, 1 insertion(+), 611 deletions(-) delete mode 100644 external/rocksdb/conandata.yml delete mode 100644 external/rocksdb/conanfile.py delete mode 100644 external/rocksdb/patches/9.7.3-0001-memory-leak.patch delete mode 100644 external/rocksdb/patches/9.x.x-0001-exclude-thirdparty.patch diff --git a/.github/actions/dependencies/action.yml b/.github/actions/dependencies/action.yml index eeb8df105c..7ece9710a8 100644 --- a/.github/actions/dependencies/action.yml +++ b/.github/actions/dependencies/action.yml @@ -10,7 +10,6 @@ runs: shell: bash run: | conan export --version 1.1.10 external/snappy - conan export --version 9.7.3 external/rocksdb conan export --version 4.0.3 external/soci - name: add Ripple Conan remote if: env.CONAN_URL != '' @@ -22,7 +21,6 @@ runs: fi conan remote add --index 0 ripple "${CONAN_URL}" echo "Added conan remote ripple at ${CONAN_URL}" - - name: try to authenticate to Ripple Conan remote if: env.CONAN_LOGIN_USERNAME_RIPPLE != '' && env.CONAN_PASSWORD_RIPPLE != '' id: remote @@ -31,7 +29,6 @@ runs: echo "Authenticating to ripple remote..." conan remote auth ripple --force conan remote list-users - - name: list missing binaries id: binaries shell: bash diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 3c47a8bd53..adea15af9e 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -94,7 +94,6 @@ jobs: shell: bash run: | conan export --version 1.1.10 external/snappy - conan export --version 9.7.3 external/rocksdb conan export --version 4.0.3 external/soci - name: add Ripple Conan remote if: env.CONAN_URL != '' diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml index c3560dbab7..d6490e4caa 100644 --- a/.github/workflows/nix.yml +++ b/.github/workflows/nix.yml @@ -99,7 +99,6 @@ jobs: run: tar -czf conan.tar.gz -C ${CONAN_HOME} . - name: build dependencies uses: ./.github/actions/dependencies - with: configuration: ${{ matrix.configuration }} - name: upload archive diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 1479c47600..84a91bcb4e 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -89,7 +89,6 @@ jobs: shell: bash run: | conan export --version 1.1.10 external/snappy - conan export --version 9.7.3 external/rocksdb conan export --version 4.0.3 external/soci - name: add Ripple Conan remote if: env.CONAN_URL != '' diff --git a/BUILD.md b/BUILD.md index fba238d2bc..8e2d5ffb2b 100644 --- a/BUILD.md +++ b/BUILD.md @@ -171,14 +171,6 @@ which allows you to statically link it with GCC, if you want. conan export --version 1.1.10 external/snappy ``` -Export our [Conan recipe for RocksDB](./external/rocksdb). -It does not override paths to dependencies when building with Visual Studio. - - ``` - # Conan 2.x - conan export --version 9.7.3 external/rocksdb - ``` - Export our [Conan recipe for SOCI](./external/soci). It patches their CMake to correctly import its dependencies. 
diff --git a/conanfile.py b/conanfile.py index bb65969288..bad4dfd111 100644 --- a/conanfile.py +++ b/conanfile.py @@ -112,7 +112,7 @@ class Xrpl(ConanFile): if self.options.jemalloc: self.requires('jemalloc/5.3.0') if self.options.rocksdb: - self.requires('rocksdb/9.7.3') + self.requires('rocksdb/10.0.1') self.requires('xxhash/0.8.3', **transitive_headers_opt) exports_sources = ( diff --git a/external/rocksdb/conandata.yml b/external/rocksdb/conandata.yml deleted file mode 100644 index 7d7a575d98..0000000000 --- a/external/rocksdb/conandata.yml +++ /dev/null @@ -1,12 +0,0 @@ -sources: - "9.7.3": - url: "https://github.com/facebook/rocksdb/archive/refs/tags/v9.7.3.tar.gz" - sha256: "acfabb989cbfb5b5c4d23214819b059638193ec33dad2d88373c46448d16d38b" -patches: - "9.7.3": - - patch_file: "patches/9.x.x-0001-exclude-thirdparty.patch" - patch_description: "Do not include thirdparty.inc" - patch_type: "portability" - - patch_file: "patches/9.7.3-0001-memory-leak.patch" - patch_description: "Fix a leak of obsolete blob files left open until DB::Close()" - patch_type: "portability" diff --git a/external/rocksdb/conanfile.py b/external/rocksdb/conanfile.py deleted file mode 100644 index 8b85ce1540..0000000000 --- a/external/rocksdb/conanfile.py +++ /dev/null @@ -1,235 +0,0 @@ -import os -import glob -import shutil - -from conan import ConanFile -from conan.errors import ConanInvalidConfiguration -from conan.tools.build import check_min_cppstd -from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout -from conan.tools.files import apply_conandata_patches, collect_libs, copy, export_conandata_patches, get, rm, rmdir -from conan.tools.microsoft import check_min_vs, is_msvc, is_msvc_static_runtime -from conan.tools.scm import Version - -required_conan_version = ">=1.53.0" - - -class RocksDBConan(ConanFile): - name = "rocksdb" - description = "A library that provides an embeddable, persistent key-value store for fast storage" - license = ("GPL-2.0-only", "Apache-2.0") - url = "https://github.com/conan-io/conan-center-index" - homepage = "https://github.com/facebook/rocksdb" - topics = ("database", "leveldb", "facebook", "key-value") - package_type = "library" - settings = "os", "arch", "compiler", "build_type" - options = { - "shared": [True, False], - "fPIC": [True, False], - "lite": [True, False], - "with_gflags": [True, False], - "with_snappy": [True, False], - "with_lz4": [True, False], - "with_zlib": [True, False], - "with_zstd": [True, False], - "with_tbb": [True, False], - "with_jemalloc": [True, False], - "enable_sse": [False, "sse42", "avx2"], - "use_rtti": [True, False], - } - default_options = { - "shared": False, - "fPIC": True, - "lite": False, - "with_snappy": False, - "with_lz4": False, - "with_zlib": False, - "with_zstd": False, - "with_gflags": False, - "with_tbb": False, - "with_jemalloc": False, - "enable_sse": False, - "use_rtti": False, - } - - @property - def _min_cppstd(self): - return "11" if Version(self.version) < "8.8.1" else "17" - - @property - def _compilers_minimum_version(self): - return {} if self._min_cppstd == "11" else { - "apple-clang": "10", - "clang": "7", - "gcc": "7", - "msvc": "191", - "Visual Studio": "15", - } - - def export_sources(self): - export_conandata_patches(self) - - def config_options(self): - if self.settings.os == "Windows": - del self.options.fPIC - if self.settings.arch != "x86_64": - del self.options.with_tbb - if self.settings.build_type == "Debug": - self.options.use_rtti = True # Rtti are used in asserts for debug mode... 
- - def configure(self): - if self.options.shared: - self.options.rm_safe("fPIC") - - def layout(self): - cmake_layout(self, src_folder="src") - - def requirements(self): - if self.options.with_gflags: - self.requires("gflags/2.2.2") - if self.options.with_snappy: - self.requires("snappy/1.1.10") - if self.options.with_lz4: - self.requires("lz4/1.10.0") - if self.options.with_zlib: - self.requires("zlib/[>=1.2.11 <2]") - if self.options.with_zstd: - self.requires("zstd/1.5.6") - if self.options.get_safe("with_tbb"): - self.requires("onetbb/2021.12.0") - if self.options.with_jemalloc: - self.requires("jemalloc/5.3.0") - - def validate(self): - if self.settings.compiler.get_safe("cppstd"): - check_min_cppstd(self, self._min_cppstd) - - minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False) - if minimum_version and Version(self.settings.compiler.version) < minimum_version: - raise ConanInvalidConfiguration( - f"{self.ref} requires C++{self._min_cppstd}, which your compiler does not support." - ) - - if self.settings.arch not in ["x86_64", "ppc64le", "ppc64", "mips64", "armv8"]: - raise ConanInvalidConfiguration("Rocksdb requires 64 bits") - - check_min_vs(self, "191") - - if self.version == "6.20.3" and \ - self.settings.os == "Linux" and \ - self.settings.compiler == "gcc" and \ - Version(self.settings.compiler.version) < "5": - raise ConanInvalidConfiguration("Rocksdb 6.20.3 is not compilable with gcc <5.") # See https://github.com/facebook/rocksdb/issues/3522 - - def source(self): - get(self, **self.conan_data["sources"][self.version], strip_root=True) - - def generate(self): - tc = CMakeToolchain(self) - tc.variables["FAIL_ON_WARNINGS"] = False - tc.variables["WITH_TESTS"] = False - tc.variables["WITH_TOOLS"] = False - tc.variables["WITH_CORE_TOOLS"] = False - tc.variables["WITH_BENCHMARK_TOOLS"] = False - tc.variables["WITH_FOLLY_DISTRIBUTED_MUTEX"] = False - if is_msvc(self): - tc.variables["WITH_MD_LIBRARY"] = not is_msvc_static_runtime(self) - tc.variables["ROCKSDB_INSTALL_ON_WINDOWS"] = self.settings.os == "Windows" - tc.variables["ROCKSDB_LITE"] = self.options.lite - tc.variables["WITH_GFLAGS"] = self.options.with_gflags - tc.variables["WITH_SNAPPY"] = self.options.with_snappy - tc.variables["WITH_LZ4"] = self.options.with_lz4 - tc.variables["WITH_ZLIB"] = self.options.with_zlib - tc.variables["WITH_ZSTD"] = self.options.with_zstd - tc.variables["WITH_TBB"] = self.options.get_safe("with_tbb", False) - tc.variables["WITH_JEMALLOC"] = self.options.with_jemalloc - tc.variables["ROCKSDB_BUILD_SHARED"] = self.options.shared - tc.variables["ROCKSDB_LIBRARY_EXPORTS"] = self.settings.os == "Windows" and self.options.shared - tc.variables["ROCKSDB_DLL" ] = self.settings.os == "Windows" and self.options.shared - tc.variables["USE_RTTI"] = self.options.use_rtti - if not bool(self.options.enable_sse): - tc.variables["PORTABLE"] = True - tc.variables["FORCE_SSE42"] = False - elif self.options.enable_sse == "sse42": - tc.variables["PORTABLE"] = True - tc.variables["FORCE_SSE42"] = True - elif self.options.enable_sse == "avx2": - tc.variables["PORTABLE"] = False - tc.variables["FORCE_SSE42"] = False - # not available yet in CCI - tc.variables["WITH_NUMA"] = False - tc.generate() - - deps = CMakeDeps(self) - if self.options.with_jemalloc: - deps.set_property("jemalloc", "cmake_file_name", "JeMalloc") - deps.set_property("jemalloc", "cmake_target_name", "JeMalloc::JeMalloc") - if self.options.with_zstd: - deps.set_property("zstd", "cmake_target_name", "zstd::zstd") 
- deps.generate() - - def build(self): - apply_conandata_patches(self) - cmake = CMake(self) - cmake.configure() - cmake.build() - - def _remove_static_libraries(self): - rm(self, "rocksdb.lib", os.path.join(self.package_folder, "lib")) - for lib in glob.glob(os.path.join(self.package_folder, "lib", "*.a")): - if not lib.endswith(".dll.a"): - os.remove(lib) - - def _remove_cpp_headers(self): - for path in glob.glob(os.path.join(self.package_folder, "include", "rocksdb", "*")): - if path != os.path.join(self.package_folder, "include", "rocksdb", "c.h"): - if os.path.isfile(path): - os.remove(path) - else: - shutil.rmtree(path) - - def package(self): - copy(self, "COPYING", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses")) - copy(self, "LICENSE*", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses")) - cmake = CMake(self) - cmake.install() - if self.options.shared: - self._remove_static_libraries() - self._remove_cpp_headers() # Force stable ABI for shared libraries - rmdir(self, os.path.join(self.package_folder, "lib", "cmake")) - rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig")) - - def package_info(self): - cmake_target = "rocksdb-shared" if self.options.shared else "rocksdb" - self.cpp_info.set_property("cmake_file_name", "RocksDB") - self.cpp_info.set_property("cmake_target_name", f"RocksDB::{cmake_target}") - # TODO: back to global scope in conan v2 once cmake_find_package* generators removed - self.cpp_info.components["librocksdb"].libs = collect_libs(self) - if self.settings.os == "Windows": - self.cpp_info.components["librocksdb"].system_libs = ["shlwapi", "rpcrt4"] - if self.options.shared: - self.cpp_info.components["librocksdb"].defines = ["ROCKSDB_DLL"] - elif self.settings.os in ["Linux", "FreeBSD"]: - self.cpp_info.components["librocksdb"].system_libs = ["pthread", "m"] - if self.options.lite: - self.cpp_info.components["librocksdb"].defines.append("ROCKSDB_LITE") - - # TODO: to remove in conan v2 once cmake_find_package* generators removed - self.cpp_info.names["cmake_find_package"] = "RocksDB" - self.cpp_info.names["cmake_find_package_multi"] = "RocksDB" - self.cpp_info.components["librocksdb"].names["cmake_find_package"] = cmake_target - self.cpp_info.components["librocksdb"].names["cmake_find_package_multi"] = cmake_target - self.cpp_info.components["librocksdb"].set_property("cmake_target_name", f"RocksDB::{cmake_target}") - if self.options.with_gflags: - self.cpp_info.components["librocksdb"].requires.append("gflags::gflags") - if self.options.with_snappy: - self.cpp_info.components["librocksdb"].requires.append("snappy::snappy") - if self.options.with_lz4: - self.cpp_info.components["librocksdb"].requires.append("lz4::lz4") - if self.options.with_zlib: - self.cpp_info.components["librocksdb"].requires.append("zlib::zlib") - if self.options.with_zstd: - self.cpp_info.components["librocksdb"].requires.append("zstd::zstd") - if self.options.get_safe("with_tbb"): - self.cpp_info.components["librocksdb"].requires.append("onetbb::onetbb") - if self.options.with_jemalloc: - self.cpp_info.components["librocksdb"].requires.append("jemalloc::jemalloc") diff --git a/external/rocksdb/patches/9.7.3-0001-memory-leak.patch b/external/rocksdb/patches/9.7.3-0001-memory-leak.patch deleted file mode 100644 index bb086e6cb2..0000000000 --- a/external/rocksdb/patches/9.7.3-0001-memory-leak.patch +++ /dev/null @@ -1,319 +0,0 @@ -diff --git a/HISTORY.md b/HISTORY.md -index 36d472229..05ad1a202 100644 ---- a/HISTORY.md -+++ 
b/HISTORY.md -@@ -1,6 +1,10 @@ - # Rocksdb Change Log - > NOTE: Entries for next release do not go here. Follow instructions in `unreleased_history/README.txt` - -+## 9.7.4 (10/31/2024) -+### Bug Fixes -+* Fix a leak of obsolete blob files left open until DB::Close(). This bug was introduced in version 9.4.0. -+ - ## 9.7.3 (10/16/2024) - ### Behavior Changes - * OPTIONS file to be loaded by remote worker is now preserved so that it does not get purged by the primary host. A similar technique as how we are preserving new SST files from getting purged is used for this. min_options_file_numbers_ is tracked like pending_outputs_ is tracked. -diff --git a/db/blob/blob_file_cache.cc b/db/blob/blob_file_cache.cc -index 5f340aadf..1b9faa238 100644 ---- a/db/blob/blob_file_cache.cc -+++ b/db/blob/blob_file_cache.cc -@@ -42,6 +42,7 @@ Status BlobFileCache::GetBlobFileReader( - assert(blob_file_reader); - assert(blob_file_reader->IsEmpty()); - -+ // NOTE: sharing same Cache with table_cache - const Slice key = GetSliceForKey(&blob_file_number); - - assert(cache_); -@@ -98,4 +99,13 @@ Status BlobFileCache::GetBlobFileReader( - return Status::OK(); - } - -+void BlobFileCache::Evict(uint64_t blob_file_number) { -+ // NOTE: sharing same Cache with table_cache -+ const Slice key = GetSliceForKey(&blob_file_number); -+ -+ assert(cache_); -+ -+ cache_.get()->Erase(key); -+} -+ - } // namespace ROCKSDB_NAMESPACE -diff --git a/db/blob/blob_file_cache.h b/db/blob/blob_file_cache.h -index 740e67ada..6858d012b 100644 ---- a/db/blob/blob_file_cache.h -+++ b/db/blob/blob_file_cache.h -@@ -36,6 +36,15 @@ class BlobFileCache { - uint64_t blob_file_number, - CacheHandleGuard* blob_file_reader); - -+ // Called when a blob file is obsolete to ensure it is removed from the cache -+ // to avoid effectively leaking the open file and assicated memory -+ void Evict(uint64_t blob_file_number); -+ -+ // Used to identify cache entries for blob files (not normally useful) -+ static const Cache::CacheItemHelper* GetHelper() { -+ return CacheInterface::GetBasicHelper(); -+ } -+ - private: - using CacheInterface = - BasicTypedCacheInterface; -diff --git a/db/column_family.h b/db/column_family.h -index e4b7adde8..86637736a 100644 ---- a/db/column_family.h -+++ b/db/column_family.h -@@ -401,6 +401,7 @@ class ColumnFamilyData { - SequenceNumber earliest_seq); - - TableCache* table_cache() const { return table_cache_.get(); } -+ BlobFileCache* blob_file_cache() const { return blob_file_cache_.get(); } - BlobSource* blob_source() const { return blob_source_.get(); } - - // See documentation in compaction_picker.h -diff --git a/db/db_impl/db_impl.cc b/db/db_impl/db_impl.cc -index 261593423..06573ac2e 100644 ---- a/db/db_impl/db_impl.cc -+++ b/db/db_impl/db_impl.cc -@@ -659,8 +659,9 @@ Status DBImpl::CloseHelper() { - // We need to release them before the block cache is destroyed. The block - // cache may be destroyed inside versions_.reset(), when column family data - // list is destroyed, so leaving handles in table cache after -- // versions_.reset() may cause issues. -- // Here we clean all unreferenced handles in table cache. -+ // versions_.reset() may cause issues. Here we clean all unreferenced handles -+ // in table cache, and (for certain builds/conditions) assert that no obsolete -+ // files are hanging around unreferenced (leak) in the table/blob file cache. - // Now we assume all user queries have finished, so only version set itself - // can possibly hold the blocks from block cache. 
After releasing unreferenced - // handles here, only handles held by version set left and inside -@@ -668,6 +669,9 @@ Status DBImpl::CloseHelper() { - // time a handle is released, we erase it from the cache too. By doing that, - // we can guarantee that after versions_.reset(), table cache is empty - // so the cache can be safely destroyed. -+#ifndef NDEBUG -+ TEST_VerifyNoObsoleteFilesCached(/*db_mutex_already_held=*/true); -+#endif // !NDEBUG - table_cache_->EraseUnRefEntries(); - - for (auto& txn_entry : recovered_transactions_) { -@@ -3227,6 +3231,8 @@ Status DBImpl::MultiGetImpl( - s = Status::Aborted(); - break; - } -+ // This could be a long-running operation -+ ROCKSDB_THREAD_YIELD_HOOK(); - } - - // Post processing (decrement reference counts and record statistics) -diff --git a/db/db_impl/db_impl.h b/db/db_impl/db_impl.h -index 5e4fa310b..ccc0abfa7 100644 ---- a/db/db_impl/db_impl.h -+++ b/db/db_impl/db_impl.h -@@ -1241,9 +1241,14 @@ class DBImpl : public DB { - static Status TEST_ValidateOptions(const DBOptions& db_options) { - return ValidateOptions(db_options); - } -- - #endif // NDEBUG - -+ // In certain configurations, verify that the table/blob file cache only -+ // contains entries for live files, to check for effective leaks of open -+ // files. This can only be called when purging of obsolete files has -+ // "settled," such as during parts of DB Close(). -+ void TEST_VerifyNoObsoleteFilesCached(bool db_mutex_already_held) const; -+ - // persist stats to column family "_persistent_stats" - void PersistStats(); - -diff --git a/db/db_impl/db_impl_debug.cc b/db/db_impl/db_impl_debug.cc -index 790a50d7a..67f5b4aaf 100644 ---- a/db/db_impl/db_impl_debug.cc -+++ b/db/db_impl/db_impl_debug.cc -@@ -9,6 +9,7 @@ - - #ifndef NDEBUG - -+#include "db/blob/blob_file_cache.h" - #include "db/column_family.h" - #include "db/db_impl/db_impl.h" - #include "db/error_handler.h" -@@ -328,5 +329,49 @@ size_t DBImpl::TEST_EstimateInMemoryStatsHistorySize() const { - InstrumentedMutexLock l(&const_cast(this)->stats_history_mutex_); - return EstimateInMemoryStatsHistorySize(); - } -+ -+void DBImpl::TEST_VerifyNoObsoleteFilesCached( -+ bool db_mutex_already_held) const { -+ // This check is somewhat expensive and obscure to make a part of every -+ // unit test in every build variety. Thus, we only enable it for ASAN builds. -+ if (!kMustFreeHeapAllocations) { -+ return; -+ } -+ -+ std::optional l; -+ if (db_mutex_already_held) { -+ mutex_.AssertHeld(); -+ } else { -+ l.emplace(&mutex_); -+ } -+ -+ std::vector live_files; -+ for (auto cfd : *versions_->GetColumnFamilySet()) { -+ if (cfd->IsDropped()) { -+ continue; -+ } -+ // Sneakily add both SST and blob files to the same list -+ cfd->current()->AddLiveFiles(&live_files, &live_files); -+ } -+ std::sort(live_files.begin(), live_files.end()); -+ -+ auto fn = [&live_files](const Slice& key, Cache::ObjectPtr, size_t, -+ const Cache::CacheItemHelper* helper) { -+ if (helper != BlobFileCache::GetHelper()) { -+ // Skip non-blob files for now -+ // FIXME: diagnose and fix the leaks of obsolete SST files revealed in -+ // unit tests. 
-+ return; -+ } -+ // See TableCache and BlobFileCache -+ assert(key.size() == sizeof(uint64_t)); -+ uint64_t file_number; -+ GetUnaligned(reinterpret_cast(key.data()), &file_number); -+ // Assert file is in sorted live_files -+ assert( -+ std::binary_search(live_files.begin(), live_files.end(), file_number)); -+ }; -+ table_cache_->ApplyToAllEntries(fn, {}); -+} - } // namespace ROCKSDB_NAMESPACE - #endif // NDEBUG -diff --git a/db/db_iter.cc b/db/db_iter.cc -index e02586377..bf4749eb9 100644 ---- a/db/db_iter.cc -+++ b/db/db_iter.cc -@@ -540,6 +540,8 @@ bool DBIter::FindNextUserEntryInternal(bool skipping_saved_key, - } else { - iter_.Next(); - } -+ // This could be a long-running operation due to tombstones, etc. -+ ROCKSDB_THREAD_YIELD_HOOK(); - } while (iter_.Valid()); - - valid_ = false; -diff --git a/db/table_cache.cc b/db/table_cache.cc -index 71fc29c32..8a5be75e8 100644 ---- a/db/table_cache.cc -+++ b/db/table_cache.cc -@@ -164,6 +164,7 @@ Status TableCache::GetTableReader( - } - - Cache::Handle* TableCache::Lookup(Cache* cache, uint64_t file_number) { -+ // NOTE: sharing same Cache with BlobFileCache - Slice key = GetSliceForFileNumber(&file_number); - return cache->Lookup(key); - } -@@ -179,6 +180,7 @@ Status TableCache::FindTable( - size_t max_file_size_for_l0_meta_pin, Temperature file_temperature) { - PERF_TIMER_GUARD_WITH_CLOCK(find_table_nanos, ioptions_.clock); - uint64_t number = file_meta.fd.GetNumber(); -+ // NOTE: sharing same Cache with BlobFileCache - Slice key = GetSliceForFileNumber(&number); - *handle = cache_.Lookup(key); - TEST_SYNC_POINT_CALLBACK("TableCache::FindTable:0", -diff --git a/db/version_builder.cc b/db/version_builder.cc -index ed8ab8214..c98f53f42 100644 ---- a/db/version_builder.cc -+++ b/db/version_builder.cc -@@ -24,6 +24,7 @@ - #include - - #include "cache/cache_reservation_manager.h" -+#include "db/blob/blob_file_cache.h" - #include "db/blob/blob_file_meta.h" - #include "db/dbformat.h" - #include "db/internal_stats.h" -@@ -744,12 +745,9 @@ class VersionBuilder::Rep { - return Status::Corruption("VersionBuilder", oss.str()); - } - -- // Note: we use C++11 for now but in C++14, this could be done in a more -- // elegant way using generalized lambda capture. -- VersionSet* const vs = version_set_; -- const ImmutableCFOptions* const ioptions = ioptions_; -- -- auto deleter = [vs, ioptions](SharedBlobFileMetaData* shared_meta) { -+ auto deleter = [vs = version_set_, ioptions = ioptions_, -+ bc = cfd_ ? 
cfd_->blob_file_cache() -+ : nullptr](SharedBlobFileMetaData* shared_meta) { - if (vs) { - assert(ioptions); - assert(!ioptions->cf_paths.empty()); -@@ -758,6 +756,9 @@ class VersionBuilder::Rep { - vs->AddObsoleteBlobFile(shared_meta->GetBlobFileNumber(), - ioptions->cf_paths.front().path); - } -+ if (bc) { -+ bc->Evict(shared_meta->GetBlobFileNumber()); -+ } - - delete shared_meta; - }; -@@ -766,7 +767,7 @@ class VersionBuilder::Rep { - blob_file_number, blob_file_addition.GetTotalBlobCount(), - blob_file_addition.GetTotalBlobBytes(), - blob_file_addition.GetChecksumMethod(), -- blob_file_addition.GetChecksumValue(), deleter); -+ blob_file_addition.GetChecksumValue(), std::move(deleter)); - - mutable_blob_file_metas_.emplace( - blob_file_number, MutableBlobFileMetaData(std::move(shared_meta))); -diff --git a/db/version_set.h b/db/version_set.h -index 9336782b1..024f869e7 100644 ---- a/db/version_set.h -+++ b/db/version_set.h -@@ -1514,7 +1514,6 @@ class VersionSet { - void GetLiveFilesMetaData(std::vector* metadata); - - void AddObsoleteBlobFile(uint64_t blob_file_number, std::string path) { -- // TODO: Erase file from BlobFileCache? - obsolete_blob_files_.emplace_back(blob_file_number, std::move(path)); - } - -diff --git a/include/rocksdb/version.h b/include/rocksdb/version.h -index 2a19796b8..0afa2cab1 100644 ---- a/include/rocksdb/version.h -+++ b/include/rocksdb/version.h -@@ -13,7 +13,7 @@ - // minor or major version number planned for release. - #define ROCKSDB_MAJOR 9 - #define ROCKSDB_MINOR 7 --#define ROCKSDB_PATCH 3 -+#define ROCKSDB_PATCH 4 - - // Do not use these. We made the mistake of declaring macros starting with - // double underscore. Now we have to live with our choice. We'll deprecate these -diff --git a/port/port.h b/port/port.h -index 13aa56d47..141716e5b 100644 ---- a/port/port.h -+++ b/port/port.h -@@ -19,3 +19,19 @@ - #elif defined(OS_WIN) - #include "port/win/port_win.h" - #endif -+ -+#ifdef OS_LINUX -+// A temporary hook into long-running RocksDB threads to support modifying their -+// priority etc. This should become a public API hook once the requirements -+// are better understood. -+extern "C" void RocksDbThreadYield() __attribute__((__weak__)); -+#define ROCKSDB_THREAD_YIELD_HOOK() \ -+ { \ -+ if (RocksDbThreadYield) { \ -+ RocksDbThreadYield(); \ -+ } \ -+ } -+#else -+#define ROCKSDB_THREAD_YIELD_HOOK() \ -+ {} -+#endif diff --git a/external/rocksdb/patches/9.x.x-0001-exclude-thirdparty.patch b/external/rocksdb/patches/9.x.x-0001-exclude-thirdparty.patch deleted file mode 100644 index 7b5858bc1e..0000000000 --- a/external/rocksdb/patches/9.x.x-0001-exclude-thirdparty.patch +++ /dev/null @@ -1,30 +0,0 @@ -diff --git a/CMakeLists.txt b/CMakeLists.txt -index 93b884d..b715cb6 100644 ---- a/CMakeLists.txt -+++ b/CMakeLists.txt -@@ -106,14 +106,9 @@ endif() - include(CMakeDependentOption) - - if(MSVC) -- option(WITH_GFLAGS "build with GFlags" OFF) - option(WITH_XPRESS "build with windows built in compression" OFF) -- option(ROCKSDB_SKIP_THIRDPARTY "skip thirdparty.inc" OFF) -- -- if(NOT ROCKSDB_SKIP_THIRDPARTY) -- include(${CMAKE_CURRENT_SOURCE_DIR}/thirdparty.inc) -- endif() --else() -+endif() -+if(TRUE) - if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD" AND NOT CMAKE_SYSTEM_NAME MATCHES "kFreeBSD") - # FreeBSD has jemalloc as default malloc - # but it does not have all the jemalloc files in include/... 
-@@ -126,7 +121,7 @@ else() - endif() - endif() - -- if(MINGW) -+ if(MSVC OR MINGW) - option(WITH_GFLAGS "build with GFlags" OFF) - else() - option(WITH_GFLAGS "build with GFlags" ON) From e7a7bb83c1a3916095274b6ea1095f3b50722333 Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Fri, 25 Jul 2025 14:53:25 +0100 Subject: [PATCH 095/244] VaultWithdraw destination account bugfix (#5572) #5224 added (among other things) a `VaultWithdraw` transaction that allows setting the recipient of the withdrawn funds in the `Destination` transaction field. This technically turns this transaction into a payment, and in some respect the implementation does follow payment rules, e.g. enforcement of `lsfRequireDestTag` or `lsfDepositAuth`, or that MPT transfer has destination `MPToken`. However for IOUs, it missed verification that the destination account has a trust line to the asset issuer. Since the default behavior of `accountSendIOU` is to create this trust line (if missing), this is what `VaultWithdraw` currently does. This is incorrect, since the `Destination` might not be interested in holding the asset in question; this basically enables spammy transfers. This change, therefore, removes automatic creation of a trust line to the `Destination` account in `VaultWithdraw`. --- .../xrpl/protocol/detail/transactions.macro | 1 + src/test/app/Vault_test.cpp | 205 +++++++++++++++++- src/xrpld/app/tx/detail/Escrow.cpp | 8 +- src/xrpld/app/tx/detail/VaultWithdraw.cpp | 36 ++- src/xrpld/ledger/View.h | 81 +++++-- src/xrpld/ledger/detail/View.cpp | 29 ++- 6 files changed, 315 insertions(+), 45 deletions(-) diff --git a/include/xrpl/protocol/detail/transactions.macro b/include/xrpl/protocol/detail/transactions.macro index 38665296cd..89e9a16df5 100644 --- a/include/xrpl/protocol/detail/transactions.macro +++ b/include/xrpl/protocol/detail/transactions.macro @@ -509,6 +509,7 @@ TRANSACTION(ttVAULT_WITHDRAW, 69, VaultWithdraw, Delegation::delegatable, ({ {sfVaultID, soeREQUIRED}, {sfAmount, soeREQUIRED, soeMPTSupported}, {sfDestination, soeOPTIONAL}, + {sfDestinationTag, soeOPTIONAL}, })) /** This transaction claws back tokens from a vault. */ diff --git a/src/test/app/Vault_test.cpp b/src/test/app/Vault_test.cpp index ce97eff24f..f9036719cd 100644 --- a/src/test/app/Vault_test.cpp +++ b/src/test/app/Vault_test.cpp @@ -234,6 +234,28 @@ class Vault_test : public beast::unit_test::suite env(tx, ter{tecNO_PERMISSION}); } + { + testcase(prefix + " fail to withdraw to zero destination"); + auto tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(1000)}); + tx[sfDestination] = "0"; + env(tx, ter(temMALFORMED)); + } + + { + testcase( + prefix + + " fail to withdraw with tag but without destination"); + auto tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(1000)}); + tx[sfDestinationTag] = "0"; + env(tx, ter(temMALFORMED)); + } + if (!asset.raw().native()) { testcase( @@ -1335,6 +1357,7 @@ class Vault_test : public beast::unit_test::suite struct CaseArgs { bool enableClawback = true; + bool requireAuth = true; }; auto testCase = [this]( @@ -1356,16 +1379,20 @@ class Vault_test : public beast::unit_test::suite Vault vault{env}; MPTTester mptt{env, issuer, mptInitNoFund}; + auto const none = LedgerSpecificFlags(0); mptt.create( {.flags = tfMPTCanTransfer | tfMPTCanLock | - (args.enableClawback ? lsfMPTCanClawback - : LedgerSpecificFlags(0)) | - tfMPTRequireAuth}); + (args.enableClawback ? tfMPTCanClawback : none) | + (args.requireAuth ? 
tfMPTRequireAuth : none)}); PrettyAsset asset = mptt.issuanceID(); mptt.authorize({.account = owner}); - mptt.authorize({.account = issuer, .holder = owner}); mptt.authorize({.account = depositor}); - mptt.authorize({.account = issuer, .holder = depositor}); + if (args.requireAuth) + { + mptt.authorize({.account = issuer, .holder = owner}); + mptt.authorize({.account = issuer, .holder = depositor}); + } + env(pay(issuer, depositor, asset(1000))); env.close(); @@ -1514,6 +1541,100 @@ class Vault_test : public beast::unit_test::suite } }); + testCase( + [this]( + Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + PrettyAsset const& asset, + Vault& vault, + MPTTester& mptt) { + testcase( + "MPT 3rd party without MPToken cannot be withdrawal " + "destination"); + + auto [tx, keylet] = + vault.create({.owner = owner, .asset = asset}); + env(tx); + env.close(); + + tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(100)}); + env(tx); + env.close(); + + { + // Set destination to 3rd party without MPToken + Account charlie{"charlie"}; + env.fund(XRP(1000), charlie); + env.close(); + + tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(100)}); + tx[sfDestination] = charlie.human(); + env(tx, ter(tecNO_AUTH)); + } + }, + {.requireAuth = false}); + + testCase( + [this]( + Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + PrettyAsset const& asset, + Vault& vault, + MPTTester& mptt) { + testcase("MPT depositor without MPToken cannot withdraw"); + + auto [tx, keylet] = + vault.create({.owner = owner, .asset = asset}); + env(tx); + env.close(); + + tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(1000)}); + env(tx); + env.close(); + + { + // Remove depositor's MPToken and withdraw will fail + mptt.authorize( + {.account = depositor, .flags = tfMPTUnauthorize}); + env.close(); + auto const mptoken = + env.le(keylet::mptoken(mptt.issuanceID(), depositor)); + BEAST_EXPECT(mptoken == nullptr); + + tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(100)}); + env(tx, ter(tecNO_AUTH)); + } + + { + // Restore depositor's MPToken and withdraw will succeed + mptt.authorize({.account = depositor}); + env.close(); + + tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(100)}); + env(tx); + } + }, + {.requireAuth = false}); + testCase([this]( Env& env, Account const& issuer, @@ -1803,6 +1924,7 @@ class Vault_test : public beast::unit_test::suite PrettyAsset const asset = issuer["IOU"]; env.trust(asset(1000), owner); + env.trust(asset(1000), charlie); env(pay(issuer, owner, asset(200))); env(rate(issuer, 1.25)); env.close(); @@ -2118,6 +2240,79 @@ class Vault_test : public beast::unit_test::suite env.close(); }); + testCase([&, this]( + Env& env, + Account const& owner, + Account const& issuer, + Account const& charlie, + auto, + Vault& vault, + PrettyAsset const& asset, + auto&&...) 
{ + testcase("IOU no trust line to 3rd party"); + + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + env(tx); + env.close(); + + env(vault.deposit( + {.depositor = owner, .id = keylet.key, .amount = asset(100)})); + env.close(); + + Account const erin{"erin"}; + env.fund(XRP(1000), erin); + env.close(); + + // Withdraw to 3rd party without trust line + auto const tx1 = [&](ripple::Keylet keylet) { + auto tx = vault.withdraw( + {.depositor = owner, + .id = keylet.key, + .amount = asset(10)}); + tx[sfDestination] = erin.human(); + return tx; + }(keylet); + env(tx1, ter{tecNO_LINE}); + }); + + testCase([&, this]( + Env& env, + Account const& owner, + Account const& issuer, + Account const& charlie, + auto, + Vault& vault, + PrettyAsset const& asset, + auto&&...) { + testcase("IOU no trust line to depositor"); + + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + env(tx); + env.close(); + + // reset limit, so deposit of all funds will delete the trust line + env.trust(asset(0), owner); + env.close(); + + env(vault.deposit( + {.depositor = owner, .id = keylet.key, .amount = asset(200)})); + env.close(); + + auto trustline = + env.le(keylet::line(owner, asset.raw().get())); + BEAST_EXPECT(trustline == nullptr); + + // Withdraw without trust line, will succeed + auto const tx1 = [&](ripple::Keylet keylet) { + auto tx = vault.withdraw( + {.depositor = owner, + .id = keylet.key, + .amount = asset(10)}); + return tx; + }(keylet); + env(tx1); + }); + testCase([&, this]( Env& env, Account const& owner, diff --git a/src/xrpld/app/tx/detail/Escrow.cpp b/src/xrpld/app/tx/detail/Escrow.cpp index c62c38c675..dd0ffac778 100644 --- a/src/xrpld/app/tx/detail/Escrow.cpp +++ b/src/xrpld/app/tx/detail/Escrow.cpp @@ -315,14 +315,14 @@ escrowCreatePreclaimHelper( // authorized auto const& mptIssue = amount.get(); if (auto const ter = - requireAuth(ctx.view, mptIssue, account, MPTAuthType::WeakAuth); + requireAuth(ctx.view, mptIssue, account, AuthType::WeakAuth); ter != tesSUCCESS) return ter; // If the issuer has requireAuth set, check if the destination is // authorized if (auto const ter = - requireAuth(ctx.view, mptIssue, dest, MPTAuthType::WeakAuth); + requireAuth(ctx.view, mptIssue, dest, AuthType::WeakAuth); ter != tesSUCCESS) return ter; @@ -746,7 +746,7 @@ escrowFinishPreclaimHelper( // authorized auto const& mptIssue = amount.get(); if (auto const ter = - requireAuth(ctx.view, mptIssue, dest, MPTAuthType::WeakAuth); + requireAuth(ctx.view, mptIssue, dest, AuthType::WeakAuth); ter != tesSUCCESS) return ter; @@ -1259,7 +1259,7 @@ escrowCancelPreclaimHelper( // authorized auto const& mptIssue = amount.get(); if (auto const ter = - requireAuth(ctx.view, mptIssue, account, MPTAuthType::WeakAuth); + requireAuth(ctx.view, mptIssue, account, AuthType::WeakAuth); ter != tesSUCCESS) return ter; diff --git a/src/xrpld/app/tx/detail/VaultWithdraw.cpp b/src/xrpld/app/tx/detail/VaultWithdraw.cpp index 7a8605cdbd..09a9fd14e1 100644 --- a/src/xrpld/app/tx/detail/VaultWithdraw.cpp +++ b/src/xrpld/app/tx/detail/VaultWithdraw.cpp @@ -52,9 +52,19 @@ VaultWithdraw::preflight(PreflightContext const& ctx) return temBAD_AMOUNT; if (auto const destination = ctx.tx[~sfDestination]; - destination && *destination == beast::zero) + destination.has_value()) { - JLOG(ctx.j.debug()) << "VaultWithdraw: zero/empty destination account."; + if (*destination == beast::zero) + { + JLOG(ctx.j.debug()) + << "VaultWithdraw: zero/empty destination account."; + return temMALFORMED; + } + } + else if 
(ctx.tx.isFieldPresent(sfDestinationTag)) + { + JLOG(ctx.j.debug()) << "VaultWithdraw: sfDestinationTag is set but " + "sfDestination is not"; return temMALFORMED; } @@ -123,33 +133,39 @@ VaultWithdraw::preclaim(PreclaimContext const& ctx) // Withdrawal to a 3rd party destination account is essentially a transfer, // via shares in the vault. Enforce all the usual asset transfer checks. + AuthType authType = AuthType::Legacy; if (account != dstAcct) { auto const sleDst = ctx.view.read(keylet::account(dstAcct)); if (sleDst == nullptr) return tecNO_DST; - if (sleDst->getFlags() & lsfRequireDestTag) + if (sleDst->isFlag(lsfRequireDestTag) && + !ctx.tx.isFieldPresent(sfDestinationTag)) return tecDST_TAG_NEEDED; // Cannot send without a tag - if (sleDst->getFlags() & lsfDepositAuth) + if (sleDst->isFlag(lsfDepositAuth)) { if (!ctx.view.exists(keylet::depositPreauth(dstAcct, account))) return tecNO_PERMISSION; } + // The destination account must have consented to receive the asset by + // creating a RippleState or MPToken + authType = AuthType::StrongAuth; } - // Destination MPToken must exist (if asset is an MPT) - if (auto const ter = requireAuth(ctx.view, vaultAsset, dstAcct); + // Destination MPToken (for an MPT) or trust line (for an IOU) must exist + // if not sending to Account. + if (auto const ter = requireAuth(ctx.view, vaultAsset, dstAcct, authType); !isTesSuccess(ter)) return ter; // Cannot withdraw from a Vault an Asset frozen for the destination account - if (isFrozen(ctx.view, dstAcct, vaultAsset)) - return vaultAsset.holds() ? tecFROZEN : tecLOCKED; + if (auto const ret = checkFrozen(ctx.view, dstAcct, vaultAsset)) + return ret; - if (isFrozen(ctx.view, account, vaultShare)) - return tecLOCKED; + if (auto const ret = checkFrozen(ctx.view, account, vaultShare)) + return ret; return tesSUCCESS; } diff --git a/src/xrpld/ledger/View.h b/src/xrpld/ledger/View.h index 8c391499b6..fc9360734d 100644 --- a/src/xrpld/ledger/View.h +++ b/src/xrpld/ledger/View.h @@ -175,6 +175,29 @@ isFrozen( asset.value()); } +[[nodiscard]] inline TER +checkFrozen(ReadView const& view, AccountID const& account, Issue const& issue) +{ + return isFrozen(view, account, issue) ? (TER)tecFROZEN : (TER)tesSUCCESS; +} + +[[nodiscard]] inline TER +checkFrozen( + ReadView const& view, + AccountID const& account, + MPTIssue const& mptIssue) +{ + return isFrozen(view, account, mptIssue) ? (TER)tecLOCKED : (TER)tesSUCCESS; +} + +[[nodiscard]] inline TER +checkFrozen(ReadView const& view, AccountID const& account, Asset const& asset) +{ + return std::visit( + [&](auto const& issue) { return checkFrozen(view, account, issue); }, + asset.value()); +} + [[nodiscard]] bool isAnyFrozen( ReadView const& view, @@ -725,19 +748,40 @@ transferXRP( STAmount const& amount, beast::Journal j); -/* Check if MPToken exists: - * - StrongAuth - before checking lsfMPTRequireAuth is set - * - WeakAuth - after checking if lsfMPTRequireAuth is set +/* Check if MPToken (for MPT) or trust line (for IOU) exists: + * - StrongAuth - before checking if authorization is required + * - WeakAuth + * for MPT - after checking lsfMPTRequireAuth flag + * for IOU - do not check if trust line exists + * - Legacy + * for MPT - before checking lsfMPTRequireAuth flag i.e. same as StrongAuth + * for IOU - do not check if trust line exists i.e. same as WeakAuth */ -enum class MPTAuthType : bool { StrongAuth = true, WeakAuth = false }; +enum class AuthType { StrongAuth, WeakAuth, Legacy }; /** Check if the account lacks required authorization. 
* - * Return tecNO_AUTH or tecNO_LINE if it does - * and tesSUCCESS otherwise. + * Return tecNO_AUTH or tecNO_LINE if it does + * and tesSUCCESS otherwise. + * + * If StrongAuth then return tecNO_LINE if the RippleState doesn't exist. Return + * tecNO_AUTH if lsfRequireAuth is set on the issuer's AccountRoot, and the + * RippleState does exist, and the RippleState is not authorized. + * + * If WeakAuth then return tecNO_AUTH if lsfRequireAuth is set, and the + * RippleState exists, and is not authorized. Return tecNO_LINE if + * lsfRequireAuth is set and the RippleState doesn't exist. Consequently, if + * WeakAuth and lsfRequireAuth is *not* set, this function will return + * tesSUCCESS even if RippleState does *not* exist. + * + * The default "Legacy" auth type is equivalent to WeakAuth. */ [[nodiscard]] TER -requireAuth(ReadView const& view, Issue const& issue, AccountID const& account); +requireAuth( + ReadView const& view, + Issue const& issue, + AccountID const& account, + AuthType authType = AuthType::Legacy); /** Check if the account lacks required authorization. * @@ -751,32 +795,33 @@ requireAuth(ReadView const& view, Issue const& issue, AccountID const& account); * purely defensive, as we currently do not allow such vaults to be created. * * If StrongAuth then return tecNO_AUTH if MPToken doesn't exist or - * lsfMPTRequireAuth is set and MPToken is not authorized. If WeakAuth then - * return tecNO_AUTH if lsfMPTRequireAuth is set and MPToken doesn't exist or is - * not authorized (explicitly or via credentials, if DomainID is set in - * MPTokenIssuance). Consequently, if WeakAuth and lsfMPTRequireAuth is *not* - * set, this function will return true even if MPToken does *not* exist. + * lsfMPTRequireAuth is set and MPToken is not authorized. + * + * If WeakAuth then return tecNO_AUTH if lsfMPTRequireAuth is set and MPToken + * doesn't exist or is not authorized (explicitly or via credentials, if + * DomainID is set in MPTokenIssuance). Consequently, if WeakAuth and + * lsfMPTRequireAuth is *not* set, this function will return true even if + * MPToken does *not* exist. + * + * The default "Legacy" auth type is equivalent to StrongAuth. 
*/ [[nodiscard]] TER requireAuth( ReadView const& view, MPTIssue const& mptIssue, AccountID const& account, - MPTAuthType authType = MPTAuthType::StrongAuth, + AuthType authType = AuthType::Legacy, int depth = 0); [[nodiscard]] TER inline requireAuth( ReadView const& view, Asset const& asset, AccountID const& account, - MPTAuthType authType = MPTAuthType::StrongAuth) + AuthType authType = AuthType::Legacy) { return std::visit( [&](TIss const& issue_) { - if constexpr (std::is_same_v) - return requireAuth(view, issue_, account); - else - return requireAuth(view, issue_, account, authType); + return requireAuth(view, issue_, account, authType); }, asset.value()); } diff --git a/src/xrpld/ledger/detail/View.cpp b/src/xrpld/ledger/detail/View.cpp index cb95819014..1f616ed491 100644 --- a/src/xrpld/ledger/detail/View.cpp +++ b/src/xrpld/ledger/detail/View.cpp @@ -505,8 +505,8 @@ accountHolds( if (zeroIfUnauthorized == ahZERO_IF_UNAUTHORIZED && view.rules().enabled(featureSingleAssetVault)) { - if (auto const err = requireAuth( - view, mptIssue, account, MPTAuthType::StrongAuth); + if (auto const err = + requireAuth(view, mptIssue, account, AuthType::StrongAuth); !isTesSuccess(err)) amount.clear(mptIssue); } @@ -2298,15 +2298,27 @@ transferXRP( } TER -requireAuth(ReadView const& view, Issue const& issue, AccountID const& account) +requireAuth( + ReadView const& view, + Issue const& issue, + AccountID const& account, + AuthType authType) { if (isXRP(issue) || issue.account == account) return tesSUCCESS; + + auto const trustLine = + view.read(keylet::line(account, issue.account, issue.currency)); + // If account has no line, and this is a strong check, fail + if (!trustLine && authType == AuthType::StrongAuth) + return tecNO_LINE; + + // If this is a weak or legacy check, or if the account has a line, fail if + // auth is required and not set on the line if (auto const issuerAccount = view.read(keylet::account(issue.account)); issuerAccount && (*issuerAccount)[sfFlags] & lsfRequireAuth) { - if (auto const trustLine = - view.read(keylet::line(account, issue.account, issue.currency))) + if (trustLine) return ((*trustLine)[sfFlags] & ((account > issue.account) ? lsfLowAuth : lsfHighAuth)) ? 
tesSUCCESS @@ -2322,7 +2334,7 @@ requireAuth( ReadView const& view, MPTIssue const& mptIssue, AccountID const& account, - MPTAuthType authType, + AuthType authType, int depth) { auto const mptID = keylet::mptIssuance(mptIssue.getMptID()); @@ -2357,7 +2369,7 @@ requireAuth( if (auto const err = std::visit( [&](TIss const& issue) { if constexpr (std::is_same_v) - return requireAuth(view, issue, account); + return requireAuth(view, issue, account, authType); else return requireAuth( view, issue, account, authType, depth + 1); @@ -2372,7 +2384,8 @@ requireAuth( auto const sleToken = view.read(mptokenID); // if account has no MPToken, fail - if (!sleToken && authType == MPTAuthType::StrongAuth) + if (!sleToken && + (authType == AuthType::StrongAuth || authType == AuthType::Legacy)) return tecNO_AUTH; // Note, this check is not amendment-gated because DomainID will be always From 921aef9934fba3a947fe3d6d38de756abd3a6adc Mon Sep 17 00:00:00 2001 From: Bart Date: Fri, 25 Jul 2025 11:54:02 -0400 Subject: [PATCH 096/244] Updates Conan dependencies: Boost 1.86 (#5264) --- BUILD.md | 57 +-------------------------- conan/profiles/default | 3 -- conanfile.py | 2 +- external/soci/conanfile.py | 4 +- include/xrpl/basics/Log.h | 1 + src/libxrpl/basics/FileUtilities.cpp | 6 ++- src/test/jtx/TrustedPublisherServer.h | 13 +++--- src/test/server/ServerStatus_test.cpp | 2 +- src/test/unit_test/FileDirGuard.h | 2 + src/xrpld/app/main/Application.cpp | 2 +- src/xrpld/app/main/Main.cpp | 2 +- 11 files changed, 21 insertions(+), 73 deletions(-) diff --git a/BUILD.md b/BUILD.md index 8e2d5ffb2b..072e38af93 100644 --- a/BUILD.md +++ b/BUILD.md @@ -370,18 +370,13 @@ and can be helpful for detecting `#include` omissions. ## Troubleshooting - ### Conan After any updates or changes to dependencies, you may need to do the following: 1. Remove your build directory. -2. Remove the Conan cache: - ``` - rm -rf ~/.conan/data - ``` -4. Re-run [conan install](#build-and-test). - +2. Remove the Conan cache: `conan remove "*" -c` +3. Re-run [conan install](#build-and-test). ### 'protobuf/port_def.inc' file not found @@ -399,54 +394,6 @@ For example, if you want to build Debug: 1. For conan install, pass `--settings build_type=Debug` 2. For cmake, pass `-DCMAKE_BUILD_TYPE=Debug` - -### no std::result_of - -If your compiler version is recent enough to have removed `std::result_of` as -part of C++20, e.g. Apple Clang 15.0, then you might need to add a preprocessor -definition to your build. - -``` -conan profile update 'options.boost:extra_b2_flags="define=BOOST_ASIO_HAS_STD_INVOKE_RESULT"' default -conan profile update 'env.CFLAGS="-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"' default -conan profile update 'env.CXXFLAGS="-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"' default -conan profile update 'conf.tools.build:cflags+=["-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"]' default -conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"]' default -``` - - -### call to 'async_teardown' is ambiguous - -If you are compiling with an early version of Clang 16, then you might hit -a [regression][6] when compiling C++20 that manifests as an [error in a Boost -header][7]. 
You can workaround it by adding this preprocessor definition: - -``` -conan profile update 'env.CXXFLAGS="-DBOOST_ASIO_DISABLE_CONCEPTS"' default -conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_ASIO_DISABLE_CONCEPTS"]' default -``` - - -### recompile with -fPIC - -If you get a linker error suggesting that you recompile Boost with -position-independent code, such as: - -``` -/usr/bin/ld.gold: error: /home/username/.conan/data/boost/1.77.0/_/_/package/.../lib/libboost_container.a(alloc_lib.o): - requires unsupported dynamic reloc 11; recompile with -fPIC -``` - -Conan most likely downloaded a bad binary distribution of the dependency. -This seems to be a [bug][1] in Conan just for Boost 1.77.0 compiled with GCC -for Linux. The solution is to build the dependency locally by passing -`--build boost` when calling `conan install`. - -``` -conan install --build boost ... -``` - - ## Add a Dependency If you want to experiment with a new package, follow these steps: diff --git a/conan/profiles/default b/conan/profiles/default index 0417704f8a..3a7bcda1c6 100644 --- a/conan/profiles/default +++ b/conan/profiles/default @@ -26,9 +26,6 @@ tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw'] {% if compiler == "apple-clang" and compiler_version >= 17 %} tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw'] {% endif %} -{% if compiler == "clang" and compiler_version == 16 %} -tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS'] -{% endif %} {% if compiler == "gcc" and compiler_version < 13 %} tools.build:cxxflags=['-Wno-restrict'] {% endif %} diff --git a/conanfile.py b/conanfile.py index bad4dfd111..399c9d6e1f 100644 --- a/conanfile.py +++ b/conanfile.py @@ -104,7 +104,7 @@ class Xrpl(ConanFile): def requirements(self): # Conan 2 requires transitive headers to be specified transitive_headers_opt = {'transitive_headers': True} if conan_version.split('.')[0] == '2' else {} - self.requires('boost/1.83.0', force=True, **transitive_headers_opt) + self.requires('boost/1.86.0', force=True, **transitive_headers_opt) self.requires('date/3.0.4', **transitive_headers_opt) self.requires('lz4/1.10.0', force=True) self.requires('protobuf/3.21.12', force=True) diff --git a/external/soci/conanfile.py b/external/soci/conanfile.py index 7e611493d7..fe4c54e53e 100644 --- a/external/soci/conanfile.py +++ b/external/soci/conanfile.py @@ -70,7 +70,7 @@ class SociConan(ConanFile): if self.options.with_postgresql: self.requires("libpq/15.5") if self.options.with_boost: - self.requires("boost/1.83.0") + self.requires("boost/1.86.0") @property def _minimum_compilers_version(self): @@ -154,7 +154,7 @@ class SociConan(ConanFile): self.cpp_info.components["soci_core"].set_property("cmake_target_name", "SOCI::soci_core{}".format(target_suffix)) self.cpp_info.components["soci_core"].libs = ["{}soci_core{}".format(lib_prefix, lib_suffix)] if self.options.with_boost: - self.cpp_info.components["soci_core"].requires.append("boost::boost") + self.cpp_info.components["soci_core"].requires.append("boost::headers") # soci_empty if self.options.empty: diff --git a/include/xrpl/basics/Log.h b/include/xrpl/basics/Log.h index 2506b8ea8d..833907eb9c 100644 --- a/include/xrpl/basics/Log.h +++ b/include/xrpl/basics/Log.h @@ -26,6 +26,7 @@ #include #include +#include #include #include #include diff --git a/src/libxrpl/basics/FileUtilities.cpp b/src/libxrpl/basics/FileUtilities.cpp index 291eb43c7b..ffb9792614 100644 --- a/src/libxrpl/basics/FileUtilities.cpp +++ 
b/src/libxrpl/basics/FileUtilities.cpp @@ -28,6 +28,7 @@ #include #include +#include #include #include #include @@ -55,7 +56,7 @@ getFileContents( return {}; } - ifstream fileStream(fullPath, std::ios::in); + std::ifstream fileStream(fullPath.string(), std::ios::in); if (!fileStream) { @@ -85,7 +86,8 @@ writeFileContents( using namespace boost::filesystem; using namespace boost::system::errc; - ofstream fileStream(destPath, std::ios::out | std::ios::trunc); + std::ofstream fileStream( + destPath.string(), std::ios::out | std::ios::trunc); if (!fileStream) { diff --git a/src/test/jtx/TrustedPublisherServer.h b/src/test/jtx/TrustedPublisherServer.h index 54538032f5..7bc092cbe3 100644 --- a/src/test/jtx/TrustedPublisherServer.h +++ b/src/test/jtx/TrustedPublisherServer.h @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -220,9 +221,8 @@ public: getList_ = [blob = blob, sig, manifest, version](int interval) { // Build the contents of a version 1 format UNL file std::stringstream l; - l << "{\"blob\":\"" << blob << "\"" - << ",\"signature\":\"" << sig << "\"" - << ",\"manifest\":\"" << manifest << "\"" + l << "{\"blob\":\"" << blob << "\"" << ",\"signature\":\"" << sig + << "\"" << ",\"manifest\":\"" << manifest << "\"" << ",\"refresh_interval\": " << interval << ",\"version\":" << version << '}'; return l.str(); @@ -257,15 +257,14 @@ public: std::stringstream l; for (auto const& info : blobInfo) { - l << "{\"blob\":\"" << info.blob << "\"" - << ",\"signature\":\"" << info.signature << "\"},"; + l << "{\"blob\":\"" << info.blob << "\"" << ",\"signature\":\"" + << info.signature << "\"},"; } std::string blobs = l.str(); blobs.pop_back(); l.str(std::string()); l << "{\"blobs_v2\": [ " << blobs << "],\"manifest\":\"" << manifest - << "\"" - << ",\"refresh_interval\": " << interval + << "\"" << ",\"refresh_interval\": " << interval << ",\"version\":" << (version + 1) << '}'; return l.str(); }; diff --git a/src/test/server/ServerStatus_test.cpp b/src/test/server/ServerStatus_test.cpp index bcd355e301..b27dee6e0a 100644 --- a/src/test/server/ServerStatus_test.cpp +++ b/src/test/server/ServerStatus_test.cpp @@ -681,7 +681,7 @@ class ServerStatus_test : public beast::unit_test::suite, resp["Upgrade"] == "websocket"); BEAST_EXPECT( resp.find("Connection") != resp.end() && - resp["Connection"] == "upgrade"); + resp["Connection"] == "Upgrade"); } void diff --git a/src/test/unit_test/FileDirGuard.h b/src/test/unit_test/FileDirGuard.h index d247ae3015..091bc80d20 100644 --- a/src/test/unit_test/FileDirGuard.h +++ b/src/test/unit_test/FileDirGuard.h @@ -26,6 +26,8 @@ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
#include +#include + namespace ripple { namespace test { namespace detail { diff --git a/src/xrpld/app/main/Application.cpp b/src/xrpld/app/main/Application.cpp index ea0b794116..c824eccfba 100644 --- a/src/xrpld/app/main/Application.cpp +++ b/src/xrpld/app/main/Application.cpp @@ -79,7 +79,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/src/xrpld/app/main/Main.cpp b/src/xrpld/app/main/Main.cpp index e926a38563..19c8c9910d 100644 --- a/src/xrpld/app/main/Main.cpp +++ b/src/xrpld/app/main/Main.cpp @@ -39,7 +39,7 @@ #include #include -#include +#include #include #include From 7179ce9c58c395dc6c44b1cf15b89b8b2114e889 Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Fri, 25 Jul 2025 20:48:22 +0100 Subject: [PATCH 097/244] Build options cleanup (#5581) As we no longer support old compiler versions, we are bringing back some warnings by removing no longer relevant `-Wno-...` options. --- cmake/RippledCompiler.cmake | 23 ++------- external/ed25519-donna/CMakeLists.txt | 3 ++ .../detail/aged_unordered_container.h | 1 - src/libxrpl/protocol/PublicKey.cpp | 5 +- src/libxrpl/protocol/STTx.cpp | 4 +- src/libxrpl/protocol/tokens.cpp | 4 +- src/test/app/RCLValidations_test.cpp | 1 - .../beast/aged_associative_container_test.cpp | 48 ------------------- src/test/consensus/LedgerTrie_test.cpp | 2 - src/test/consensus/Validations_test.cpp | 1 - src/xrpld/rpc/handlers/LogLevel.cpp | 1 - 11 files changed, 15 insertions(+), 78 deletions(-) diff --git a/cmake/RippledCompiler.cmake b/cmake/RippledCompiler.cmake index 7485605d95..30058fd503 100644 --- a/cmake/RippledCompiler.cmake +++ b/cmake/RippledCompiler.cmake @@ -90,28 +90,15 @@ if (MSVC) -errorreport:none -machine:X64) else () - # HACK : because these need to come first, before any warning demotion - string (APPEND CMAKE_CXX_FLAGS " -Wall -Wdeprecated") - if (wextra) - string (APPEND CMAKE_CXX_FLAGS " -Wextra -Wno-unused-parameter") - endif () - # not MSVC target_compile_options (common INTERFACE + -Wall + -Wdeprecated + $<$:-Wextra -Wno-unused-parameter> $<$:-Werror> - $<$: - -frtti - -Wnon-virtual-dtor - > - -Wno-sign-compare - -Wno-char-subscripts - -Wno-format - -Wno-unused-local-typedefs -fstack-protector - $<$: - -Wno-unused-but-set-variable - -Wno-deprecated - > + -Wno-sign-compare + -Wno-unused-but-set-variable $<$>:-fno-strict-aliasing> # tweak gcc optimization for debug $<$,$>:-O0> diff --git a/external/ed25519-donna/CMakeLists.txt b/external/ed25519-donna/CMakeLists.txt index 418dc38326..f060d530aa 100644 --- a/external/ed25519-donna/CMakeLists.txt +++ b/external/ed25519-donna/CMakeLists.txt @@ -17,6 +17,9 @@ add_library(ed25519 STATIC ) add_library(ed25519::ed25519 ALIAS ed25519) target_link_libraries(ed25519 PUBLIC OpenSSL::SSL) +if(NOT MSVC) + target_compile_options(ed25519 PRIVATE -Wno-implicit-fallthrough) +endif() include(GNUInstallDirs) diff --git a/include/xrpl/beast/container/detail/aged_unordered_container.h b/include/xrpl/beast/container/detail/aged_unordered_container.h index 3b9c83a014..23200ae007 100644 --- a/include/xrpl/beast/container/detail/aged_unordered_container.h +++ b/include/xrpl/beast/container/detail/aged_unordered_container.h @@ -3257,7 +3257,6 @@ operator==(aged_unordered_container< { if (size() != other.size()) return false; - using EqRng = std::pair; for (auto iter(cbegin()), last(cend()); iter != last;) { auto const& k(extract(*iter)); diff --git a/src/libxrpl/protocol/PublicKey.cpp b/src/libxrpl/protocol/PublicKey.cpp index cdf646e0f8..54b50c80ef 100644 --- 
a/src/libxrpl/protocol/PublicKey.cpp +++ b/src/libxrpl/protocol/PublicKey.cpp @@ -107,8 +107,9 @@ sliceToHex(Slice const& slice) } for (int i = 0; i < slice.size(); ++i) { - s += "0123456789ABCDEF"[((slice[i] & 0xf0) >> 4)]; - s += "0123456789ABCDEF"[((slice[i] & 0x0f) >> 0)]; + constexpr char hex[] = "0123456789ABCDEF"; + s += hex[((slice[i] & 0xf0) >> 4)]; + s += hex[((slice[i] & 0x0f) >> 0)]; } return s; } diff --git a/src/libxrpl/protocol/STTx.cpp b/src/libxrpl/protocol/STTx.cpp index 615012dba4..8be8f906a5 100644 --- a/src/libxrpl/protocol/STTx.cpp +++ b/src/libxrpl/protocol/STTx.cpp @@ -671,12 +671,12 @@ isMemoOkay(STObject const& st, std::string& reason) "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "abcdefghijklmnopqrstuvwxyz"); - for (char c : symbols) + for (unsigned char c : symbols) a[c] = 1; return a; }(); - for (auto c : *optData) + for (unsigned char c : *optData) { if (!allowedSymbols[c]) { diff --git a/src/libxrpl/protocol/tokens.cpp b/src/libxrpl/protocol/tokens.cpp index a822b1937f..52cffd7a5c 100644 --- a/src/libxrpl/protocol/tokens.cpp +++ b/src/libxrpl/protocol/tokens.cpp @@ -544,7 +544,7 @@ b58_to_b256_be(std::string_view input, std::span out) XRPL_ASSERT( num_b_58_10_coeffs <= b_58_10_coeff.size(), "ripple::b58_fast::detail::b58_to_b256_be : maximum coeff"); - for (auto c : input.substr(0, partial_coeff_len)) + for (unsigned char c : input.substr(0, partial_coeff_len)) { auto cur_val = ::ripple::alphabetReverse[c]; if (cur_val < 0) @@ -558,7 +558,7 @@ b58_to_b256_be(std::string_view input, std::span out) { for (int j = 0; j < num_full_coeffs; ++j) { - auto c = input[partial_coeff_len + j * 10 + i]; + unsigned char c = input[partial_coeff_len + j * 10 + i]; auto cur_val = ::ripple::alphabetReverse[c]; if (cur_val < 0) { diff --git a/src/test/app/RCLValidations_test.cpp b/src/test/app/RCLValidations_test.cpp index 31c38f23b1..fce4e94048 100644 --- a/src/test/app/RCLValidations_test.cpp +++ b/src/test/app/RCLValidations_test.cpp @@ -229,7 +229,6 @@ class RCLValidations_test : public beast::unit_test::suite // support for a ledger hash which is already in the trie. 
using Seq = RCLValidatedLedger::Seq; - using ID = RCLValidatedLedger::ID; // Max known ancestors for each ledger Seq const maxAncestors = 256; diff --git a/src/test/beast/aged_associative_container_test.cpp b/src/test/beast/aged_associative_container_test.cpp index f88d5acc27..586f486872 100644 --- a/src/test/beast/aged_associative_container_test.cpp +++ b/src/test/beast/aged_associative_container_test.cpp @@ -703,10 +703,6 @@ aged_associative_container_test_base::checkContentsRefRef( Values const& v) { using Cont = typename std::remove_reference::type; - using Traits = TestTraits< - Cont::is_unordered::value, - Cont::is_multi::value, - Cont::is_map::value>; using size_type = typename Cont::size_type; BEAST_EXPECT(c.size() == v.size()); @@ -761,10 +757,6 @@ typename std::enable_if::type aged_associative_container_test_base::testConstructEmpty() { using Traits = TestTraits; - using Value = typename Traits::Value; - using Key = typename Traits::Key; - using T = typename Traits::T; - using Clock = typename Traits::Clock; using Comp = typename Traits::Comp; using Alloc = typename Traits::Alloc; using MyComp = typename Traits::MyComp; @@ -802,10 +794,6 @@ typename std::enable_if::type aged_associative_container_test_base::testConstructEmpty() { using Traits = TestTraits; - using Value = typename Traits::Value; - using Key = typename Traits::Key; - using T = typename Traits::T; - using Clock = typename Traits::Clock; using Hash = typename Traits::Hash; using Equal = typename Traits::Equal; using Alloc = typename Traits::Alloc; @@ -870,10 +858,6 @@ typename std::enable_if::type aged_associative_container_test_base::testConstructRange() { using Traits = TestTraits; - using Value = typename Traits::Value; - using Key = typename Traits::Key; - using T = typename Traits::T; - using Clock = typename Traits::Clock; using Comp = typename Traits::Comp; using Alloc = typename Traits::Alloc; using MyComp = typename Traits::MyComp; @@ -925,10 +909,6 @@ typename std::enable_if::type aged_associative_container_test_base::testConstructRange() { using Traits = TestTraits; - using Value = typename Traits::Value; - using Key = typename Traits::Key; - using T = typename Traits::T; - using Clock = typename Traits::Clock; using Hash = typename Traits::Hash; using Equal = typename Traits::Equal; using Alloc = typename Traits::Alloc; @@ -996,14 +976,6 @@ typename std::enable_if::type aged_associative_container_test_base::testConstructInitList() { using Traits = TestTraits; - using Value = typename Traits::Value; - using Key = typename Traits::Key; - using T = typename Traits::T; - using Clock = typename Traits::Clock; - using Comp = typename Traits::Comp; - using Alloc = typename Traits::Alloc; - using MyComp = typename Traits::MyComp; - using MyAlloc = typename Traits::MyAlloc; typename Traits::ManualClock clock; // testcase (Traits::name() + " init-list"); @@ -1020,16 +992,6 @@ typename std::enable_if::type aged_associative_container_test_base::testConstructInitList() { using Traits = TestTraits; - using Value = typename Traits::Value; - using Key = typename Traits::Key; - using T = typename Traits::T; - using Clock = typename Traits::Clock; - using Hash = typename Traits::Hash; - using Equal = typename Traits::Equal; - using Alloc = typename Traits::Alloc; - using MyHash = typename Traits::MyHash; - using MyEqual = typename Traits::MyEqual; - using MyAlloc = typename Traits::MyAlloc; typename Traits::ManualClock clock; // testcase (Traits::name() + " init-list"); @@ -1050,7 +1012,6 @@ void 
aged_associative_container_test_base::testCopyMove() { using Traits = TestTraits; - using Value = typename Traits::Value; using Alloc = typename Traits::Alloc; typename Traits::ManualClock clock; auto const v(Traits::values()); @@ -1121,8 +1082,6 @@ void aged_associative_container_test_base::testIterator() { using Traits = TestTraits; - using Value = typename Traits::Value; - using Alloc = typename Traits::Alloc; typename Traits::ManualClock clock; auto const v(Traits::values()); @@ -1179,8 +1138,6 @@ typename std::enable_if::type aged_associative_container_test_base::testReverseIterator() { using Traits = TestTraits; - using Value = typename Traits::Value; - using Alloc = typename Traits::Alloc; typename Traits::ManualClock clock; auto const v(Traits::values()); @@ -1190,7 +1147,6 @@ aged_associative_container_test_base::testReverseIterator() typename Traits::template Cont<> c{clock}; using iterator = decltype(c.begin()); - using const_iterator = decltype(c.cbegin()); using reverse_iterator = decltype(c.rbegin()); using const_reverse_iterator = decltype(c.crbegin()); @@ -1394,7 +1350,6 @@ void aged_associative_container_test_base::testChronological() { using Traits = TestTraits; - using Value = typename Traits::Value; typename Traits::ManualClock clock; auto const v(Traits::values()); @@ -1760,7 +1715,6 @@ typename std::enable_if::type aged_associative_container_test_base::testCompare() { using Traits = TestTraits; - using Value = typename Traits::Value; typename Traits::ManualClock clock; auto const v(Traits::values()); @@ -1832,8 +1786,6 @@ template void aged_associative_container_test_base::testMaybeUnorderedMultiMap() { - using Traits = TestTraits; - testConstructEmpty(); testConstructRange(); testConstructInitList(); diff --git a/src/test/consensus/LedgerTrie_test.cpp b/src/test/consensus/LedgerTrie_test.cpp index f46fea8e6e..6ed45777f0 100644 --- a/src/test/consensus/LedgerTrie_test.cpp +++ b/src/test/consensus/LedgerTrie_test.cpp @@ -313,7 +313,6 @@ class LedgerTrie_test : public beast::unit_test::suite testSupport() { using namespace csf; - using Seq = Ledger::Seq; LedgerTrie t; LedgerHistoryHelper h; @@ -596,7 +595,6 @@ class LedgerTrie_test : public beast::unit_test::suite testRootRelated() { using namespace csf; - using Seq = Ledger::Seq; // Since the root is a special node that breaks the no-single child // invariant, do some tests that exercise it. 
diff --git a/src/test/consensus/Validations_test.cpp b/src/test/consensus/Validations_test.cpp index 4424d7619d..a04e62b723 100644 --- a/src/test/consensus/Validations_test.cpp +++ b/src/test/consensus/Validations_test.cpp @@ -805,7 +805,6 @@ class Validations_test : public beast::unit_test::suite Ledger ledgerACD = h["acd"]; using Seq = Ledger::Seq; - using ID = Ledger::ID; auto pref = [](Ledger ledger) { return std::make_pair(ledger.seq(), ledger.id()); diff --git a/src/xrpld/rpc/handlers/LogLevel.cpp b/src/xrpld/rpc/handlers/LogLevel.cpp index 0fc266569e..a93d010706 100644 --- a/src/xrpld/rpc/handlers/LogLevel.cpp +++ b/src/xrpld/rpc/handlers/LogLevel.cpp @@ -44,7 +44,6 @@ doLogLevel(RPC::JsonContext& context) Logs::toString(Logs::fromSeverity(context.app.logs().threshold())); std::vector> logTable( context.app.logs().partition_severities()); - using stringPair = std::map::value_type; for (auto const& [k, v] : logTable) lev[k] = v; From 9b45b6888b9bfc4ca81d0fb71e0b6f2f3a120bae Mon Sep 17 00:00:00 2001 From: Ayaz Salikhov Date: Tue, 29 Jul 2025 16:29:38 +0100 Subject: [PATCH 098/244] ci: Build all conan dependencies from source for now (#5623) --- .github/actions/dependencies/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/dependencies/action.yml b/.github/actions/dependencies/action.yml index 7ece9710a8..c5c6a59ab9 100644 --- a/.github/actions/dependencies/action.yml +++ b/.github/actions/dependencies/action.yml @@ -43,7 +43,7 @@ runs: cd ${build_dir} conan install \ --output-folder . \ - --build missing \ + --build '*' \ --options:host "&:tests=True" \ --options:host "&:xrpld=True" \ --settings:all build_type=${{ inputs.configuration }} \ From baf4b8381f32d00e05d4e5bc0598d14ab80c00ac Mon Sep 17 00:00:00 2001 From: Shawn Xie <35279399+shawnxie999@users.noreply.github.com> Date: Tue, 29 Jul 2025 13:02:33 -0400 Subject: [PATCH 099/244] fix `DeliveredAmount` and `delivered_amount` in transaction metadata for direct MPT transfer (#5569) The Payment transaction metadata is missing the `DeliveredAmount` field that displays the actual amount delivered to the destination excluding transfer fees. This amendment fixes this problem. --- include/xrpl/protocol/detail/features.macro | 1 + src/test/rpc/DeliveredAmount_test.cpp | 84 +++++++++++++++++++++ src/xrpld/app/tx/detail/Payment.cpp | 9 +++ 3 files changed, 94 insertions(+) diff --git a/include/xrpl/protocol/detail/features.macro b/include/xrpl/protocol/detail/features.macro index c83dacfa73..e36a466971 100644 --- a/include/xrpl/protocol/detail/features.macro +++ b/include/xrpl/protocol/detail/features.macro @@ -32,6 +32,7 @@ // If you add an amendment here, then do not forget to increment `numFeatures` // in include/xrpl/protocol/Feature.h. 
+XRPL_FIX (MPTDeliveredAmount, Supported::no, VoteBehavior::DefaultNo) XRPL_FIX (AMMClawbackRounding, Supported::no, VoteBehavior::DefaultNo) XRPL_FEATURE(TokenEscrow, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (EnforceNFTokenTrustlineV2, Supported::yes, VoteBehavior::DefaultNo) diff --git a/src/test/rpc/DeliveredAmount_test.cpp b/src/test/rpc/DeliveredAmount_test.cpp index 17763790e8..d084f92f25 100644 --- a/src/test/rpc/DeliveredAmount_test.cpp +++ b/src/test/rpc/DeliveredAmount_test.cpp @@ -21,6 +21,7 @@ #include #include +#include #include namespace ripple { @@ -329,12 +330,95 @@ class DeliveredAmount_test : public beast::unit_test::suite } } + void + testMPTDeliveredAmountRPC(FeatureBitset features) + { + testcase("MPT DeliveredAmount"); + + using namespace jtx; + Account const alice("alice"); + Account const carol("carol"); + Account const bob("bob"); + Env env{*this, features}; + + MPTTester mptAlice( + env, alice, {.holders = {bob, carol}, .close = false}); + + mptAlice.create( + {.transferFee = 25000, + .ownerCount = 1, + .holderCount = 0, + .flags = tfMPTCanTransfer}); + auto const MPT = mptAlice["MPT"]; + + mptAlice.authorize({.account = bob}); + mptAlice.authorize({.account = carol}); + + // issuer to holder + mptAlice.pay(alice, bob, 10000); + + // holder to holder + env(pay(bob, carol, mptAlice.mpt(1000)), txflags(tfPartialPayment)); + env.close(); + + // Get the hash for the most recent transaction. + std::string txHash{ + env.tx()->getJson(JsonOptions::none)[jss::hash].asString()}; + Json::Value meta = env.rpc("tx", txHash)[jss::result][jss::meta]; + + if (features[fixMPTDeliveredAmount]) + { + BEAST_EXPECT( + meta[sfDeliveredAmount.jsonName] == + STAmount{MPT(800)}.getJson(JsonOptions::none)); + BEAST_EXPECT( + meta[jss::delivered_amount] == + STAmount{MPT(800)}.getJson(JsonOptions::none)); + } + else + { + BEAST_EXPECT(!meta.isMember(sfDeliveredAmount.jsonName)); + BEAST_EXPECT( + meta[jss::delivered_amount] = Json::Value("unavailable")); + } + + env(pay(bob, carol, MPT(1000)), + sendmax(MPT(1200)), + txflags(tfPartialPayment)); + env.close(); + + txHash = env.tx()->getJson(JsonOptions::none)[jss::hash].asString(); + meta = env.rpc("tx", txHash)[jss::result][jss::meta]; + + if (features[fixMPTDeliveredAmount]) + { + BEAST_EXPECT( + meta[sfDeliveredAmount.jsonName] == + STAmount{MPT(960)}.getJson(JsonOptions::none)); + BEAST_EXPECT( + meta[jss::delivered_amount] == + STAmount{MPT(960)}.getJson(JsonOptions::none)); + } + else + { + BEAST_EXPECT(!meta.isMember(sfDeliveredAmount.jsonName)); + BEAST_EXPECT( + meta[jss::delivered_amount] = Json::Value("unavailable")); + } + } + public: void run() override { + using namespace test::jtx; + FeatureBitset const all{testable_amendments()}; + testTxDeliveredAmountRPC(); testAccountDeliveredAmountSubscribe(); + + testMPTDeliveredAmountRPC(all - fixMPTDeliveredAmount); + testMPTDeliveredAmountRPC(all); } }; diff --git a/src/xrpld/app/tx/detail/Payment.cpp b/src/xrpld/app/tx/detail/Payment.cpp index 692e03109e..386b170ed1 100644 --- a/src/xrpld/app/tx/detail/Payment.cpp +++ b/src/xrpld/app/tx/detail/Payment.cpp @@ -580,7 +580,16 @@ Payment::doApply() auto res = accountSend( pv, account_, dstAccountID, amountDeliver, ctx_.journal); if (res == tesSUCCESS) + { pv.apply(ctx_.rawView()); + + // If the actual amount delivered is different from the original + // amount due to partial payment or transfer fee, we need to update + // DelieveredAmount using the actual delivered amount + if (view().rules().enabled(fixMPTDeliveredAmount) && + 
amountDeliver != dstAmount) + ctx_.deliver(amountDeliver); + } else if (res == tecINSUFFICIENT_FUNDS || res == tecPATH_DRY) res = tecPATH_PARTIAL; From d835e974905474d6b7d90be9f846c4cb2abe180b Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Thu, 31 Jul 2025 18:08:34 +0100 Subject: [PATCH 100/244] Fix crash in Slot::deletePeer (#5635) Fix crash due to recurrent call to `Slot::deletePeer` (via `OverlayImpl::unsquelch`) when a peer is disconnected at just the wrong moment. --- src/xrpld/overlay/Slot.h | 8 +++++++- src/xrpld/overlay/detail/OverlayImpl.cpp | 16 ++++++++++++---- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/src/xrpld/overlay/Slot.h b/src/xrpld/overlay/Slot.h index 0956eb06f7..ea9fc3285b 100644 --- a/src/xrpld/overlay/Slot.h +++ b/src/xrpld/overlay/Slot.h @@ -446,6 +446,8 @@ Slot::deletePeer(PublicKey const& validator, id_t id, bool erase) auto it = peers_.find(id); if (it != peers_.end()) { + std::vector toUnsquelch; + JLOG(journal_.trace()) << "deletePeer: " << Slice(validator) << " " << id << " selected " << (it->second.state == PeerState::Selected) << " considered " @@ -457,7 +459,7 @@ Slot::deletePeer(PublicKey const& validator, id_t id, bool erase) for (auto& [k, v] : peers_) { if (v.state == PeerState::Squelched) - handler_.unsquelch(validator, k); + toUnsquelch.push_back(k); v.state = PeerState::Counting; v.count = 0; v.expire = now; @@ -479,6 +481,10 @@ Slot::deletePeer(PublicKey const& validator, id_t id, bool erase) if (erase) peers_.erase(it); + + // Must be after peers_.erase(it) + for (auto const& k : toUnsquelch) + handler_.unsquelch(validator, k); } } diff --git a/src/xrpld/overlay/detail/OverlayImpl.cpp b/src/xrpld/overlay/detail/OverlayImpl.cpp index 3cc5b2a024..53b4cad646 100644 --- a/src/xrpld/overlay/detail/OverlayImpl.cpp +++ b/src/xrpld/overlay/detail/OverlayImpl.cpp @@ -1423,7 +1423,12 @@ OverlayImpl::updateSlotAndSquelch( if (!strand_.running_in_this_thread()) return post( strand_, - [this, key, validator, peers = std::move(peers), type]() mutable { + // Must capture copies of reference parameters (i.e. key, validator) + [this, + key = key, + validator = validator, + peers = std::move(peers), + type]() mutable { updateSlotAndSquelch(key, validator, std::move(peers), type); }); @@ -1444,9 +1449,12 @@ OverlayImpl::updateSlotAndSquelch( return; if (!strand_.running_in_this_thread()) - return post(strand_, [this, key, validator, peer, type]() { - updateSlotAndSquelch(key, validator, peer, type); - }); + return post( + strand_, + // Must capture copies of reference parameters (i.e. key, validator) + [this, key = key, validator = validator, peer, type]() { + updateSlotAndSquelch(key, validator, peer, type); + }); slots_.updateSlotAndSquelch(key, validator, peer, type, [&]() { reportInboundTraffic(TrafficCount::squelch_ignored, 0); From 31c99caa65bfe8beaf6137b69c11a1fab40cae61 Mon Sep 17 00:00:00 2001 From: Ayaz Salikhov Date: Thu, 31 Jul 2025 19:01:43 +0100 Subject: [PATCH 101/244] Revert "ci: Build all conan dependencies from source for now (#5623)" (#5639) This reverts commit 9b45b6888b9bfc4ca81d0fb71e0b6f2f3a120bae. --- .github/actions/dependencies/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/dependencies/action.yml b/.github/actions/dependencies/action.yml index c5c6a59ab9..7ece9710a8 100644 --- a/.github/actions/dependencies/action.yml +++ b/.github/actions/dependencies/action.yml @@ -43,7 +43,7 @@ runs: cd ${build_dir} conan install \ --output-folder . 
\ - --build '*' \ + --build missing \ --options:host "&:tests=True" \ --options:host "&:xrpld=True" \ --settings:all build_type=${{ inputs.configuration }} \ From 6419f9a253f0bcae72b863cf8a8de72415fc5b9c Mon Sep 17 00:00:00 2001 From: Denis Angell Date: Mon, 4 Aug 2025 16:54:54 +0200 Subject: [PATCH 102/244] docs: Set up developer environment with specific XCode version (#5645) --- docs/build/environment.md | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/docs/build/environment.md b/docs/build/environment.md index 760be144d8..40a80d4b25 100644 --- a/docs/build/environment.md +++ b/docs/build/environment.md @@ -53,6 +53,34 @@ minimum required (see [BUILD.md][]). clang --version ``` +### Install Xcode Specific Version (Optional) + +If you develop other applications using Xcode, you might be consistently updating to the newest version of Apple Clang. +This will likely cause issues building rippled. You may want to install a specific version of Xcode: + +1. **Download Xcode** + + - Visit [Apple Developer Downloads](https://developer.apple.com/download/more/) + - Sign in with your Apple Developer account + - Search for an Xcode version that includes **Apple Clang (Expected Version)** + - Download the `.xip` file + +2. **Install and Configure Xcode** + + ```bash + # Extract the .xip file and rename for version management + # Example: Xcode_16.2.app + + # Move to Applications directory + sudo mv Xcode_16.2.app /Applications/ + + # Set as default toolchain (persistent) + sudo xcode-select -s /Applications/Xcode_16.2.app/Contents/Developer + + # Set as environment variable (temporary) + export DEVELOPER_DIR=/Applications/Xcode_16.2.app/Contents/Developer + ``` + The command line developer tools should include Git too: ``` From b5a63b39d34b7fc819ebaab3c9085dd47251d248 Mon Sep 17 00:00:00 2001 From: Jingchen Date: Tue, 5 Aug 2025 16:28:56 +0100 Subject: [PATCH 103/244] refactor: Decouple ledger from xrpld/app (#5492) This change decouples `ledger` from `xrpld/app`, and therefore fully clears the path to the modularisation of the ledger component. Before this change, `View.cpp` relied on `MPTokenAuthorize::authorize`; this change moves `MPTokenAuthorize::authorize` to `View.cpp` to invert the dependency, making ledger a standalone module.
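For illustration, a condensed before/after view of the call-site change this refactor makes, drawn from the VaultDeposit.cpp and MPTokenAuthorize.cpp hunks below (a reading aid, not a standalone program; the new free function is declared in src/xrpld/ledger/View.h):

```cpp
// Before: callers reached the helper through a static member of the
// MPTokenAuthorize transactor, so ledger code depended on xrpld/app.
if (auto const err = MPTokenAuthorize::authorize(
        view(),
        ctx_.journal,
        {.priorBalance = mPriorBalance,
         .mptIssuanceID = mptIssuanceID->value(),
         .account = account_});
    !isTesSuccess(err))
    return err;

// After: the same logic lives in the ledger module as the free function
// authorizeMPToken, so the dependency now points from app/tx into ledger.
if (auto const err = authorizeMPToken(
        view(),
        mPriorBalance,
        mptIssuanceID->value(),
        account_,
        ctx_.journal);
    !isTesSuccess(err))
    return err;
```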
--- src/xrpld/app/tx/detail/MPTokenAuthorize.cpp | 120 +--------------- src/xrpld/app/tx/detail/MPTokenAuthorize.h | 6 - src/xrpld/app/tx/detail/VaultDeposit.cpp | 24 ++-- src/xrpld/ledger/View.h | 10 ++ src/xrpld/ledger/detail/View.cpp | 140 ++++++++++++++++--- 5 files changed, 148 insertions(+), 152 deletions(-) diff --git a/src/xrpld/app/tx/detail/MPTokenAuthorize.cpp b/src/xrpld/app/tx/detail/MPTokenAuthorize.cpp index 77b21b65f3..77fe19a287 100644 --- a/src/xrpld/app/tx/detail/MPTokenAuthorize.cpp +++ b/src/xrpld/app/tx/detail/MPTokenAuthorize.cpp @@ -175,126 +175,18 @@ MPTokenAuthorize::createMPToken( return tesSUCCESS; } -TER -MPTokenAuthorize::authorize( - ApplyView& view, - beast::Journal journal, - MPTAuthorizeArgs const& args) -{ - auto const sleAcct = view.peek(keylet::account(args.account)); - if (!sleAcct) - return tecINTERNAL; - - // If the account that submitted the tx is a holder - // Note: `account_` is holder's account - // `holderID` is NOT used - if (!args.holderID) - { - // When a holder wants to unauthorize/delete a MPT, the ledger must - // - delete mptokenKey from owner directory - // - delete the MPToken - if (args.flags & tfMPTUnauthorize) - { - auto const mptokenKey = - keylet::mptoken(args.mptIssuanceID, args.account); - auto const sleMpt = view.peek(mptokenKey); - if (!sleMpt || (*sleMpt)[sfMPTAmount] != 0) - return tecINTERNAL; // LCOV_EXCL_LINE - - if (!view.dirRemove( - keylet::ownerDir(args.account), - (*sleMpt)[sfOwnerNode], - sleMpt->key(), - false)) - return tecINTERNAL; // LCOV_EXCL_LINE - - adjustOwnerCount(view, sleAcct, -1, journal); - - view.erase(sleMpt); - return tesSUCCESS; - } - - // A potential holder wants to authorize/hold a mpt, the ledger must: - // - add the new mptokenKey to the owner directory - // - create the MPToken object for the holder - - // The reserve that is required to create the MPToken. Note - // that although the reserve increases with every item - // an account owns, in the case of MPTokens we only - // *enforce* a reserve if the user owns more than two - // items. This is similar to the reserve requirements of trust lines. - std::uint32_t const uOwnerCount = sleAcct->getFieldU32(sfOwnerCount); - XRPAmount const reserveCreate( - (uOwnerCount < 2) ? XRPAmount(beast::zero) - : view.fees().accountReserve(uOwnerCount + 1)); - - if (args.priorBalance < reserveCreate) - return tecINSUFFICIENT_RESERVE; - - auto const mptokenKey = - keylet::mptoken(args.mptIssuanceID, args.account); - auto mptoken = std::make_shared(mptokenKey); - if (auto ter = dirLink(view, args.account, mptoken)) - return ter; // LCOV_EXCL_LINE - - (*mptoken)[sfAccount] = args.account; - (*mptoken)[sfMPTokenIssuanceID] = args.mptIssuanceID; - (*mptoken)[sfFlags] = 0; - view.insert(mptoken); - - // Update owner count. 
- adjustOwnerCount(view, sleAcct, 1, journal); - - return tesSUCCESS; - } - - auto const sleMptIssuance = - view.read(keylet::mptIssuance(args.mptIssuanceID)); - if (!sleMptIssuance) - return tecINTERNAL; - - // If the account that submitted this tx is the issuer of the MPT - // Note: `account_` is issuer's account - // `holderID` is holder's account - if (args.account != (*sleMptIssuance)[sfIssuer]) - return tecINTERNAL; - - auto const sleMpt = - view.peek(keylet::mptoken(args.mptIssuanceID, *args.holderID)); - if (!sleMpt) - return tecINTERNAL; - - std::uint32_t const flagsIn = sleMpt->getFieldU32(sfFlags); - std::uint32_t flagsOut = flagsIn; - - // Issuer wants to unauthorize the holder, unset lsfMPTAuthorized on - // their MPToken - if (args.flags & tfMPTUnauthorize) - flagsOut &= ~lsfMPTAuthorized; - // Issuer wants to authorize a holder, set lsfMPTAuthorized on their - // MPToken - else - flagsOut |= lsfMPTAuthorized; - - if (flagsIn != flagsOut) - sleMpt->setFieldU32(sfFlags, flagsOut); - - view.update(sleMpt); - return tesSUCCESS; -} - TER MPTokenAuthorize::doApply() { auto const& tx = ctx_.tx; - return authorize( + return authorizeMPToken( ctx_.view(), + mPriorBalance, + tx[sfMPTokenIssuanceID], + account_, ctx_.journal, - {.priorBalance = mPriorBalance, - .mptIssuanceID = tx[sfMPTokenIssuanceID], - .account = account_, - .flags = tx.getFlags(), - .holderID = tx[~sfHolder]}); + tx.getFlags(), + tx[~sfHolder]); } } // namespace ripple diff --git a/src/xrpld/app/tx/detail/MPTokenAuthorize.h b/src/xrpld/app/tx/detail/MPTokenAuthorize.h index a81dc7dea2..85e8edcf9f 100644 --- a/src/xrpld/app/tx/detail/MPTokenAuthorize.h +++ b/src/xrpld/app/tx/detail/MPTokenAuthorize.h @@ -48,12 +48,6 @@ public: static TER preclaim(PreclaimContext const& ctx); - static TER - authorize( - ApplyView& view, - beast::Journal journal, - MPTAuthorizeArgs const& args); - static TER createMPToken( ApplyView& view, diff --git a/src/xrpld/app/tx/detail/VaultDeposit.cpp b/src/xrpld/app/tx/detail/VaultDeposit.cpp index 0efddb0ff7..db1fc3bbfe 100644 --- a/src/xrpld/app/tx/detail/VaultDeposit.cpp +++ b/src/xrpld/app/tx/detail/VaultDeposit.cpp @@ -210,12 +210,12 @@ VaultDeposit::doApply() auto sleMpt = view().read(keylet::mptoken(mptIssuanceID, account_)); if (!sleMpt) { - if (auto const err = MPTokenAuthorize::authorize( + if (auto const err = authorizeMPToken( view(), - ctx_.journal, - {.priorBalance = mPriorBalance, - .mptIssuanceID = mptIssuanceID->value(), - .account = account_}); + mPriorBalance, + mptIssuanceID->value(), + account_, + ctx_.journal); !isTesSuccess(err)) return err; } @@ -223,15 +223,15 @@ VaultDeposit::doApply() // If the vault is private, set the authorized flag for the vault owner if (vault->isFlag(tfVaultPrivate)) { - if (auto const err = MPTokenAuthorize::authorize( + if (auto const err = authorizeMPToken( view(), + mPriorBalance, // priorBalance + mptIssuanceID->value(), // mptIssuanceID + sleIssuance->at(sfIssuer), // account ctx_.journal, - { - .priorBalance = mPriorBalance, - .mptIssuanceID = mptIssuanceID->value(), - .account = sleIssuance->at(sfIssuer), - .holderID = account_, - }); + {}, // flags + account_ // holderID + ); !isTesSuccess(err)) return err; } diff --git a/src/xrpld/ledger/View.h b/src/xrpld/ledger/View.h index fc9360734d..07f6945dd4 100644 --- a/src/xrpld/ledger/View.h +++ b/src/xrpld/ledger/View.h @@ -600,6 +600,16 @@ addEmptyHolding( asset.value()); } +[[nodiscard]] TER +authorizeMPToken( + ApplyView& view, + XRPAmount const& priorBalance, + MPTID const& 
mptIssuanceID, + AccountID const& account, + beast::Journal journal, + std::uint32_t flags = 0, + std::optional holderID = std::nullopt); + // VFALCO NOTE Both STAmount parameters should just // be "Amount", a unit-less number. // diff --git a/src/xrpld/ledger/detail/View.cpp b/src/xrpld/ledger/detail/View.cpp index 1f616ed491..7c6e1d60f1 100644 --- a/src/xrpld/ledger/detail/View.cpp +++ b/src/xrpld/ledger/detail/View.cpp @@ -18,7 +18,6 @@ //============================================================================== #include -#include #include #include @@ -1215,12 +1214,115 @@ addEmptyHolding( if (view.peek(keylet::mptoken(mptID, accountID))) return tecDUPLICATE; - return MPTokenAuthorize::authorize( - view, - journal, - {.priorBalance = priorBalance, - .mptIssuanceID = mptID, - .account = accountID}); + return authorizeMPToken(view, priorBalance, mptID, accountID, journal); +} + +[[nodiscard]] TER +authorizeMPToken( + ApplyView& view, + XRPAmount const& priorBalance, + MPTID const& mptIssuanceID, + AccountID const& account, + beast::Journal journal, + std::uint32_t flags, + std::optional holderID) +{ + auto const sleAcct = view.peek(keylet::account(account)); + if (!sleAcct) + return tecINTERNAL; + + // If the account that submitted the tx is a holder + // Note: `account_` is holder's account + // `holderID` is NOT used + if (!holderID) + { + // When a holder wants to unauthorize/delete a MPT, the ledger must + // - delete mptokenKey from owner directory + // - delete the MPToken + if (flags & tfMPTUnauthorize) + { + auto const mptokenKey = keylet::mptoken(mptIssuanceID, account); + auto const sleMpt = view.peek(mptokenKey); + if (!sleMpt || (*sleMpt)[sfMPTAmount] != 0) + return tecINTERNAL; // LCOV_EXCL_LINE + + if (!view.dirRemove( + keylet::ownerDir(account), + (*sleMpt)[sfOwnerNode], + sleMpt->key(), + false)) + return tecINTERNAL; // LCOV_EXCL_LINE + + adjustOwnerCount(view, sleAcct, -1, journal); + + view.erase(sleMpt); + return tesSUCCESS; + } + + // A potential holder wants to authorize/hold a mpt, the ledger must: + // - add the new mptokenKey to the owner directory + // - create the MPToken object for the holder + + // The reserve that is required to create the MPToken. Note + // that although the reserve increases with every item + // an account owns, in the case of MPTokens we only + // *enforce* a reserve if the user owns more than two + // items. This is similar to the reserve requirements of trust lines. + std::uint32_t const uOwnerCount = sleAcct->getFieldU32(sfOwnerCount); + XRPAmount const reserveCreate( + (uOwnerCount < 2) ? XRPAmount(beast::zero) + : view.fees().accountReserve(uOwnerCount + 1)); + + if (priorBalance < reserveCreate) + return tecINSUFFICIENT_RESERVE; + + auto const mptokenKey = keylet::mptoken(mptIssuanceID, account); + auto mptoken = std::make_shared(mptokenKey); + if (auto ter = dirLink(view, account, mptoken)) + return ter; // LCOV_EXCL_LINE + + (*mptoken)[sfAccount] = account; + (*mptoken)[sfMPTokenIssuanceID] = mptIssuanceID; + (*mptoken)[sfFlags] = 0; + view.insert(mptoken); + + // Update owner count. 
+ adjustOwnerCount(view, sleAcct, 1, journal); + + return tesSUCCESS; + } + + auto const sleMptIssuance = view.read(keylet::mptIssuance(mptIssuanceID)); + if (!sleMptIssuance) + return tecINTERNAL; + + // If the account that submitted this tx is the issuer of the MPT + // Note: `account_` is issuer's account + // `holderID` is holder's account + if (account != (*sleMptIssuance)[sfIssuer]) + return tecINTERNAL; + + auto const sleMpt = view.peek(keylet::mptoken(mptIssuanceID, *holderID)); + if (!sleMpt) + return tecINTERNAL; + + std::uint32_t const flagsIn = sleMpt->getFieldU32(sfFlags); + std::uint32_t flagsOut = flagsIn; + + // Issuer wants to unauthorize the holder, unset lsfMPTAuthorized on + // their MPToken + if (flags & tfMPTUnauthorize) + flagsOut &= ~lsfMPTAuthorized; + // Issuer wants to authorize a holder, set lsfMPTAuthorized on their + // MPToken + else + flagsOut |= lsfMPTAuthorized; + + if (flagsIn != flagsOut) + sleMpt->setFieldU32(sfFlags, flagsOut); + + view.update(sleMpt); + return tesSUCCESS; } TER @@ -1418,13 +1520,14 @@ removeEmptyHolding( if (mptoken->at(sfMPTAmount) != 0) return tecHAS_OBLIGATIONS; - return MPTokenAuthorize::authorize( + return authorizeMPToken( view, + {}, // priorBalance + mptID, + accountID, journal, - {.priorBalance = {}, - .mptIssuanceID = mptID, - .account = accountID, - .flags = tfMPTUnauthorize}); + tfMPTUnauthorize // flags + ); } TER @@ -2497,15 +2600,12 @@ enforceMPTokenAuthorization( XRPL_ASSERT( maybeDomainID.has_value() && sleToken == nullptr, "ripple::enforceMPTokenAuthorization : new MPToken for domain"); - if (auto const err = MPTokenAuthorize::authorize( + if (auto const err = authorizeMPToken( view, - j, - { - .priorBalance = priorBalance, - .mptIssuanceID = mptIssuanceID, - .account = account, - .flags = 0, - }); + priorBalance, // priorBalance + mptIssuanceID, // mptIssuanceID + account, // account + j); !isTesSuccess(err)) return err; From 4eae037fee99ebd70cc22a5637adb84f7123b118 Mon Sep 17 00:00:00 2001 From: tequ Date: Wed, 6 Aug 2025 02:08:59 +0900 Subject: [PATCH 104/244] fix: Ensures canonical order for `PriceDataSeries` upon `PriceOracle` creation (#5485) This change fixes an issue where the order of `PriceDataSeries` was out of sync between when `PriceOracle` was created and when it was updated. Although they are registered in the canonical order when updated, they are created using the order specified in the transaction; this change ensures that they are also registered in the canonical order when created. --- include/xrpl/protocol/detail/features.macro | 3 +- src/test/app/Oracle_test.cpp | 55 +++++++++++++++++++++ src/xrpld/app/tx/detail/SetOracle.cpp | 42 ++++++++++++---- 3 files changed, 89 insertions(+), 11 deletions(-) diff --git a/include/xrpl/protocol/detail/features.macro b/include/xrpl/protocol/detail/features.macro index e36a466971..e2725d1fc0 100644 --- a/include/xrpl/protocol/detail/features.macro +++ b/include/xrpl/protocol/detail/features.macro @@ -32,6 +32,7 @@ // If you add an amendment here, then do not forget to increment `numFeatures` // in include/xrpl/protocol/Feature.h. 
+XRPL_FIX (PriceOracleOrder, Supported::no, VoteBehavior::DefaultNo) XRPL_FIX (MPTDeliveredAmount, Supported::no, VoteBehavior::DefaultNo) XRPL_FIX (AMMClawbackRounding, Supported::no, VoteBehavior::DefaultNo) XRPL_FEATURE(TokenEscrow, Supported::yes, VoteBehavior::DefaultNo) @@ -39,7 +40,7 @@ XRPL_FIX (EnforceNFTokenTrustlineV2, Supported::yes, VoteBehavior::DefaultNo XRPL_FIX (AMMv1_3, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(PermissionedDEX, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(Batch, Supported::yes, VoteBehavior::DefaultNo) -XRPL_FEATURE(SingleAssetVault, Supported::no, VoteBehavior::DefaultNo) +XRPL_FEATURE(SingleAssetVault, Supported::no, VoteBehavior::DefaultNo) XRPL_FEATURE(PermissionDelegation, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (PayChanCancelAfter, Supported::yes, VoteBehavior::DefaultNo) // Check flags in Credential transactions diff --git a/src/test/app/Oracle_test.cpp b/src/test/app/Oracle_test.cpp index aaa7f9a746..fdd7ad941e 100644 --- a/src/test/app/Oracle_test.cpp +++ b/src/test/app/Oracle_test.cpp @@ -678,6 +678,61 @@ private: oracle.set( UpdateArg{.series = {{"XRP", "USD", 742, 2}}, .fee = baseFee}); } + + for (bool const withFixOrder : {false, true}) + { + // Should be same order as creation + Env env( + *this, + withFixOrder ? testable_amendments() + : testable_amendments() - fixPriceOracleOrder); + auto const baseFee = + static_cast(env.current()->fees().base.drops()); + + auto test = [&](Env& env, DataSeries const& series) { + env.fund(XRP(1'000), owner); + Oracle oracle( + env, {.owner = owner, .series = series, .fee = baseFee}); + BEAST_EXPECT(oracle.exists()); + auto sle = env.le(keylet::oracle(owner, oracle.documentID())); + BEAST_EXPECT( + sle->getFieldArray(sfPriceDataSeries).size() == + series.size()); + + auto const beforeQuoteAssetName1 = + sle->getFieldArray(sfPriceDataSeries)[0] + .getFieldCurrency(sfQuoteAsset) + .getText(); + auto const beforeQuoteAssetName2 = + sle->getFieldArray(sfPriceDataSeries)[1] + .getFieldCurrency(sfQuoteAsset) + .getText(); + + oracle.set(UpdateArg{.series = series, .fee = baseFee}); + sle = env.le(keylet::oracle(owner, oracle.documentID())); + + auto const afterQuoteAssetName1 = + sle->getFieldArray(sfPriceDataSeries)[0] + .getFieldCurrency(sfQuoteAsset) + .getText(); + auto const afterQuoteAssetName2 = + sle->getFieldArray(sfPriceDataSeries)[1] + .getFieldCurrency(sfQuoteAsset) + .getText(); + + if (env.current()->rules().enabled(fixPriceOracleOrder)) + { + BEAST_EXPECT(afterQuoteAssetName1 == beforeQuoteAssetName1); + BEAST_EXPECT(afterQuoteAssetName2 == beforeQuoteAssetName2); + } + else + { + BEAST_EXPECT(afterQuoteAssetName1 != beforeQuoteAssetName1); + BEAST_EXPECT(afterQuoteAssetName2 != beforeQuoteAssetName2); + } + }; + test(env, {{"XRP", "USD", 742, 2}, {"XRP", "EUR", 711, 2}}); + } } void diff --git a/src/xrpld/app/tx/detail/SetOracle.cpp b/src/xrpld/app/tx/detail/SetOracle.cpp index 8559c3e7b9..d598507cb7 100644 --- a/src/xrpld/app/tx/detail/SetOracle.cpp +++ b/src/xrpld/app/tx/detail/SetOracle.cpp @@ -209,6 +209,17 @@ SetOracle::doApply() { auto const oracleID = keylet::oracle(account_, ctx_.tx[sfOracleDocumentID]); + auto populatePriceData = [](STObject& priceData, STObject const& entry) { + setPriceDataInnerObjTemplate(priceData); + priceData.setFieldCurrency( + sfBaseAsset, entry.getFieldCurrency(sfBaseAsset)); + priceData.setFieldCurrency( + sfQuoteAsset, entry.getFieldCurrency(sfQuoteAsset)); + priceData.setFieldU64(sfAssetPrice, entry.getFieldU64(sfAssetPrice)); + 
if (entry.isFieldPresent(sfScale)) + priceData.setFieldU8(sfScale, entry.getFieldU8(sfScale)); + }; + if (auto sle = ctx_.view().peek(oracleID)) { // update @@ -249,15 +260,7 @@ SetOracle::doApply() { // add a token pair with the price STObject priceData{sfPriceData}; - setPriceDataInnerObjTemplate(priceData); - priceData.setFieldCurrency( - sfBaseAsset, entry.getFieldCurrency(sfBaseAsset)); - priceData.setFieldCurrency( - sfQuoteAsset, entry.getFieldCurrency(sfQuoteAsset)); - priceData.setFieldU64( - sfAssetPrice, entry.getFieldU64(sfAssetPrice)); - if (entry.isFieldPresent(sfScale)) - priceData.setFieldU8(sfScale, entry.getFieldU8(sfScale)); + populatePriceData(priceData, entry); pairs.emplace(key, std::move(priceData)); } } @@ -285,7 +288,26 @@ SetOracle::doApply() sle->setFieldVL(sfProvider, ctx_.tx[sfProvider]); if (ctx_.tx.isFieldPresent(sfURI)) sle->setFieldVL(sfURI, ctx_.tx[sfURI]); - auto const& series = ctx_.tx.getFieldArray(sfPriceDataSeries); + + STArray series; + if (!ctx_.view().rules().enabled(fixPriceOracleOrder)) + { + series = ctx_.tx.getFieldArray(sfPriceDataSeries); + } + else + { + std::map, STObject> pairs; + for (auto const& entry : ctx_.tx.getFieldArray(sfPriceDataSeries)) + { + auto const key = tokenPairKey(entry); + STObject priceData{sfPriceData}; + populatePriceData(priceData, entry); + pairs.emplace(key, std::move(priceData)); + } + for (auto const& iter : pairs) + series.push_back(std::move(iter.second)); + } + sle->setFieldArray(sfPriceDataSeries, series); sle->setFieldVL(sfAssetClass, ctx_.tx[sfAssetClass]); sle->setFieldU32(sfLastUpdateTime, ctx_.tx[sfLastUpdateTime]); From dbeb841b5ae2e67bfe336be45e0b8b41daa27a76 Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Wed, 6 Aug 2025 11:18:41 +0100 Subject: [PATCH 105/244] docs: Update BUILD.md for Conan 2 (#5478) This change updates BUILD.md for Conan 2, add fixes/workarounds for Apple Clang 17, Clang 20 and CMake 4. This also removes (from BUILD.md only) workarounds for compiler versions which we no longer support e.g. Clang 15 and adds compilation flag -Wno-deprecated-declarations to enable building with Clang 20 on Linux. --- BUILD.md | 400 ++++++++++++++++++++++++++------------ docs/build/environment.md | 56 +++--- 2 files changed, 303 insertions(+), 153 deletions(-) diff --git a/BUILD.md b/BUILD.md index 072e38af93..aa18767397 100644 --- a/BUILD.md +++ b/BUILD.md @@ -3,29 +3,29 @@ | These instructions assume you have a C++ development environment ready with Git, Python, Conan, CMake, and a C++ compiler. For help setting one up on Linux, macOS, or Windows, [see this guide](./docs/build/environment.md). | > These instructions also assume a basic familiarity with Conan and CMake. -> If you are unfamiliar with Conan, -> you can read our [crash course](./docs/build/conan.md) -> or the official [Getting Started][3] walkthrough. +> If you are unfamiliar with Conan, you can read our +> [crash course](./docs/build/conan.md) or the official [Getting Started][3] +> walkthrough. ## Branches For a stable release, choose the `master` branch or one of the [tagged releases](https://github.com/ripple/rippled/releases). -``` +```bash git checkout master ``` For the latest release candidate, choose the `release` branch. -``` +```bash git checkout release ``` For the latest set of untested features, or to contribute, choose the `develop` branch. -``` +```bash git checkout develop ``` @@ -33,151 +33,282 @@ git checkout develop See [System Requirements](https://xrpl.org/system-requirements.html). 
-Building rippled generally requires git, Python, Conan, CMake, and a C++ compiler. Some guidance on setting up such a [C++ development environment can be found here](./docs/build/environment.md). +Building rippled generally requires git, Python, Conan, CMake, and a C++ +compiler. Some guidance on setting up such a [C++ development environment can be +found here](./docs/build/environment.md). -- [Python 3.7](https://www.python.org/downloads/) -- [Conan 1.60](https://conan.io/downloads.html)[^1] -- [CMake 3.16](https://cmake.org/download/) +- [Python 3.11](https://www.python.org/downloads/), or higher +- [Conan 2.17](https://conan.io/downloads.html)[^1], or higher +- [CMake 3.22](https://cmake.org/download/)[^2], or higher -[^1]: It is possible to build with Conan 2.x, -but the instructions are significantly different, -which is why we are not recommending it yet. -Notably, the `conan profile update` command is removed in 2.x. -Profiles must be edited by hand. +[^1]: It is possible to build with Conan 1.60+, but the instructions are +significantly different, which is why we are not recommending it. + +[^2]: CMake 4 is not yet supported by all dependencies required by this project. +If you are affected by this issue, follow [conan workaround for cmake +4](#workaround-for-cmake-4) `rippled` is written in the C++20 dialect and includes the `` header. The [minimum compiler versions][2] required are: | Compiler | Version | -|-------------|---------| -| GCC | 11 | -| Clang | 13 | -| Apple Clang | 13.1.6 | -| MSVC | 19.23 | +|-------------|-----| +| GCC | 12 | +| Clang | 16 | +| Apple Clang | 16 | +| MSVC | 19.44[^3] | ### Linux -The Ubuntu operating system has received the highest level of -quality assurance, testing, and support. +The Ubuntu Linux distribution has received the highest level of quality +assurance, testing, and support. We also support Red Hat and use Debian +internally. -Here are [sample instructions for setting up a C++ development environment on Linux](./docs/build/environment.md#linux). +Here are [sample instructions for setting up a C++ development environment on +Linux](./docs/build/environment.md#linux). ### Mac Many rippled engineers use macOS for development. -Here are [sample instructions for setting up a C++ development environment on macOS](./docs/build/environment.md#macos). +Here are [sample instructions for setting up a C++ development environment on +macOS](./docs/build/environment.md#macos). ### Windows -Windows is not recommended for production use at this time. +Windows is used by some engineers for development only. -- Additionally, 32-bit Windows development is not supported. - -[Boost]: https://www.boost.org/ +[^3]: Windows is not recommended for production use. ## Steps ### Set Up Conan -After you have a [C++ development environment](./docs/build/environment.md) ready with Git, Python, Conan, CMake, and a C++ compiler, you may need to set up your Conan profile. +After you have a [C++ development environment](./docs/build/environment.md) ready with Git, Python, +Conan, CMake, and a C++ compiler, you may need to set up your Conan profile. -These instructions assume a basic familiarity with Conan and CMake. +These instructions assume a basic familiarity with Conan and CMake. If you are +unfamiliar with Conan, then please read [this crash course](./docs/build/conan.md) or the official +[Getting Started][3] walkthrough. -If you are unfamiliar with Conan, then please read [this crash course](./docs/build/conan.md) or the official [Getting Started][3] walkthrough. 
+#### Default profile +We recommend that you import the provided `conan/profiles/default` profile: -You'll need at least one Conan profile: - - ``` - conan profile new default --detect - ``` - -Update the compiler settings: - - ``` - conan profile update settings.compiler.cppstd=20 default - ``` - -Configure Conan (1.x only) to use recipe revisions: - - ``` - conan config set general.revisions_enabled=1 - ``` - -**Linux** developers will commonly have a default Conan [profile][] that compiles -with GCC and links with libstdc++. -If you are linking with libstdc++ (see profile setting `compiler.libcxx`), -then you will need to choose the `libstdc++11` ABI: - - ``` - conan profile update settings.compiler.libcxx=libstdc++11 default - ``` - - -Ensure inter-operability between `boost::string_view` and `std::string_view` types: - -``` -conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_BEAST_USE_STD_STRING_VIEW"]' default -conan profile update 'env.CXXFLAGS="-DBOOST_BEAST_USE_STD_STRING_VIEW"' default +```bash +conan config install conan/profiles/ -tf $(conan config home)/profiles/ ``` -If you have other flags in the `conf.tools.build` or `env.CXXFLAGS` sections, make sure to retain the existing flags and append the new ones. You can check them with: -``` -conan profile show default +You can check your Conan profile by running: + +```bash +conan profile show ``` +#### Custom profile -**Windows** developers may need to use the x64 native build tools. -An easy way to do that is to run the shortcut "x64 Native Tools Command -Prompt" for the version of Visual Studio that you have installed. +If the default profile does not work for you and you do not yet have a Conan +profile, you can create one by running: - Windows developers must also build `rippled` and its dependencies for the x64 - architecture: - - ``` - conan profile update settings.arch=x86_64 default - ``` - -### Multiple compilers - -When `/usr/bin/g++` exists on a platform, it is the default cpp compiler. This -default works for some users. - -However, if this compiler cannot build rippled or its dependencies, then you can -install another compiler and set Conan and CMake to use it. -Update the `conf.tools.build:compiler_executables` setting in order to set the correct variables (`CMAKE__COMPILER`) in the -generated CMake toolchain file. -For example, on Ubuntu 20, you may have gcc at `/usr/bin/gcc` and g++ at `/usr/bin/g++`; if that is the case, you can select those compilers with: -``` -conan profile update 'conf.tools.build:compiler_executables={"c": "/usr/bin/gcc", "cpp": "/usr/bin/g++"}' default +```bash +conan profile detect ``` -Replace `/usr/bin/gcc` and `/usr/bin/g++` with paths to the desired compilers. +You may need to make changes to the profile to suit your environment. You can +refer to the provided `conan/profiles/default` profile for inspiration, and you +may also need to apply the required [tweaks](#conan-profile-tweaks) to this +default profile. -It should choose the compiler for dependencies as well, -but not all of them have a Conan recipe that respects this setting (yet). -For the rest, you can set these environment variables. -Replace `` with paths to the desired compilers: +### Patched recipes -- `conan profile update env.CC= default` -- `conan profile update env.CXX= default` +The recipes in Conan Center occasionally need to be patched for compatibility +with the latest version of `rippled`. We maintain a fork of the Conan Center +[here](https://github.com/XRPLF/conan-center-index/) containing the patches. 
-Export our [Conan recipe for Snappy](./external/snappy). -It does not explicitly link the C++ standard library, -which allows you to statically link it with GCC, if you want. +To ensure our patched recipes are used, you must add our Conan remote at a +higher index than the default Conan Center remote, so it is consulted first. You +can do this by running: - ``` - # Conan 2.x - conan export --version 1.1.10 external/snappy +```bash +conan remote add --index 0 xrplf "https://conan.ripplex.io" +``` + +Alternatively, you can use the `conan export` command to add the patched recipes +that are also (for the time being) located in the `external/` directory to your +local Conan cache: + +```bash +conan export --version 1.1.10 external/snappy +conan export --version 4.0.3 external/soci +``` + +### Conan profile tweaks + +#### Missing compiler version + +If you see an error similar to the following after running `conan profile show`: + +```bash +ERROR: Invalid setting '17' is not a valid 'settings.compiler.version' value. +Possible values are ['5.0', '5.1', '6.0', '6.1', '7.0', '7.3', '8.0', '8.1', +'9.0', '9.1', '10.0', '11.0', '12.0', '13', '13.0', '13.1', '14', '14.0', '15', +'15.0', '16', '16.0'] +Read "http://docs.conan.io/2/knowledge/faq.html#error-invalid-setting" +``` + +you need to amend the list of compiler versions in +`$(conan config home)/settings.yml`, by appending the required version number(s) +to the `version` array specific for your compiler. For example: + +```yaml + apple-clang: + version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", + "9.1", "10.0", "11.0", "12.0", "13", "13.0", "13.1", "14", + "14.0", "15", "15.0", "16", "16.0", "17", "17.0"] +``` + +#### Multiple compilers + +If you have multiple compilers installed, make sure to select the one to use in +your default Conan configuration **before** running `conan profile detect`, by +setting the `CC` and `CXX` environment variables. + +For example, if you are running MacOS and have [homebrew +LLVM@18](https://formulae.brew.sh/formula/llvm@18), and want to use it as a +compiler in the new Conan profile: + + ```bash + export CC=$(brew --prefix llvm@18)/bin/clang + export CXX=$(brew --prefix llvm@18)/bin/clang++ + conan profile detect ``` -Export our [Conan recipe for SOCI](./external/soci). -It patches their CMake to correctly import its dependencies. +You should also explicitly set the path to the compiler in the profile file, +which helps to avoid errors when `CC` and/or `CXX` are set and disagree with the +selected Conan profile. For example: - ``` - # Conan 2.x - conan export --version 4.0.3 external/soci - ``` +```text +[conf] +tools.build:compiler_executables={'c':'/usr/bin/gcc','cpp':'/usr/bin/g++'} +``` + +#### Multiple profiles + +You can manage multiple Conan profiles in the directory +`$(conan config home)/profiles`, for example renaming `default` to a different +name and then creating a new `default` profile for a different compiler. + +#### Select language + +The default profile created by Conan will typically select different C++ dialect +than C++20 used by this project. You should set `20` in the profile line +starting with `compiler.cppstd=`. For example: + +```bash +sed -i.bak -e 's|^compiler\.cppstd=.*$|compiler.cppstd=20|' $(conan config home)/profiles/default +``` + +#### Select standard library in Linux + +**Linux** developers will commonly have a default Conan [profile][] that +compiles with GCC and links with libstdc++. 
If you are linking with libstdc++ +(see profile setting `compiler.libcxx`), then you will need to choose the +`libstdc++11` ABI: + +```bash +sed -i.bak -e 's|^compiler\.libcxx=.*$|compiler.libcxx=libstdc++11|' $(conan config home)/profiles/default +``` + +#### Select architecture and runtime in Windows + +**Windows** developers may need to use the x64 native build tools. An easy way +to do that is to run the shortcut "x64 Native Tools Command Prompt" for the +version of Visual Studio that you have installed. + +Windows developers must also build `rippled` and its dependencies for the x64 +architecture: + +```bash +sed -i.bak -e 's|^arch=.*$|arch=x86_64|' $(conan config home)/profiles/default +``` + +**Windows** developers also must select static runtime: + +```bash +sed -i.bak -e 's|^compiler\.runtime=.*$|compiler.runtime=static|' $(conan config home)/profiles/default +``` + +#### Workaround for CMake 4 + +If your system CMake is version 4 rather than 3, you may have to configure Conan +profile to use CMake version 3 for dependencies, by adding the following two +lines to your profile: + +```text +[tool_requires] +!cmake/*: cmake/[>=3 <4] +``` + +This will force Conan to download and use a locally cached CMake 3 version, and +is needed because some of the dependencies used by this project do not support +CMake 4. + +#### Clang workaround for grpc + +If your compiler is clang, version 19 or later, or apple-clang, version 17 or +later, you may encounter a compilation error while building the `grpc` +dependency: + +```text +In file included from .../lib/promise/try_seq.h:26: +.../lib/promise/detail/basic_seq.h:499:38: error: a template argument list is expected after a name prefixed by the template keyword [-Wmissing-template-arg-list-after-template-kw] + 499 | Traits::template CallSeqFactory(f_, *cur_, std::move(arg))); + | ^ +``` + +The workaround for this error is to add two lines to profile: + +```text +[conf] +tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw'] +``` + +#### Workaround for gcc 12 + +If your compiler is gcc, version 12, and you have enabled `werr` option, you may +encounter a compilation error such as: + +```text +/usr/include/c++/12/bits/char_traits.h:435:56: error: 'void* __builtin_memcpy(void*, const void*, long unsigned int)' accessing 9223372036854775810 or more bytes at offsets [2, 9223372036854775807] and 1 may overlap up to 9223372036854775813 bytes at offset -3 [-Werror=restrict] + 435 | return static_cast(__builtin_memcpy(__s1, __s2, __n)); + | ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~ +cc1plus: all warnings being treated as errors +``` + +The workaround for this error is to add two lines to your profile: + +```text +[conf] +tools.build:cxxflags=['-Wno-restrict'] +``` + +#### Workaround for clang 16 + +If your compiler is clang, version 16, you may encounter compilation error such +as: + +```text +In file included from .../boost/beast/websocket/stream.hpp:2857: +.../boost/beast/websocket/impl/read.hpp:695:17: error: call to 'async_teardown' is ambiguous + async_teardown(impl.role, impl.stream(), + ^~~~~~~~~~~~~~ +``` + +The workaround for this error is to add two lines to your profile: + +```text +[conf] +tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS'] +``` ### Build and Test @@ -245,7 +376,6 @@ It patches their CMake to correctly import its dependencies. cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release -Dxrpld=ON -Dtests=ON .. 
``` - Multi-config generators: ``` @@ -257,13 +387,13 @@ It patches their CMake to correctly import its dependencies. 5. Build `rippled`. For a single-configuration generator, it will build whatever configuration - you passed for `CMAKE_BUILD_TYPE`. For a multi-configuration generator, - you must pass the option `--config` to select the build configuration. + you passed for `CMAKE_BUILD_TYPE`. For a multi-configuration generator, you + must pass the option `--config` to select the build configuration. Single-config generators: ``` - cmake --build . -j $(nproc) + cmake --build . ``` Multi-config generators: @@ -278,18 +408,22 @@ It patches their CMake to correctly import its dependencies. Single-config generators: ``` - ./rippled --unittest + ./rippled --unittest --unittest-jobs N ``` Multi-config generators: ``` - ./Release/rippled --unittest - ./Debug/rippled --unittest + ./Release/rippled --unittest --unittest-jobs N + ./Debug/rippled --unittest --unittest-jobs N ``` - The location of `rippled` in your build directory depends on your CMake - generator. Pass `--help` to see the rest of the command line options. + Replace the `--unittest-jobs` parameter N with the desired unit tests + concurrency. Recommended setting is half of the number of available CPU + cores. + + The location of `rippled` binary in your build directory depends on your + CMake generator. Pass `--help` to see the rest of the command line options. ## Coverage report @@ -347,7 +481,7 @@ cmake --build . --target coverage After the `coverage` target is completed, the generated coverage report will be stored inside the build directory, as either of: -- file named `coverage.`_extension_ , with a suitable extension for the report format, or +- file named `coverage.`_extension_, with a suitable extension for the report format, or - directory named `coverage`, with the `index.html` and other files inside, for the `html-details` or `html-nested` report formats. @@ -355,12 +489,14 @@ stored inside the build directory, as either of: | Option | Default Value | Description | | --- | ---| ---| -| `assert` | OFF | Enable assertions. +| `assert` | OFF | Enable assertions. | | `coverage` | OFF | Prepare the coverage report. | | `san` | N/A | Enable a sanitizer with Clang. Choices are `thread` and `address`. | | `tests` | OFF | Build tests. | -| `unity` | ON | Configure a unity build. | +| `unity` | OFF | Configure a unity build. | | `xrpld` | OFF | Build the xrpld (`rippled`) application, and not just the libxrpl library. | +| `werr` | OFF | Treat compilation warnings as errors | +| `wextra` | OFF | Enable additional compilation warnings | [Unity builds][5] may be faster for the first build (at the cost of much more memory) since they concatenate sources into fewer @@ -375,12 +511,28 @@ and can be helpful for detecting `#include` omissions. After any updates or changes to dependencies, you may need to do the following: 1. Remove your build directory. -2. Remove the Conan cache: `conan remove "*" -c` -3. Re-run [conan install](#build-and-test). +2. Remove individual libraries from the Conan cache, e.g. -### 'protobuf/port_def.inc' file not found + ```bash + conan remove 'grpc/*' + ``` -If `cmake --build .` results in an error due to a missing a protobuf file, then you might have generated CMake files for a different `build_type` than the `CMAKE_BUILD_TYPE` you passed to conan. + **or** + + Remove all libraries from Conan cache: + + ```bash + conan remove '*' + ``` + +3. Re-run [conan export](#export-updated-recipes) +4. 
Re-run [conan install](#build-and-test). + +### `protobuf/port_def.inc` file not found + +If `cmake --build .` results in an error due to a missing a protobuf file, then +you might have generated CMake files for a different `build_type` than the +`CMAKE_BUILD_TYPE` you passed to Conan. ``` /rippled/.build/pb-xrpl.libpb/xrpl/proto/ripple.pb.h:10:10: fatal error: 'google/protobuf/port_def.inc' file not found diff --git a/docs/build/environment.md b/docs/build/environment.md index 40a80d4b25..7301879d09 100644 --- a/docs/build/environment.md +++ b/docs/build/environment.md @@ -10,37 +10,35 @@ platforms: Linux, macOS, or Windows. Package ecosystems vary across Linux distributions, so there is no one set of instructions that will work for every Linux user. -These instructions are written for Ubuntu 22.04. -They are largely copied from the [script][1] used to configure our Docker -container for continuous integration. -That script handles many more responsibilities. -These instructions are just the bare minimum to build one configuration of -rippled. -You can check that codebase for other Linux distributions and versions. -If you cannot find yours there, -then we hope that these instructions can at least guide you in the right -direction. +The instructions below are written for Debian 12 (Bookworm). ``` -apt update -apt install --yes curl git libssl-dev pipx python3.10-dev python3-pip make g++-11 libprotobuf-dev protobuf-compiler +export GCC_RELEASE=12 +sudo apt update +sudo apt install --yes gcc-${GCC_RELEASE} g++-${GCC_RELEASE} python3-pip \ + python-is-python3 python3-venv python3-dev curl wget ca-certificates \ + git build-essential cmake ninja-build libc6-dev +sudo pip install --break-system-packages conan -curl --location --remote-name \ - "https://github.com/Kitware/CMake/releases/download/v3.25.1/cmake-3.25.1.tar.gz" -tar -xzf cmake-3.25.1.tar.gz -rm cmake-3.25.1.tar.gz -cd cmake-3.25.1 -./bootstrap --parallel=$(nproc) -make --jobs $(nproc) -make install -cd .. - -pipx install 'conan<2' -pipx ensurepath +sudo update-alternatives --install /usr/bin/cc cc /usr/bin/gcc-${GCC_RELEASE} 999 +sudo update-alternatives --install \ + /usr/bin/gcc gcc /usr/bin/gcc-${GCC_RELEASE} 100 \ + --slave /usr/bin/g++ g++ /usr/bin/g++-${GCC_RELEASE} \ + --slave /usr/bin/gcc-ar gcc-ar /usr/bin/gcc-ar-${GCC_RELEASE} \ + --slave /usr/bin/gcc-nm gcc-nm /usr/bin/gcc-nm-${GCC_RELEASE} \ + --slave /usr/bin/gcc-ranlib gcc-ranlib /usr/bin/gcc-ranlib-${GCC_RELEASE} \ + --slave /usr/bin/gcov gcov /usr/bin/gcov-${GCC_RELEASE} \ + --slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-${GCC_RELEASE} \ + --slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-${GCC_RELEASE} \ + --slave /usr/bin/lto-dump lto-dump /usr/bin/lto-dump-${GCC_RELEASE} +sudo update-alternatives --auto cc +sudo update-alternatives --auto gcc ``` -[1]: https://github.com/thejohnfreeman/rippled-docker/blob/master/ubuntu-22.04/install.sh - +If you use different Linux distribution, hope the instruction above can guide +you in the right direction. We try to maintain compatibility with all recent +compiler releases, so if you use a rolling distribution like e.g. Arch or CentOS +then there is a chance that everything will "just work". 
## macOS @@ -100,10 +98,10 @@ and use it to install Conan: brew update brew install xz brew install pyenv -pyenv install 3.10-dev -pyenv global 3.10-dev +pyenv install 3.11 +pyenv global 3.11 eval "$(pyenv init -)" -pip install 'conan<2' +pip install 'conan' ``` Install CMake with Homebrew too: From 69314e68321b9b33d873a1fda75b3f3b6e048214 Mon Sep 17 00:00:00 2001 From: Bart Date: Wed, 6 Aug 2025 11:46:13 -0400 Subject: [PATCH 106/244] refactor: Remove external libraries as they are hosted in our Conan Center Index fork (#5643) This change: * Removes the patched Conan recipes from the `external/` directory. * Adds instructions for contributors how to obtain our patched recipes. * Updates the Conan remote name and remote URL (the underlying package repository isn't changed). * If the remote already exists, updates the URL instead of removing and re-adding. * This is not done for the libXRPL job as it still uses Conan 1. This job will be switched to Conan 2 soon. * Removes duplicate Conan remote CI pipeline steps. * Overwrites the existing global.conf on MacOS and Windows machines, as those do not run CI pipelines in isolation but all share the same Conan installation; appending the same config over and over bloats the file. --- .github/actions/dependencies/action.yml | 26 +-- .github/workflows/libxrpl.yml | 16 +- .github/workflows/macos.yml | 21 +- .github/workflows/nix.yml | 4 +- .github/workflows/windows.yml | 21 +- BUILD.md | 25 ++- external/README.md | 8 +- external/snappy/conandata.yml | 40 ---- external/snappy/conanfile.py | 89 -------- .../1.1.10-0001-fix-inlining-failure.patch | 13 -- ....10-0003-fix-clobber-list-older-llvm.patch | 13 -- .../1.1.9-0001-fix-inlining-failure.patch | 14 -- .../snappy/patches/1.1.9-0002-no-Werror.patch | 12 - ...1.9-0003-fix-clobber-list-older-llvm.patch | 12 - .../patches/1.1.9-0004-rtti-by-default.patch | 20 -- external/soci/conandata.yml | 12 - external/soci/conanfile.py | 212 ------------------ ...-INSTALL_NAME_DIR-for-relocatable-li.patch | 39 ---- .../soci/patches/0002-Fix-soci_backend.patch | 24 -- 19 files changed, 41 insertions(+), 580 deletions(-) delete mode 100644 external/snappy/conandata.yml delete mode 100644 external/snappy/conanfile.py delete mode 100644 external/snappy/patches/1.1.10-0001-fix-inlining-failure.patch delete mode 100644 external/snappy/patches/1.1.10-0003-fix-clobber-list-older-llvm.patch delete mode 100644 external/snappy/patches/1.1.9-0001-fix-inlining-failure.patch delete mode 100644 external/snappy/patches/1.1.9-0002-no-Werror.patch delete mode 100644 external/snappy/patches/1.1.9-0003-fix-clobber-list-older-llvm.patch delete mode 100644 external/snappy/patches/1.1.9-0004-rtti-by-default.patch delete mode 100644 external/soci/conandata.yml delete mode 100644 external/soci/conanfile.py delete mode 100644 external/soci/patches/0001-Remove-hardcoded-INSTALL_NAME_DIR-for-relocatable-li.patch delete mode 100644 external/soci/patches/0002-Fix-soci_backend.patch diff --git a/.github/actions/dependencies/action.yml b/.github/actions/dependencies/action.yml index 7ece9710a8..e32d8934ba 100644 --- a/.github/actions/dependencies/action.yml +++ b/.github/actions/dependencies/action.yml @@ -6,29 +6,17 @@ inputs: runs: using: composite steps: - - name: export custom recipes - shell: bash - run: | - conan export --version 1.1.10 external/snappy - conan export --version 4.0.3 external/soci - - name: add Ripple Conan remote + - name: add Conan remote if: env.CONAN_URL != '' shell: bash run: | - if conan remote list | grep -q "ripple"; 
then - conan remote remove ripple - echo "Removed conan remote ripple" + if conan remote list | grep -q 'xrplf'; then + conan remote update --index 0 --url ${CONAN_URL} xrplf + echo "Updated Conan remote 'xrplf' to ${CONAN_URL}." + else + conan remote add --index 0 xrplf ${CONAN_URL} + echo "Added new conan remote 'xrplf' at ${CONAN_URL}." fi - conan remote add --index 0 ripple "${CONAN_URL}" - echo "Added conan remote ripple at ${CONAN_URL}" - - name: try to authenticate to Ripple Conan remote - if: env.CONAN_LOGIN_USERNAME_RIPPLE != '' && env.CONAN_PASSWORD_RIPPLE != '' - id: remote - shell: bash - run: | - echo "Authenticating to ripple remote..." - conan remote auth ripple --force - conan remote list-users - name: list missing binaries id: binaries shell: bash diff --git a/.github/workflows/libxrpl.yml b/.github/workflows/libxrpl.yml index 79cd872210..a8746fe297 100644 --- a/.github/workflows/libxrpl.yml +++ b/.github/workflows/libxrpl.yml @@ -1,8 +1,8 @@ name: Check libXRPL compatibility with Clio env: - CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/dev - CONAN_LOGIN_USERNAME_RIPPLE: ${{ secrets.CONAN_USERNAME }} - CONAN_PASSWORD_RIPPLE: ${{ secrets.CONAN_TOKEN }} + CONAN_URL: https://conan.ripplex.io + CONAN_LOGIN_USERNAME_XRPLF: ${{ secrets.CONAN_USERNAME }} + CONAN_PASSWORD_XRPLF: ${{ secrets.CONAN_TOKEN }} on: pull_request: paths: @@ -43,20 +43,20 @@ jobs: shell: bash run: | conan export . ${{ steps.channel.outputs.channel }} - - name: Add Ripple Conan remote + - name: Add Conan remote shell: bash run: | conan remote list - conan remote remove ripple || true + conan remote remove xrplf || true # Do not quote the URL. An empty string will be accepted (with a non-fatal warning), but a missing argument will not. - conan remote add ripple ${{ env.CONAN_URL }} --insert 0 + conan remote add xrplf ${{ env.CONAN_URL }} --insert 0 - name: Parse new version id: version shell: bash run: | echo version="$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" \ | awk -F '"' '{print $2}')" | tee ${GITHUB_OUTPUT} - - name: Try to authenticate to Ripple Conan remote + - name: Try to authenticate to Conan remote id: remote shell: bash run: | @@ -64,7 +64,7 @@ jobs: # https://docs.conan.io/1/reference/commands/misc/user.html#using-environment-variables # https://docs.conan.io/1/reference/env_vars.html#conan-login-username-conan-login-username-remote-name # https://docs.conan.io/1/reference/env_vars.html#conan-password-conan-password-remote-name - echo outcome=$(conan user --remote ripple --password >&2 \ + echo outcome=$(conan user --remote xrplf --password >&2 \ && echo success || echo failure) | tee ${GITHUB_OUTPUT} - name: Upload new package id: upload diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index adea15af9e..c056468fc6 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -18,9 +18,7 @@ concurrency: # This part of Conan configuration is specific to this workflow only; we do not want # to pollute conan/profiles directory with settings which might not work for others env: - CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/dev - CONAN_LOGIN_USERNAME_RIPPLE: ${{ secrets.CONAN_USERNAME }} - CONAN_PASSWORD_RIPPLE: ${{ secrets.CONAN_TOKEN }} + CONAN_URL: https://conan.ripplex.io CONAN_GLOBAL_CONF: | core.download:parallel={{os.cpu_count()}} core.upload:parallel={{os.cpu_count()}} @@ -87,24 +85,9 @@ jobs: clang --version - name: configure Conan run : | - echo "${CONAN_GLOBAL_CONF}" >> $(conan config 
home)/global.conf + echo "${CONAN_GLOBAL_CONF}" > $(conan config home)/global.conf conan config install conan/profiles/ -tf $(conan config home)/profiles/ conan profile show - - name: export custom recipes - shell: bash - run: | - conan export --version 1.1.10 external/snappy - conan export --version 4.0.3 external/soci - - name: add Ripple Conan remote - if: env.CONAN_URL != '' - shell: bash - run: | - if conan remote list | grep -q "ripple"; then - conan remote remove ripple - echo "Removed conan remote ripple" - fi - conan remote add --index 0 ripple "${CONAN_URL}" - echo "Added conan remote ripple at ${CONAN_URL}" - name: build dependencies uses: ./.github/actions/dependencies with: diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml index d6490e4caa..14e98e3fb0 100644 --- a/.github/workflows/nix.yml +++ b/.github/workflows/nix.yml @@ -19,9 +19,7 @@ concurrency: # This part of Conan configuration is specific to this workflow only; we do not want # to pollute conan/profiles directory with settings which might not work for others env: - CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/dev - CONAN_LOGIN_USERNAME_RIPPLE: ${{ secrets.CONAN_USERNAME }} - CONAN_PASSWORD_RIPPLE: ${{ secrets.CONAN_TOKEN }} + CONAN_URL: https://conan.ripplex.io CONAN_GLOBAL_CONF: | core.download:parallel={{ os.cpu_count() }} core.upload:parallel={{ os.cpu_count() }} diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 84a91bcb4e..4de92c8049 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -21,9 +21,7 @@ concurrency: # This part of Conan configuration is specific to this workflow only; we do not want # to pollute conan/profiles directory with settings which might not work for others env: - CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/dev - CONAN_LOGIN_USERNAME_RIPPLE: ${{ secrets.CONAN_USERNAME }} - CONAN_PASSWORD_RIPPLE: ${{ secrets.CONAN_TOKEN }} + CONAN_URL: https://conan.ripplex.io CONAN_GLOBAL_CONF: | core.download:parallel={{os.cpu_count()}} core.upload:parallel={{os.cpu_count()}} @@ -82,24 +80,9 @@ jobs: - name: configure Conan shell: bash run: | - echo "${CONAN_GLOBAL_CONF}" >> $(conan config home)/global.conf + echo "${CONAN_GLOBAL_CONF}" > $(conan config home)/global.conf conan config install conan/profiles/ -tf $(conan config home)/profiles/ conan profile show - - name: export custom recipes - shell: bash - run: | - conan export --version 1.1.10 external/snappy - conan export --version 4.0.3 external/soci - - name: add Ripple Conan remote - if: env.CONAN_URL != '' - shell: bash - run: | - if conan remote list | grep -q "ripple"; then - conan remote remove ripple - echo "Removed conan remote ripple" - fi - conan remote add --index 0 ripple "${CONAN_URL}" - echo "Added conan remote ripple at ${CONAN_URL}" - name: build dependencies uses: ./.github/actions/dependencies with: diff --git a/BUILD.md b/BUILD.md index aa18767397..e1567e297c 100644 --- a/BUILD.md +++ b/BUILD.md @@ -132,15 +132,28 @@ can do this by running: conan remote add --index 0 xrplf "https://conan.ripplex.io" ``` -Alternatively, you can use the `conan export` command to add the patched recipes -that are also (for the time being) located in the `external/` directory to your -local Conan cache: +Alternatively, you can pull the patched recipes into the repository and use them +locally: ```bash -conan export --version 1.1.10 external/snappy -conan export --version 4.0.3 external/soci +cd external +git init +git remote add origin 
git@github.com:XRPLF/conan-center-index.git +git sparse-checkout init +git sparse-checkout set recipes/snappy +git sparse-checkout add recipes/soci +git fetch origin master +git checkout master +conan export --version 1.1.10 external/recipes/snappy +conan export --version 4.0.3 external/recipes/soci ``` +In the case we switch to a newer version of a dependency that still requires a +patch, it will be necessary for you to pull in the changes and re-export the +updated dependencies with the newer version. However, if we switch to a newer +version that no longer requires a patch, no action is required on your part, as +the new recipe will be automatically pulled from the official Conan Center. + ### Conan profile tweaks #### Missing compiler version @@ -525,7 +538,7 @@ After any updates or changes to dependencies, you may need to do the following: conan remove '*' ``` -3. Re-run [conan export](#export-updated-recipes) +3. Re-run [conan export](#patched-recipes) if needed. 4. Re-run [conan install](#build-and-test). ### `protobuf/port_def.inc` file not found diff --git a/external/README.md b/external/README.md index c810539fd7..a3d04da264 100644 --- a/external/README.md +++ b/external/README.md @@ -1,14 +1,10 @@ # External Conan recipes -The subdirectories in this directory contain either copies or Conan recipes -of external libraries used by rippled. -The Conan recipes include patches we have not yet pushed upstream. +The subdirectories in this directory contain copies of external libraries used +by rippled. | Folder | Upstream | Description | |:----------------|:---------------------------------------------|:------------| | `antithesis-sdk`| [Project](https://github.com/antithesishq/antithesis-sdk-cpp/) | [Antithesis](https://antithesis.com/docs/using_antithesis/sdk/cpp/overview.html) SDK for C++ | | `ed25519-donna` | [Project](https://github.com/floodyberry/ed25519-donna) | [Ed25519](http://ed25519.cr.yp.to/) digital signatures | -| `rocksdb` | [Recipe](https://github.com/conan-io/conan-center-index/tree/master/recipes/rocksdb) | Fast key/value database. (Supports rotational disks better than NuDB.) | | `secp256k1` | [Project](https://github.com/bitcoin-core/secp256k1) | ECDSA digital signatures using the **secp256k1** curve | -| `snappy` | [Recipe](https://github.com/conan-io/conan-center-index/tree/master/recipes/snappy) | "Snappy" lossless compression algorithm. | -| `soci` | [Recipe](https://github.com/conan-io/conan-center-index/tree/master/recipes/soci) | Abstraction layer for database access. 
| diff --git a/external/snappy/conandata.yml b/external/snappy/conandata.yml deleted file mode 100644 index 1488c7a2ba..0000000000 --- a/external/snappy/conandata.yml +++ /dev/null @@ -1,40 +0,0 @@ -sources: - "1.1.10": - url: "https://github.com/google/snappy/archive/1.1.10.tar.gz" - sha256: "49d831bffcc5f3d01482340fe5af59852ca2fe76c3e05df0e67203ebbe0f1d90" - "1.1.9": - url: "https://github.com/google/snappy/archive/1.1.9.tar.gz" - sha256: "75c1fbb3d618dd3a0483bff0e26d0a92b495bbe5059c8b4f1c962b478b6e06e7" - "1.1.8": - url: "https://github.com/google/snappy/archive/1.1.8.tar.gz" - sha256: "16b677f07832a612b0836178db7f374e414f94657c138e6993cbfc5dcc58651f" - "1.1.7": - url: "https://github.com/google/snappy/archive/1.1.7.tar.gz" - sha256: "3dfa02e873ff51a11ee02b9ca391807f0c8ea0529a4924afa645fbf97163f9d4" -patches: - "1.1.10": - - patch_file: "patches/1.1.10-0001-fix-inlining-failure.patch" - patch_description: "disable inlining for compilation error" - patch_type: "portability" - - patch_file: "patches/1.1.9-0002-no-Werror.patch" - patch_description: "disable 'warning as error' options" - patch_type: "portability" - - patch_file: "patches/1.1.10-0003-fix-clobber-list-older-llvm.patch" - patch_description: "disable inline asm on apple-clang" - patch_type: "portability" - - patch_file: "patches/1.1.9-0004-rtti-by-default.patch" - patch_description: "remove 'disable rtti'" - patch_type: "conan" - "1.1.9": - - patch_file: "patches/1.1.9-0001-fix-inlining-failure.patch" - patch_description: "disable inlining for compilation error" - patch_type: "portability" - - patch_file: "patches/1.1.9-0002-no-Werror.patch" - patch_description: "disable 'warning as error' options" - patch_type: "portability" - - patch_file: "patches/1.1.9-0003-fix-clobber-list-older-llvm.patch" - patch_description: "disable inline asm on apple-clang" - patch_type: "portability" - - patch_file: "patches/1.1.9-0004-rtti-by-default.patch" - patch_description: "remove 'disable rtti'" - patch_type: "conan" diff --git a/external/snappy/conanfile.py b/external/snappy/conanfile.py deleted file mode 100644 index 23558639f4..0000000000 --- a/external/snappy/conanfile.py +++ /dev/null @@ -1,89 +0,0 @@ -from conan import ConanFile -from conan.tools.build import check_min_cppstd -from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout -from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, rmdir -from conan.tools.scm import Version -import os - -required_conan_version = ">=1.54.0" - - -class SnappyConan(ConanFile): - name = "snappy" - description = "A fast compressor/decompressor" - topics = ("google", "compressor", "decompressor") - url = "https://github.com/conan-io/conan-center-index" - homepage = "https://github.com/google/snappy" - license = "BSD-3-Clause" - - package_type = "library" - settings = "os", "arch", "compiler", "build_type" - options = { - "shared": [True, False], - "fPIC": [True, False], - } - default_options = { - "shared": False, - "fPIC": True, - } - - def export_sources(self): - export_conandata_patches(self) - - def config_options(self): - if self.settings.os == 'Windows': - del self.options.fPIC - - def configure(self): - if self.options.shared: - self.options.rm_safe("fPIC") - - def layout(self): - cmake_layout(self, src_folder="src") - - def validate(self): - if self.settings.compiler.get_safe("cppstd"): - check_min_cppstd(self, 11) - - def source(self): - get(self, **self.conan_data["sources"][self.version], strip_root=True) - - def generate(self): - tc = 
CMakeToolchain(self) - tc.variables["SNAPPY_BUILD_TESTS"] = False - if Version(self.version) >= "1.1.8": - tc.variables["SNAPPY_FUZZING_BUILD"] = False - tc.variables["SNAPPY_REQUIRE_AVX"] = False - tc.variables["SNAPPY_REQUIRE_AVX2"] = False - tc.variables["SNAPPY_INSTALL"] = True - if Version(self.version) >= "1.1.9": - tc.variables["SNAPPY_BUILD_BENCHMARKS"] = False - tc.generate() - - def build(self): - apply_conandata_patches(self) - cmake = CMake(self) - cmake.configure() - cmake.build() - - def package(self): - copy(self, "COPYING", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses")) - cmake = CMake(self) - cmake.install() - rmdir(self, os.path.join(self.package_folder, "lib", "cmake")) - - def package_info(self): - self.cpp_info.set_property("cmake_file_name", "Snappy") - self.cpp_info.set_property("cmake_target_name", "Snappy::snappy") - # TODO: back to global scope in conan v2 once cmake_find_package* generators removed - self.cpp_info.components["snappylib"].libs = ["snappy"] - if not self.options.shared: - if self.settings.os in ["Linux", "FreeBSD"]: - self.cpp_info.components["snappylib"].system_libs.append("m") - - # TODO: to remove in conan v2 once cmake_find_package* generators removed - self.cpp_info.names["cmake_find_package"] = "Snappy" - self.cpp_info.names["cmake_find_package_multi"] = "Snappy" - self.cpp_info.components["snappylib"].names["cmake_find_package"] = "snappy" - self.cpp_info.components["snappylib"].names["cmake_find_package_multi"] = "snappy" - self.cpp_info.components["snappylib"].set_property("cmake_target_name", "Snappy::snappy") diff --git a/external/snappy/patches/1.1.10-0001-fix-inlining-failure.patch b/external/snappy/patches/1.1.10-0001-fix-inlining-failure.patch deleted file mode 100644 index 66b0f05521..0000000000 --- a/external/snappy/patches/1.1.10-0001-fix-inlining-failure.patch +++ /dev/null @@ -1,13 +0,0 @@ -diff --git a/snappy-stubs-internal.h b/snappy-stubs-internal.h -index 1548ed7..3b4a9f3 100644 ---- a/snappy-stubs-internal.h -+++ b/snappy-stubs-internal.h -@@ -100,7 +100,7 @@ - - // Inlining hints. - #if HAVE_ATTRIBUTE_ALWAYS_INLINE --#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline)) -+#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE - #else - #define SNAPPY_ATTRIBUTE_ALWAYS_INLINE - #endif // HAVE_ATTRIBUTE_ALWAYS_INLINE diff --git a/external/snappy/patches/1.1.10-0003-fix-clobber-list-older-llvm.patch b/external/snappy/patches/1.1.10-0003-fix-clobber-list-older-llvm.patch deleted file mode 100644 index 969ce3805d..0000000000 --- a/external/snappy/patches/1.1.10-0003-fix-clobber-list-older-llvm.patch +++ /dev/null @@ -1,13 +0,0 @@ -diff --git a/snappy.cc b/snappy.cc -index d414718..e4efb59 100644 ---- a/snappy.cc -+++ b/snappy.cc -@@ -1132,7 +1132,7 @@ inline size_t AdvanceToNextTagX86Optimized(const uint8_t** ip_p, size_t* tag) { - size_t literal_len = *tag >> 2; - size_t tag_type = *tag; - bool is_literal; --#if defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(__x86_64__) -+#if defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(__x86_64__) && ( (!defined(__clang__) && !defined(__APPLE__)) || (!defined(__APPLE__) && defined(__clang__) && (__clang_major__ >= 9)) || (defined(__APPLE__) && defined(__clang__) && (__clang_major__ > 11)) ) - // TODO clang misses the fact that the (c & 3) already correctly - // sets the zero flag. 
- asm("and $3, %k[tag_type]\n\t" diff --git a/external/snappy/patches/1.1.9-0001-fix-inlining-failure.patch b/external/snappy/patches/1.1.9-0001-fix-inlining-failure.patch deleted file mode 100644 index cdc119c0d5..0000000000 --- a/external/snappy/patches/1.1.9-0001-fix-inlining-failure.patch +++ /dev/null @@ -1,14 +0,0 @@ -Fixes the following error: -error: inlining failed in call to ‘always_inline’ ‘size_t snappy::AdvanceToNextTag(const uint8_t**, size_t*)’: function body can be overwritten at link time - ---- snappy-stubs-internal.h -+++ snappy-stubs-internal.h -@@ -100,7 +100,7 @@ - - // Inlining hints. - #ifdef HAVE_ATTRIBUTE_ALWAYS_INLINE --#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline)) -+#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE - #else - #define SNAPPY_ATTRIBUTE_ALWAYS_INLINE - #endif diff --git a/external/snappy/patches/1.1.9-0002-no-Werror.patch b/external/snappy/patches/1.1.9-0002-no-Werror.patch deleted file mode 100644 index d86e4e0a9d..0000000000 --- a/external/snappy/patches/1.1.9-0002-no-Werror.patch +++ /dev/null @@ -1,12 +0,0 @@ ---- CMakeLists.txt -+++ CMakeLists.txt -@@ -69,7 +69,7 @@ -- # Use -Werror for clang only. -+if(0) - if(CMAKE_CXX_COMPILER_ID MATCHES "Clang") - if(NOT CMAKE_CXX_FLAGS MATCHES "-Werror") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror") - endif(NOT CMAKE_CXX_FLAGS MATCHES "-Werror") - endif(CMAKE_CXX_COMPILER_ID MATCHES "Clang") -- -+endif() diff --git a/external/snappy/patches/1.1.9-0003-fix-clobber-list-older-llvm.patch b/external/snappy/patches/1.1.9-0003-fix-clobber-list-older-llvm.patch deleted file mode 100644 index 84bc674fdd..0000000000 --- a/external/snappy/patches/1.1.9-0003-fix-clobber-list-older-llvm.patch +++ /dev/null @@ -1,12 +0,0 @@ -asm clobbers do not work for clang < 9 and apple-clang < 11 (found by SpaceIm) ---- snappy.cc -+++ snappy.cc -@@ -1026,7 +1026,7 @@ - size_t literal_len = *tag >> 2; - size_t tag_type = *tag; - bool is_literal; --#if defined(__GNUC__) && defined(__x86_64__) -+#if defined(__GNUC__) && defined(__x86_64__) && ( (!defined(__clang__) && !defined(__APPLE__)) || (!defined(__APPLE__) && defined(__clang__) && (__clang_major__ >= 9)) || (defined(__APPLE__) && defined(__clang__) && (__clang_major__ > 11)) ) - // TODO clang misses the fact that the (c & 3) already correctly - // sets the zero flag. - asm("and $3, %k[tag_type]\n\t" diff --git a/external/snappy/patches/1.1.9-0004-rtti-by-default.patch b/external/snappy/patches/1.1.9-0004-rtti-by-default.patch deleted file mode 100644 index c353a489d0..0000000000 --- a/external/snappy/patches/1.1.9-0004-rtti-by-default.patch +++ /dev/null @@ -1,20 +0,0 @@ ---- a/CMakeLists.txt -+++ b/CMakeLists.txt -@@ -53,8 +53,6 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") - add_definitions(-D_HAS_EXCEPTIONS=0) - - # Disable RTTI. -- string(REGEX REPLACE "/GR" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") -- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /GR-") - else(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") - # Use -Wall for clang and gcc. - if(NOT CMAKE_CXX_FLAGS MATCHES "-Wall") -@@ -78,8 +76,6 @@ endif() - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions") - - # Disable RTTI. 
-- string(REGEX REPLACE "-frtti" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") -- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti") - endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") - - # BUILD_SHARED_LIBS is a standard CMake variable, but we declare it here to make diff --git a/external/soci/conandata.yml b/external/soci/conandata.yml deleted file mode 100644 index 6eb59aaffa..0000000000 --- a/external/soci/conandata.yml +++ /dev/null @@ -1,12 +0,0 @@ -sources: - "4.0.3": - url: "https://github.com/SOCI/soci/archive/v4.0.3.tar.gz" - sha256: "4b1ff9c8545c5d802fbe06ee6cd2886630e5c03bf740e269bb625b45cf934928" -patches: - "4.0.3": - - patch_file: "patches/0001-Remove-hardcoded-INSTALL_NAME_DIR-for-relocatable-li.patch" - patch_description: "Generate relocatable libraries on MacOS" - patch_type: "portability" - - patch_file: "patches/0002-Fix-soci_backend.patch" - patch_description: "Fix variable names for dependencies" - patch_type: "conan" diff --git a/external/soci/conanfile.py b/external/soci/conanfile.py deleted file mode 100644 index fe4c54e53e..0000000000 --- a/external/soci/conanfile.py +++ /dev/null @@ -1,212 +0,0 @@ -from conan import ConanFile -from conan.tools.build import check_min_cppstd -from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout -from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, rmdir -from conan.tools.microsoft import is_msvc -from conan.tools.scm import Version -from conan.errors import ConanInvalidConfiguration -import os - -required_conan_version = ">=1.55.0" - - -class SociConan(ConanFile): - name = "soci" - homepage = "https://github.com/SOCI/soci" - url = "https://github.com/conan-io/conan-center-index" - description = "The C++ Database Access Library " - topics = ("mysql", "odbc", "postgresql", "sqlite3") - license = "BSL-1.0" - - settings = "os", "arch", "compiler", "build_type" - options = { - "shared": [True, False], - "fPIC": [True, False], - "empty": [True, False], - "with_sqlite3": [True, False], - "with_db2": [True, False], - "with_odbc": [True, False], - "with_oracle": [True, False], - "with_firebird": [True, False], - "with_mysql": [True, False], - "with_postgresql": [True, False], - "with_boost": [True, False], - } - default_options = { - "shared": False, - "fPIC": True, - "empty": False, - "with_sqlite3": False, - "with_db2": False, - "with_odbc": False, - "with_oracle": False, - "with_firebird": False, - "with_mysql": False, - "with_postgresql": False, - "with_boost": False, - } - - def export_sources(self): - export_conandata_patches(self) - - def layout(self): - cmake_layout(self, src_folder="src") - - def config_options(self): - if self.settings.os == "Windows": - self.options.rm_safe("fPIC") - - def configure(self): - if self.options.shared: - self.options.rm_safe("fPIC") - - def requirements(self): - if self.options.with_sqlite3: - self.requires("sqlite3/3.47.0") - if self.options.with_odbc and self.settings.os != "Windows": - self.requires("odbc/2.3.11") - if self.options.with_mysql: - self.requires("libmysqlclient/8.1.0") - if self.options.with_postgresql: - self.requires("libpq/15.5") - if self.options.with_boost: - self.requires("boost/1.86.0") - - @property - def _minimum_compilers_version(self): - return { - "Visual Studio": "14", - "gcc": "4.8", - "clang": "3.8", - "apple-clang": "8.0" - } - - def validate(self): - if self.settings.compiler.get_safe("cppstd"): - check_min_cppstd(self, 11) - - compiler = str(self.settings.compiler) - compiler_version = 
Version(self.settings.compiler.version.value) - if compiler not in self._minimum_compilers_version: - self.output.warning("{} recipe lacks information about the {} compiler support.".format(self.name, self.settings.compiler)) - elif compiler_version < self._minimum_compilers_version[compiler]: - raise ConanInvalidConfiguration("{} requires a {} version >= {}".format(self.name, compiler, compiler_version)) - - prefix = "Dependencies for" - message = "not configured in this conan package." - if self.options.with_db2: - # self.requires("db2/0.0.0") # TODO add support for db2 - raise ConanInvalidConfiguration("{} DB2 {} ".format(prefix, message)) - if self.options.with_oracle: - # self.requires("oracle_db/0.0.0") # TODO add support for oracle - raise ConanInvalidConfiguration("{} ORACLE {} ".format(prefix, message)) - if self.options.with_firebird: - # self.requires("firebird/0.0.0") # TODO add support for firebird - raise ConanInvalidConfiguration("{} firebird {} ".format(prefix, message)) - - def source(self): - get(self, **self.conan_data["sources"][self.version], strip_root=True) - - def generate(self): - tc = CMakeToolchain(self) - - tc.variables["SOCI_SHARED"] = self.options.shared - tc.variables["SOCI_STATIC"] = not self.options.shared - tc.variables["SOCI_TESTS"] = False - tc.variables["SOCI_CXX11"] = True - tc.variables["SOCI_EMPTY"] = self.options.empty - tc.variables["WITH_SQLITE3"] = self.options.with_sqlite3 - tc.variables["WITH_DB2"] = self.options.with_db2 - tc.variables["WITH_ODBC"] = self.options.with_odbc - tc.variables["WITH_ORACLE"] = self.options.with_oracle - tc.variables["WITH_FIREBIRD"] = self.options.with_firebird - tc.variables["WITH_MYSQL"] = self.options.with_mysql - tc.variables["WITH_POSTGRESQL"] = self.options.with_postgresql - tc.variables["WITH_BOOST"] = self.options.with_boost - tc.generate() - - deps = CMakeDeps(self) - deps.generate() - - def build(self): - apply_conandata_patches(self) - cmake = CMake(self) - cmake.configure() - cmake.build() - - def package(self): - copy(self, "LICENSE_1_0.txt", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder) - - cmake = CMake(self) - cmake.install() - - rmdir(self, os.path.join(self.package_folder, "lib", "cmake")) - - def package_info(self): - self.cpp_info.set_property("cmake_file_name", "SOCI") - - target_suffix = "" if self.options.shared else "_static" - lib_prefix = "lib" if is_msvc(self) and not self.options.shared else "" - version = Version(self.version) - lib_suffix = "_{}_{}".format(version.major, version.minor) if self.settings.os == "Windows" else "" - - # soci_core - self.cpp_info.components["soci_core"].set_property("cmake_target_name", "SOCI::soci_core{}".format(target_suffix)) - self.cpp_info.components["soci_core"].libs = ["{}soci_core{}".format(lib_prefix, lib_suffix)] - if self.options.with_boost: - self.cpp_info.components["soci_core"].requires.append("boost::headers") - - # soci_empty - if self.options.empty: - self.cpp_info.components["soci_empty"].set_property("cmake_target_name", "SOCI::soci_empty{}".format(target_suffix)) - self.cpp_info.components["soci_empty"].libs = ["{}soci_empty{}".format(lib_prefix, lib_suffix)] - self.cpp_info.components["soci_empty"].requires = ["soci_core"] - - # soci_sqlite3 - if self.options.with_sqlite3: - self.cpp_info.components["soci_sqlite3"].set_property("cmake_target_name", "SOCI::soci_sqlite3{}".format(target_suffix)) - self.cpp_info.components["soci_sqlite3"].libs = ["{}soci_sqlite3{}".format(lib_prefix, lib_suffix)] - 
self.cpp_info.components["soci_sqlite3"].requires = ["soci_core", "sqlite3::sqlite3"] - - # soci_odbc - if self.options.with_odbc: - self.cpp_info.components["soci_odbc"].set_property("cmake_target_name", "SOCI::soci_odbc{}".format(target_suffix)) - self.cpp_info.components["soci_odbc"].libs = ["{}soci_odbc{}".format(lib_prefix, lib_suffix)] - self.cpp_info.components["soci_odbc"].requires = ["soci_core"] - if self.settings.os == "Windows": - self.cpp_info.components["soci_odbc"].system_libs.append("odbc32") - else: - self.cpp_info.components["soci_odbc"].requires.append("odbc::odbc") - - # soci_mysql - if self.options.with_mysql: - self.cpp_info.components["soci_mysql"].set_property("cmake_target_name", "SOCI::soci_mysql{}".format(target_suffix)) - self.cpp_info.components["soci_mysql"].libs = ["{}soci_mysql{}".format(lib_prefix, lib_suffix)] - self.cpp_info.components["soci_mysql"].requires = ["soci_core", "libmysqlclient::libmysqlclient"] - - # soci_postgresql - if self.options.with_postgresql: - self.cpp_info.components["soci_postgresql"].set_property("cmake_target_name", "SOCI::soci_postgresql{}".format(target_suffix)) - self.cpp_info.components["soci_postgresql"].libs = ["{}soci_postgresql{}".format(lib_prefix, lib_suffix)] - self.cpp_info.components["soci_postgresql"].requires = ["soci_core", "libpq::libpq"] - - # TODO: to remove in conan v2 once cmake_find_package* generators removed - self.cpp_info.names["cmake_find_package"] = "SOCI" - self.cpp_info.names["cmake_find_package_multi"] = "SOCI" - self.cpp_info.components["soci_core"].names["cmake_find_package"] = "soci_core{}".format(target_suffix) - self.cpp_info.components["soci_core"].names["cmake_find_package_multi"] = "soci_core{}".format(target_suffix) - if self.options.empty: - self.cpp_info.components["soci_empty"].names["cmake_find_package"] = "soci_empty{}".format(target_suffix) - self.cpp_info.components["soci_empty"].names["cmake_find_package_multi"] = "soci_empty{}".format(target_suffix) - if self.options.with_sqlite3: - self.cpp_info.components["soci_sqlite3"].names["cmake_find_package"] = "soci_sqlite3{}".format(target_suffix) - self.cpp_info.components["soci_sqlite3"].names["cmake_find_package_multi"] = "soci_sqlite3{}".format(target_suffix) - if self.options.with_odbc: - self.cpp_info.components["soci_odbc"].names["cmake_find_package"] = "soci_odbc{}".format(target_suffix) - self.cpp_info.components["soci_odbc"].names["cmake_find_package_multi"] = "soci_odbc{}".format(target_suffix) - if self.options.with_mysql: - self.cpp_info.components["soci_mysql"].names["cmake_find_package"] = "soci_mysql{}".format(target_suffix) - self.cpp_info.components["soci_mysql"].names["cmake_find_package_multi"] = "soci_mysql{}".format(target_suffix) - if self.options.with_postgresql: - self.cpp_info.components["soci_postgresql"].names["cmake_find_package"] = "soci_postgresql{}".format(target_suffix) - self.cpp_info.components["soci_postgresql"].names["cmake_find_package_multi"] = "soci_postgresql{}".format(target_suffix) diff --git a/external/soci/patches/0001-Remove-hardcoded-INSTALL_NAME_DIR-for-relocatable-li.patch b/external/soci/patches/0001-Remove-hardcoded-INSTALL_NAME_DIR-for-relocatable-li.patch deleted file mode 100644 index 5de0027f75..0000000000 --- a/external/soci/patches/0001-Remove-hardcoded-INSTALL_NAME_DIR-for-relocatable-li.patch +++ /dev/null @@ -1,39 +0,0 @@ -From d491bf7b5040d314ffd0c6310ba01f78ff44c85e Mon Sep 17 00:00:00 2001 -From: Rasmus Thomsen -Date: Fri, 14 Apr 2023 09:16:29 +0200 -Subject: [PATCH] Remove 
hardcoded INSTALL_NAME_DIR for relocatable libraries - on MacOS - ---- - cmake/SociBackend.cmake | 2 +- - src/core/CMakeLists.txt | 1 - - 2 files changed, 1 insertion(+), 2 deletions(-) - -diff --git a/cmake/SociBackend.cmake b/cmake/SociBackend.cmake -index 5d4ef0df..39fe1f77 100644 ---- a/cmake/SociBackend.cmake -+++ b/cmake/SociBackend.cmake -@@ -171,7 +171,7 @@ macro(soci_backend NAME) - set_target_properties(${THIS_BACKEND_TARGET} - PROPERTIES - SOVERSION ${${PROJECT_NAME}_SOVERSION} -- INSTALL_NAME_DIR ${CMAKE_INSTALL_PREFIX}/lib) -+ ) - - if(APPLE) - set_target_properties(${THIS_BACKEND_TARGET} -diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt -index 3e7deeae..f9eae564 100644 ---- a/src/core/CMakeLists.txt -+++ b/src/core/CMakeLists.txt -@@ -59,7 +59,6 @@ if (SOCI_SHARED) - PROPERTIES - VERSION ${SOCI_VERSION} - SOVERSION ${SOCI_SOVERSION} -- INSTALL_NAME_DIR ${CMAKE_INSTALL_PREFIX}/lib - CLEAN_DIRECT_OUTPUT 1) - endif() - --- -2.25.1 - diff --git a/external/soci/patches/0002-Fix-soci_backend.patch b/external/soci/patches/0002-Fix-soci_backend.patch deleted file mode 100644 index eab3c3763c..0000000000 --- a/external/soci/patches/0002-Fix-soci_backend.patch +++ /dev/null @@ -1,24 +0,0 @@ -diff --git a/cmake/SociBackend.cmake b/cmake/SociBackend.cmake -index 0a664667..3fa2ed95 100644 ---- a/cmake/SociBackend.cmake -+++ b/cmake/SociBackend.cmake -@@ -31,14 +31,13 @@ macro(soci_backend_deps_found NAME DEPS SUCCESS) - if(NOT DEPEND_FOUND) - list(APPEND DEPS_NOT_FOUND ${dep}) - else() -- string(TOUPPER "${dep}" DEPU) -- if( ${DEPU}_INCLUDE_DIR ) -- list(APPEND DEPS_INCLUDE_DIRS ${${DEPU}_INCLUDE_DIR}) -+ if( ${dep}_INCLUDE_DIR ) -+ list(APPEND DEPS_INCLUDE_DIRS ${${dep}_INCLUDE_DIR}) - endif() -- if( ${DEPU}_INCLUDE_DIRS ) -- list(APPEND DEPS_INCLUDE_DIRS ${${DEPU}_INCLUDE_DIRS}) -+ if( ${dep}_INCLUDE_DIRS ) -+ list(APPEND DEPS_INCLUDE_DIRS ${${dep}_INCLUDE_DIRS}) - endif() -- list(APPEND DEPS_LIBRARIES ${${DEPU}_LIBRARIES}) -+ list(APPEND DEPS_LIBRARIES ${${dep}_LIBRARIES}) - endif() - endforeach() - From 991891625aebb6856edbb5164d82c4f79ad64507 Mon Sep 17 00:00:00 2001 From: Bart Date: Thu, 7 Aug 2025 06:52:58 -0400 Subject: [PATCH 107/244] Upload Conan dependencies upon merge into develop (#5654) This change uploads built Conan dependencies to the Conan remote upon merge into the develop branch. At the moment, whenever Conan dependencies change, we need to remember to manually push them to our Conan remote, so they are cached for future reuse. If we forget to do so, these changed dependencies need to be rebuilt over and over again, which can take a long time. --- .github/actions/dependencies/action.yml | 10 +++++++++- .github/workflows/macos.yml | 2 ++ .github/workflows/nix.yml | 2 ++ .github/workflows/windows.yml | 2 ++ 4 files changed, 15 insertions(+), 1 deletion(-) diff --git a/.github/actions/dependencies/action.yml b/.github/actions/dependencies/action.yml index e32d8934ba..8dc78450c1 100644 --- a/.github/actions/dependencies/action.yml +++ b/.github/actions/dependencies/action.yml @@ -15,7 +15,7 @@ runs: echo "Updated Conan remote 'xrplf' to ${CONAN_URL}." else conan remote add --index 0 xrplf ${CONAN_URL} - echo "Added new conan remote 'xrplf' at ${CONAN_URL}." + echo "Added new Conan remote 'xrplf' at ${CONAN_URL}." fi - name: list missing binaries id: binaries @@ -36,3 +36,11 @@ runs: --options:host "&:xrpld=True" \ --settings:all build_type=${{ inputs.configuration }} \ .. 
+ - name: upload dependencies + if: ${{ env.CONAN_URL != '' && env.CONAN_LOGIN_USERNAME_XRPLF != '' && env.CONAN_PASSWORD_XRPLF != '' && github.ref_type == 'branch' && github.ref_name == github.event.repository.default_branch }} + shell: bash + run: | + echo "Logging into Conan remote 'xrplf' at ${CONAN_URL}." + conan remote login xrplf "${{ env.CONAN_LOGIN_USERNAME_XRPLF }}" --password "${{ env.CONAN_PASSWORD_XRPLF }}" + echo "Uploading dependencies for configuration '${{ inputs.configuration }}'." + conan upload --all --confirm --remote xrplf . --settings build_type=${{ inputs.configuration }} diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index c056468fc6..ee912fda40 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -19,6 +19,8 @@ concurrency: # to pollute conan/profiles directory with settings which might not work for others env: CONAN_URL: https://conan.ripplex.io + CONAN_LOGIN_USERNAME_XRPLF: ${{ secrets.CONAN_USERNAME }} + CONAN_PASSWORD_XRPLF: ${{ secrets.CONAN_TOKEN }} CONAN_GLOBAL_CONF: | core.download:parallel={{os.cpu_count()}} core.upload:parallel={{os.cpu_count()}} diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml index 14e98e3fb0..c58c97364d 100644 --- a/.github/workflows/nix.yml +++ b/.github/workflows/nix.yml @@ -20,6 +20,8 @@ concurrency: # to pollute conan/profiles directory with settings which might not work for others env: CONAN_URL: https://conan.ripplex.io + CONAN_LOGIN_USERNAME_XRPLF: ${{ secrets.CONAN_USERNAME }} + CONAN_PASSWORD_XRPLF: ${{ secrets.CONAN_TOKEN }} CONAN_GLOBAL_CONF: | core.download:parallel={{ os.cpu_count() }} core.upload:parallel={{ os.cpu_count() }} diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 4de92c8049..0b3fa5ff09 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -22,6 +22,8 @@ concurrency: # to pollute conan/profiles directory with settings which might not work for others env: CONAN_URL: https://conan.ripplex.io + CONAN_LOGIN_USERNAME_XRPLF: ${{ secrets.CONAN_USERNAME }} + CONAN_PASSWORD_XRPLF: ${{ secrets.CONAN_TOKEN }} CONAN_GLOBAL_CONF: | core.download:parallel={{os.cpu_count()}} core.upload:parallel={{os.cpu_count()}} From 94decc753b515e7499808ca0d5b9e24d172c691e Mon Sep 17 00:00:00 2001 From: Valentin Balaschenko <13349202+vlntb@users.noreply.github.com> Date: Thu, 7 Aug 2025 22:04:07 +0100 Subject: [PATCH 108/244] perf: Move mutex to the partition level (#5486) This change introduces two key optimizations: * Mutex scope reduction: Limits the lock to individual partitions within `TaggedCache`, reducing contention. * Decoupling: Removes the tight coupling between `LedgerHistory` and `TaggedCache`, improving modularity and testability. Lock contention analysis based on eBPF showed significant improvements as a result of this change. 
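To make the first optimization concrete, the sketch below shows the general shape of partition-level locking: the key is hashed to a partition, and only that partition's mutex is taken, so threads touching different partitions never contend. This is a minimal illustration, not the actual `TaggedCache` code; the class name `PartitionedCache`, the partition count, and the `std::hash`-based partitioner are assumptions made for the example.

```cpp
// Minimal sketch of per-partition locking (illustrative only, not rippled's
// TaggedCache). Each partition owns its own mutex; an operation locks only
// the partition that the key hashes to.
#include <array>
#include <functional>
#include <mutex>
#include <optional>
#include <string>
#include <unordered_map>

template <class Key, class Value, std::size_t Partitions = 16>
class PartitionedCache
{
    struct Partition
    {
        std::mutex mutex;                        // guards only this partition
        std::unordered_map<Key, Value> map;
    };

    std::array<Partition, Partitions> partitions_;

    Partition&
    partitionFor(Key const& key)
    {
        return partitions_[std::hash<Key>{}(key) % Partitions];
    }

public:
    void
    insert(Key const& key, Value value)
    {
        auto& p = partitionFor(key);
        std::lock_guard lock(p.mutex);           // no global lock involved
        p.map.insert_or_assign(key, std::move(value));
    }

    std::optional<Value>
    fetch(Key const& key)
    {
        auto& p = partitionFor(key);
        std::lock_guard lock(p.mutex);
        if (auto it = p.map.find(key); it != p.map.end())
            return it->second;
        return std::nullopt;
    }
};

int main()
{
    PartitionedCache<std::string, int> cache;
    cache.insert("ledger", 42);
    return cache.fetch("ledger").value_or(0) == 42 ? 0 : 1;
}
```

Aggregates that previously read a single counter under the global mutex then either walk the partitions, taking each lock in turn (as `size()` does in the diff below), or become atomics (as `m_cache_count`, `m_hits`, and `m_misses` do).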
--- include/xrpl/basics/SHAMapHash.h | 1 - include/xrpl/basics/TaggedCache.h | 26 +-- include/xrpl/basics/TaggedCache.ipp | 197 +++++++++--------- .../xrpl/basics/partitioned_unordered_map.h | 12 ++ include/xrpl/protocol/Protocol.h | 1 - src/test/basics/TaggedCache_test.cpp | 24 +-- src/xrpld/app/ledger/LedgerHistory.cpp | 15 +- src/xrpld/rpc/handlers/GetCounts.cpp | 2 +- 8 files changed, 135 insertions(+), 143 deletions(-) diff --git a/include/xrpl/basics/SHAMapHash.h b/include/xrpl/basics/SHAMapHash.h index 2d2dcdc3ef..1ec326409c 100644 --- a/include/xrpl/basics/SHAMapHash.h +++ b/include/xrpl/basics/SHAMapHash.h @@ -21,7 +21,6 @@ #define RIPPLE_BASICS_SHAMAP_HASH_H_INCLUDED #include -#include #include diff --git a/include/xrpl/basics/TaggedCache.h b/include/xrpl/basics/TaggedCache.h index 99c91fe393..7eace6fe72 100644 --- a/include/xrpl/basics/TaggedCache.h +++ b/include/xrpl/basics/TaggedCache.h @@ -90,9 +90,6 @@ public: int getCacheSize() const; - int - getTrackSize() const; - float getHitRate(); @@ -170,9 +167,6 @@ public: bool retrieve(key_type const& key, T& data); - mutex_type& - peekMutex(); - std::vector getKeys() const; @@ -193,11 +187,14 @@ public: private: SharedPointerType - initialFetch(key_type const& key, std::lock_guard const& l); + initialFetch(key_type const& key); void collect_metrics(); + Mutex& + lockPartition(key_type const& key) const; + private: struct Stats { @@ -300,8 +297,8 @@ private: [[maybe_unused]] clock_type::time_point const& now, typename KeyValueCacheType::map_type& partition, SweptPointersVector& stuffToSweep, - std::atomic& allRemovals, - std::lock_guard const&); + std::atomic& allRemoval, + Mutex& partitionLock); [[nodiscard]] std::thread sweepHelper( @@ -310,14 +307,12 @@ private: typename KeyOnlyCacheType::map_type& partition, SweptPointersVector&, std::atomic& allRemovals, - std::lock_guard const&); + Mutex& partitionLock); beast::Journal m_journal; clock_type& m_clock; Stats m_stats; - mutex_type mutable m_mutex; - // Used for logging std::string m_name; @@ -328,10 +323,11 @@ private: clock_type::duration const m_target_age; // Number of items cached - int m_cache_count; + std::atomic m_cache_count; cache_type m_cache; // Hold strong reference to recent objects - std::uint64_t m_hits; - std::uint64_t m_misses; + std::atomic m_hits; + std::atomic m_misses; + mutable std::vector partitionLocks_; }; } // namespace ripple diff --git a/include/xrpl/basics/TaggedCache.ipp b/include/xrpl/basics/TaggedCache.ipp index 16a3f7587a..c909ec6ad1 100644 --- a/include/xrpl/basics/TaggedCache.ipp +++ b/include/xrpl/basics/TaggedCache.ipp @@ -22,6 +22,7 @@ #include #include +#include namespace ripple { @@ -60,6 +61,7 @@ inline TaggedCache< , m_hits(0) , m_misses(0) { + partitionLocks_ = std::vector(m_cache.partitions()); } template < @@ -105,8 +107,13 @@ TaggedCache< KeyEqual, Mutex>::size() const { - std::lock_guard lock(m_mutex); - return m_cache.size(); + std::size_t totalSize = 0; + for (size_t i = 0; i < partitionLocks_.size(); ++i) + { + std::lock_guard lock(partitionLocks_[i]); + totalSize += m_cache.map()[i].size(); + } + return totalSize; } template < @@ -129,32 +136,7 @@ TaggedCache< KeyEqual, Mutex>::getCacheSize() const { - std::lock_guard lock(m_mutex); - return m_cache_count; -} - -template < - class Key, - class T, - bool IsKeyCache, - class SharedWeakUnionPointer, - class SharedPointerType, - class Hash, - class KeyEqual, - class Mutex> -inline int -TaggedCache< - Key, - T, - IsKeyCache, - SharedWeakUnionPointer, - SharedPointerType, - Hash, - 
KeyEqual, - Mutex>::getTrackSize() const -{ - std::lock_guard lock(m_mutex); - return m_cache.size(); + return m_cache_count.load(std::memory_order_relaxed); } template < @@ -177,9 +159,10 @@ TaggedCache< KeyEqual, Mutex>::getHitRate() { - std::lock_guard lock(m_mutex); - auto const total = static_cast(m_hits + m_misses); - return m_hits * (100.0f / std::max(1.0f, total)); + auto const hits = m_hits.load(std::memory_order_relaxed); + auto const misses = m_misses.load(std::memory_order_relaxed); + float const total = float(hits + misses); + return hits * (100.0f / std::max(1.0f, total)); } template < @@ -202,9 +185,12 @@ TaggedCache< KeyEqual, Mutex>::clear() { - std::lock_guard lock(m_mutex); + for (auto& mutex : partitionLocks_) + mutex.lock(); m_cache.clear(); - m_cache_count = 0; + for (auto& mutex : partitionLocks_) + mutex.unlock(); + m_cache_count.store(0, std::memory_order_relaxed); } template < @@ -227,11 +213,9 @@ TaggedCache< KeyEqual, Mutex>::reset() { - std::lock_guard lock(m_mutex); - m_cache.clear(); - m_cache_count = 0; - m_hits = 0; - m_misses = 0; + clear(); + m_hits.store(0, std::memory_order_relaxed); + m_misses.store(0, std::memory_order_relaxed); } template < @@ -255,7 +239,7 @@ TaggedCache< KeyEqual, Mutex>::touch_if_exists(KeyComparable const& key) { - std::lock_guard lock(m_mutex); + std::lock_guard lock(lockPartition(key)); auto const iter(m_cache.find(key)); if (iter == m_cache.end()) { @@ -297,8 +281,6 @@ TaggedCache< auto const start = std::chrono::steady_clock::now(); { - std::lock_guard lock(m_mutex); - if (m_target_size == 0 || (static_cast(m_cache.size()) <= m_target_size)) { @@ -330,12 +312,13 @@ TaggedCache< m_cache.map()[p], allStuffToSweep[p], allRemovals, - lock)); + partitionLocks_[p])); } for (std::thread& worker : workers) worker.join(); - m_cache_count -= allRemovals; + int removals = allRemovals.load(std::memory_order_relaxed); + m_cache_count.fetch_sub(removals, std::memory_order_relaxed); } // At this point allStuffToSweep will go out of scope outside the lock // and decrement the reference count on each strong pointer. @@ -369,7 +352,8 @@ TaggedCache< { // Remove from cache, if !valid, remove from map too. 
Returns true if // removed from cache - std::lock_guard lock(m_mutex); + + std::lock_guard lock(lockPartition(key)); auto cit = m_cache.find(key); @@ -382,7 +366,7 @@ TaggedCache< if (entry.isCached()) { - --m_cache_count; + m_cache_count.fetch_sub(1, std::memory_order_relaxed); entry.ptr.convertToWeak(); ret = true; } @@ -420,17 +404,16 @@ TaggedCache< { // Return canonical value, store if needed, refresh in cache // Return values: true=we had the data already - std::lock_guard lock(m_mutex); + std::lock_guard lock(lockPartition(key)); auto cit = m_cache.find(key); - if (cit == m_cache.end()) { m_cache.emplace( std::piecewise_construct, std::forward_as_tuple(key), std::forward_as_tuple(m_clock.now(), data)); - ++m_cache_count; + m_cache_count.fetch_add(1, std::memory_order_relaxed); return false; } @@ -479,12 +462,12 @@ TaggedCache< data = cachedData; } - ++m_cache_count; + m_cache_count.fetch_add(1, std::memory_order_relaxed); return true; } entry.ptr = data; - ++m_cache_count; + m_cache_count.fetch_add(1, std::memory_order_relaxed); return false; } @@ -560,10 +543,11 @@ TaggedCache< KeyEqual, Mutex>::fetch(key_type const& key) { - std::lock_guard l(m_mutex); - auto ret = initialFetch(key, l); + std::lock_guard lock(lockPartition(key)); + + auto ret = initialFetch(key); if (!ret) - ++m_misses; + m_misses.fetch_add(1, std::memory_order_relaxed); return ret; } @@ -627,8 +611,8 @@ TaggedCache< Mutex>::insert(key_type const& key) -> std::enable_if_t { - std::lock_guard lock(m_mutex); clock_type::time_point const now(m_clock.now()); + std::lock_guard lock(lockPartition(key)); auto [it, inserted] = m_cache.emplace( std::piecewise_construct, std::forward_as_tuple(key), @@ -668,29 +652,6 @@ TaggedCache< return true; } -template < - class Key, - class T, - bool IsKeyCache, - class SharedWeakUnionPointer, - class SharedPointerType, - class Hash, - class KeyEqual, - class Mutex> -inline auto -TaggedCache< - Key, - T, - IsKeyCache, - SharedWeakUnionPointer, - SharedPointerType, - Hash, - KeyEqual, - Mutex>::peekMutex() -> mutex_type& -{ - return m_mutex; -} - template < class Key, class T, @@ -714,10 +675,13 @@ TaggedCache< std::vector v; { - std::lock_guard lock(m_mutex); v.reserve(m_cache.size()); - for (auto const& _ : m_cache) - v.push_back(_.first); + for (std::size_t i = 0; i < partitionLocks_.size(); ++i) + { + std::lock_guard lock(partitionLocks_[i]); + for (auto const& entry : m_cache.map()[i]) + v.push_back(entry.first); + } } return v; @@ -743,11 +707,12 @@ TaggedCache< KeyEqual, Mutex>::rate() const { - std::lock_guard lock(m_mutex); - auto const tot = m_hits + m_misses; + auto const hits = m_hits.load(std::memory_order_relaxed); + auto const misses = m_misses.load(std::memory_order_relaxed); + auto const tot = hits + misses; if (tot == 0) - return 0; - return double(m_hits) / tot; + return 0.0; + return double(hits) / tot; } template < @@ -771,18 +736,16 @@ TaggedCache< KeyEqual, Mutex>::fetch(key_type const& digest, Handler const& h) { - { - std::lock_guard l(m_mutex); - if (auto ret = initialFetch(digest, l)) - return ret; - } + std::lock_guard lock(lockPartition(digest)); + + if (auto ret = initialFetch(digest)) + return ret; auto sle = h(); if (!sle) return {}; - std::lock_guard l(m_mutex); - ++m_misses; + m_misses.fetch_add(1, std::memory_order_relaxed); auto const [it, inserted] = m_cache.emplace(digest, Entry(m_clock.now(), std::move(sle))); if (!inserted) @@ -809,9 +772,10 @@ TaggedCache< SharedPointerType, Hash, KeyEqual, - Mutex>:: - initialFetch(key_type const& key, 
std::lock_guard const& l) + Mutex>::initialFetch(key_type const& key) { + std::lock_guard lock(lockPartition(key)); + auto cit = m_cache.find(key); if (cit == m_cache.end()) return {}; @@ -819,7 +783,7 @@ TaggedCache< Entry& entry = cit->second; if (entry.isCached()) { - ++m_hits; + m_hits.fetch_add(1, std::memory_order_relaxed); entry.touch(m_clock.now()); return entry.ptr.getStrong(); } @@ -827,12 +791,13 @@ TaggedCache< if (entry.isCached()) { // independent of cache size, so not counted as a hit - ++m_cache_count; + m_cache_count.fetch_add(1, std::memory_order_relaxed); entry.touch(m_clock.now()); return entry.ptr.getStrong(); } m_cache.erase(cit); + return {}; } @@ -861,10 +826,11 @@ TaggedCache< { beast::insight::Gauge::value_type hit_rate(0); { - std::lock_guard lock(m_mutex); - auto const total(m_hits + m_misses); + auto const hits = m_hits.load(std::memory_order_relaxed); + auto const misses = m_misses.load(std::memory_order_relaxed); + auto const total = hits + misses; if (total != 0) - hit_rate = (m_hits * 100) / total; + hit_rate = (hits * 100) / total; } m_stats.hit_rate.set(hit_rate); } @@ -895,12 +861,16 @@ TaggedCache< typename KeyValueCacheType::map_type& partition, SweptPointersVector& stuffToSweep, std::atomic& allRemovals, - std::lock_guard const&) + Mutex& partitionLock) { return std::thread([&, this]() { + beast::setCurrentThreadName("sweep-KVCache"); + int cacheRemovals = 0; int mapRemovals = 0; + std::lock_guard lock(partitionLock); + // Keep references to all the stuff we sweep // so that we can destroy them outside the lock. stuffToSweep.reserve(partition.size()); @@ -984,12 +954,16 @@ TaggedCache< typename KeyOnlyCacheType::map_type& partition, SweptPointersVector&, std::atomic& allRemovals, - std::lock_guard const&) + Mutex& partitionLock) { return std::thread([&, this]() { + beast::setCurrentThreadName("sweep-KCache"); + int cacheRemovals = 0; int mapRemovals = 0; + std::lock_guard lock(partitionLock); + // Keep references to all the stuff we sweep // so that we can destroy them outside the lock. 
{ @@ -1024,6 +998,29 @@ TaggedCache< }); } +template < + class Key, + class T, + bool IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, + class Hash, + class KeyEqual, + class Mutex> +inline Mutex& +TaggedCache< + Key, + T, + IsKeyCache, + SharedWeakUnionPointer, + SharedPointerType, + Hash, + KeyEqual, + Mutex>::lockPartition(key_type const& key) const +{ + return partitionLocks_[m_cache.partition_index(key)]; +} + } // namespace ripple #endif diff --git a/include/xrpl/basics/partitioned_unordered_map.h b/include/xrpl/basics/partitioned_unordered_map.h index 4e503ad0fa..ecaf16a47e 100644 --- a/include/xrpl/basics/partitioned_unordered_map.h +++ b/include/xrpl/basics/partitioned_unordered_map.h @@ -277,6 +277,12 @@ public: return map_; } + partition_map_type const& + map() const + { + return map_; + } + iterator begin() { @@ -321,6 +327,12 @@ public: return cend(); } + std::size_t + partition_index(key_type const& key) const + { + return partitioner(key); + } + private: template void diff --git a/include/xrpl/protocol/Protocol.h b/include/xrpl/protocol/Protocol.h index 898fd06fbd..bd39233cca 100644 --- a/include/xrpl/protocol/Protocol.h +++ b/include/xrpl/protocol/Protocol.h @@ -22,7 +22,6 @@ #include #include -#include #include diff --git a/src/test/basics/TaggedCache_test.cpp b/src/test/basics/TaggedCache_test.cpp index 797838fcfa..ce33455110 100644 --- a/src/test/basics/TaggedCache_test.cpp +++ b/src/test/basics/TaggedCache_test.cpp @@ -58,10 +58,10 @@ public: // Insert an item, retrieve it, and age it so it gets purged. { BEAST_EXPECT(c.getCacheSize() == 0); - BEAST_EXPECT(c.getTrackSize() == 0); + BEAST_EXPECT(c.size() == 0); BEAST_EXPECT(!c.insert(1, "one")); BEAST_EXPECT(c.getCacheSize() == 1); - BEAST_EXPECT(c.getTrackSize() == 1); + BEAST_EXPECT(c.size() == 1); { std::string s; @@ -72,7 +72,7 @@ public: ++clock; c.sweep(); BEAST_EXPECT(c.getCacheSize() == 0); - BEAST_EXPECT(c.getTrackSize() == 0); + BEAST_EXPECT(c.size() == 0); } // Insert an item, maintain a strong pointer, age it, and @@ -80,7 +80,7 @@ public: { BEAST_EXPECT(!c.insert(2, "two")); BEAST_EXPECT(c.getCacheSize() == 1); - BEAST_EXPECT(c.getTrackSize() == 1); + BEAST_EXPECT(c.size() == 1); { auto p = c.fetch(2); @@ -88,14 +88,14 @@ public: ++clock; c.sweep(); BEAST_EXPECT(c.getCacheSize() == 0); - BEAST_EXPECT(c.getTrackSize() == 1); + BEAST_EXPECT(c.size() == 1); } // Make sure its gone now that our reference is gone ++clock; c.sweep(); BEAST_EXPECT(c.getCacheSize() == 0); - BEAST_EXPECT(c.getTrackSize() == 0); + BEAST_EXPECT(c.size() == 0); } // Insert the same key/value pair and make sure we get the same result @@ -111,7 +111,7 @@ public: ++clock; c.sweep(); BEAST_EXPECT(c.getCacheSize() == 0); - BEAST_EXPECT(c.getTrackSize() == 0); + BEAST_EXPECT(c.size() == 0); } // Put an object in but keep a strong pointer to it, advance the clock a @@ -121,24 +121,24 @@ public: // Put an object in BEAST_EXPECT(!c.insert(4, "four")); BEAST_EXPECT(c.getCacheSize() == 1); - BEAST_EXPECT(c.getTrackSize() == 1); + BEAST_EXPECT(c.size() == 1); { // Keep a strong pointer to it auto const p1 = c.fetch(4); BEAST_EXPECT(p1 != nullptr); BEAST_EXPECT(c.getCacheSize() == 1); - BEAST_EXPECT(c.getTrackSize() == 1); + BEAST_EXPECT(c.size() == 1); // Advance the clock a lot ++clock; c.sweep(); BEAST_EXPECT(c.getCacheSize() == 0); - BEAST_EXPECT(c.getTrackSize() == 1); + BEAST_EXPECT(c.size() == 1); // Canonicalize a new object with the same key auto p2 = std::make_shared("four"); 
BEAST_EXPECT(c.canonicalize_replace_client(4, p2)); BEAST_EXPECT(c.getCacheSize() == 1); - BEAST_EXPECT(c.getTrackSize() == 1); + BEAST_EXPECT(c.size() == 1); // Make sure we get the original object BEAST_EXPECT(p1.get() == p2.get()); } @@ -146,7 +146,7 @@ public: ++clock; c.sweep(); BEAST_EXPECT(c.getCacheSize() == 0); - BEAST_EXPECT(c.getTrackSize() == 0); + BEAST_EXPECT(c.size() == 0); } } }; diff --git a/src/xrpld/app/ledger/LedgerHistory.cpp b/src/xrpld/app/ledger/LedgerHistory.cpp index ccec209bd4..dcbd722120 100644 --- a/src/xrpld/app/ledger/LedgerHistory.cpp +++ b/src/xrpld/app/ledger/LedgerHistory.cpp @@ -63,8 +63,6 @@ LedgerHistory::insert( ledger->stateMap().getHash().isNonZero(), "ripple::LedgerHistory::insert : nonzero hash"); - std::unique_lock sl(m_ledgers_by_hash.peekMutex()); - bool const alreadyHad = m_ledgers_by_hash.canonicalize_replace_cache( ledger->info().hash, ledger); if (validated) @@ -76,7 +74,6 @@ LedgerHistory::insert( LedgerHash LedgerHistory::getLedgerHash(LedgerIndex index) { - std::unique_lock sl(m_ledgers_by_hash.peekMutex()); if (auto it = mLedgersByIndex.find(index); it != mLedgersByIndex.end()) return it->second; return {}; @@ -86,13 +83,11 @@ std::shared_ptr LedgerHistory::getLedgerBySeq(LedgerIndex index) { { - std::unique_lock sl(m_ledgers_by_hash.peekMutex()); auto it = mLedgersByIndex.find(index); if (it != mLedgersByIndex.end()) { uint256 hash = it->second; - sl.unlock(); return getLedgerByHash(hash); } } @@ -108,7 +103,6 @@ LedgerHistory::getLedgerBySeq(LedgerIndex index) { // Add this ledger to the local tracking by index - std::unique_lock sl(m_ledgers_by_hash.peekMutex()); XRPL_ASSERT( ret->isImmutable(), @@ -458,8 +452,6 @@ LedgerHistory::builtLedger( XRPL_ASSERT( !hash.isZero(), "ripple::LedgerHistory::builtLedger : nonzero hash"); - std::unique_lock sl(m_consensus_validated.peekMutex()); - auto entry = std::make_shared(); m_consensus_validated.canonicalize_replace_client(index, entry); @@ -500,8 +492,6 @@ LedgerHistory::validatedLedger( !hash.isZero(), "ripple::LedgerHistory::validatedLedger : nonzero hash"); - std::unique_lock sl(m_consensus_validated.peekMutex()); - auto entry = std::make_shared(); m_consensus_validated.canonicalize_replace_client(index, entry); @@ -535,10 +525,9 @@ LedgerHistory::validatedLedger( bool LedgerHistory::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash) { - std::unique_lock sl(m_ledgers_by_hash.peekMutex()); + auto ledger = m_ledgers_by_hash.fetch(ledgerHash); auto it = mLedgersByIndex.find(ledgerIndex); - - if ((it != mLedgersByIndex.end()) && (it->second != ledgerHash)) + if (ledger && (it != mLedgersByIndex.end()) && (it->second != ledgerHash)) { it->second = ledgerHash; return false; diff --git a/src/xrpld/rpc/handlers/GetCounts.cpp b/src/xrpld/rpc/handlers/GetCounts.cpp index 3c1d8cccdd..2987da46d5 100644 --- a/src/xrpld/rpc/handlers/GetCounts.cpp +++ b/src/xrpld/rpc/handlers/GetCounts.cpp @@ -114,7 +114,7 @@ getCountsJson(Application& app, int minObjectCount) ret[jss::treenode_cache_size] = app.getNodeFamily().getTreeNodeCache()->getCacheSize(); ret[jss::treenode_track_size] = - app.getNodeFamily().getTreeNodeCache()->getTrackSize(); + static_cast(app.getNodeFamily().getTreeNodeCache()->size()); std::string uptime; auto s = UptimeClock::now(); From 39b5031ab5efa543e3007fb3fc7e199381ee68fb Mon Sep 17 00:00:00 2001 From: Bart Date: Fri, 8 Aug 2025 08:47:36 -0400 Subject: [PATCH 109/244] Switch Conan 1 commands to Conan 2 and fix credentials (#5655) This change updates some incorrect Conan 
commands for Conan 2. As some flags do not exist in Conan 2, such as --settings build_type=[configuration], the commands have been adjusted accordingly. This change further uses the org-level variables and secrets rather than the repo-level ones. --- .github/actions/dependencies/action.yml | 38 ++++++++++--------------- .github/workflows/libxrpl.yml | 12 ++++---- .github/workflows/macos.yml | 9 ++++-- .github/workflows/nix.yml | 11 +++---- .github/workflows/windows.yml | 11 +++---- 5 files changed, 39 insertions(+), 42 deletions(-) diff --git a/.github/actions/dependencies/action.yml b/.github/actions/dependencies/action.yml index 8dc78450c1..0bd28f15dd 100644 --- a/.github/actions/dependencies/action.yml +++ b/.github/actions/dependencies/action.yml @@ -2,33 +2,25 @@ name: dependencies inputs: configuration: required: true -# An implicit input is the environment variable `build_dir`. +# Implicit inputs are the environment variables `build_dir`, CONAN_REMOTE_URL, +# CONAN_REMOTE_USERNAME, and CONAN_REMOTE_PASSWORD. The latter two are only +# used to upload newly built dependencies to the Conan remote. runs: using: composite steps: - name: add Conan remote - if: env.CONAN_URL != '' + if: ${{ env.CONAN_REMOTE_URL != '' }} shell: bash run: | - if conan remote list | grep -q 'xrplf'; then - conan remote update --index 0 --url ${CONAN_URL} xrplf - echo "Updated Conan remote 'xrplf' to ${CONAN_URL}." - else - conan remote add --index 0 xrplf ${CONAN_URL} - echo "Added new Conan remote 'xrplf' at ${CONAN_URL}." - fi - - name: list missing binaries - id: binaries - shell: bash - # Print the list of dependencies that would need to be built locally. - # A non-empty list means we have "failed" to cache binaries remotely. - run: | - echo missing=$(conan info . --build missing --settings build_type=${{ inputs.configuration }} --json 2>/dev/null | grep '^\[') | tee ${GITHUB_OUTPUT} + echo "Adding Conan remote 'xrplf' at ${{ env.CONAN_REMOTE_URL }}." + conan remote add --index 0 --force xrplf ${{ env.CONAN_REMOTE_URL }} + echo "Listing Conan remotes." + conan remote list - name: install dependencies shell: bash run: | - mkdir ${build_dir} - cd ${build_dir} + mkdir -p ${{ env.build_dir }} + cd ${{ env.build_dir }} conan install \ --output-folder . \ --build missing \ @@ -37,10 +29,10 @@ runs: --settings:all build_type=${{ inputs.configuration }} \ .. - name: upload dependencies - if: ${{ env.CONAN_URL != '' && env.CONAN_LOGIN_USERNAME_XRPLF != '' && env.CONAN_PASSWORD_XRPLF != '' && github.ref_type == 'branch' && github.ref_name == github.event.repository.default_branch }} + if: ${{ env.CONAN_REMOTE_URL != '' && env.CONAN_REMOTE_USERNAME != '' && env.CONAN_REMOTE_PASSWORD != '' && github.ref_type == 'branch' && github.ref_name == github.event.repository.default_branch }} shell: bash run: | - echo "Logging into Conan remote 'xrplf' at ${CONAN_URL}." - conan remote login xrplf "${{ env.CONAN_LOGIN_USERNAME_XRPLF }}" --password "${{ env.CONAN_PASSWORD_XRPLF }}" - echo "Uploading dependencies for configuration '${{ inputs.configuration }}'." - conan upload --all --confirm --remote xrplf . --settings build_type=${{ inputs.configuration }} + echo "Logging into Conan remote 'xrplf' at ${{ env.CONAN_REMOTE_URL }}." + conan remote login xrplf "${{ env.CONAN_REMOTE_USERNAME }}" --password "${{ env.CONAN_REMOTE_PASSWORD }}" + echo "Uploading dependencies." 
+ conan upload '*' --confirm --check --remote xrplf diff --git a/.github/workflows/libxrpl.yml b/.github/workflows/libxrpl.yml index a8746fe297..8223b7996f 100644 --- a/.github/workflows/libxrpl.yml +++ b/.github/workflows/libxrpl.yml @@ -1,8 +1,8 @@ name: Check libXRPL compatibility with Clio env: - CONAN_URL: https://conan.ripplex.io - CONAN_LOGIN_USERNAME_XRPLF: ${{ secrets.CONAN_USERNAME }} - CONAN_PASSWORD_XRPLF: ${{ secrets.CONAN_TOKEN }} + CONAN_REMOTE_URL: ${{ vars.CONAN_REMOTE_URL }} + CONAN_LOGIN_USERNAME_XRPLF: ${{ secrets.CONAN_REMOTE_USERNAME }} + CONAN_PASSWORD_XRPLF: ${{ secrets.CONAN_REMOTE_PASSWORD }} on: pull_request: paths: @@ -46,10 +46,10 @@ jobs: - name: Add Conan remote shell: bash run: | + echo "Adding Conan remote 'xrplf' at ${{ env.CONAN_REMOTE_URL }}." + conan remote add xrplf ${{ env.CONAN_REMOTE_URL }} --insert 0 --force + echo "Listing Conan remotes." conan remote list - conan remote remove xrplf || true - # Do not quote the URL. An empty string will be accepted (with a non-fatal warning), but a missing argument will not. - conan remote add xrplf ${{ env.CONAN_URL }} --insert 0 - name: Parse new version id: version shell: bash diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index ee912fda40..6c6091cc2e 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -18,9 +18,12 @@ concurrency: # This part of Conan configuration is specific to this workflow only; we do not want # to pollute conan/profiles directory with settings which might not work for others env: - CONAN_URL: https://conan.ripplex.io - CONAN_LOGIN_USERNAME_XRPLF: ${{ secrets.CONAN_USERNAME }} - CONAN_PASSWORD_XRPLF: ${{ secrets.CONAN_TOKEN }} + CONAN_REMOTE_URL: ${{ vars.CONAN_REMOTE_URL }} + CONAN_REMOTE_USERNAME: ${{ secrets.CONAN_REMOTE_USERNAME }} + CONAN_REMOTE_PASSWORD: ${{ secrets.CONAN_REMOTE_PASSWORD }} + # This part of the Conan configuration is specific to this workflow only; we + # do not want to pollute the 'conan/profiles' directory with settings that + # might not work for other workflows. CONAN_GLOBAL_CONF: | core.download:parallel={{os.cpu_count()}} core.upload:parallel={{os.cpu_count()}} diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml index c58c97364d..5beb5d291d 100644 --- a/.github/workflows/nix.yml +++ b/.github/workflows/nix.yml @@ -16,12 +16,13 @@ concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true -# This part of Conan configuration is specific to this workflow only; we do not want -# to pollute conan/profiles directory with settings which might not work for others env: - CONAN_URL: https://conan.ripplex.io - CONAN_LOGIN_USERNAME_XRPLF: ${{ secrets.CONAN_USERNAME }} - CONAN_PASSWORD_XRPLF: ${{ secrets.CONAN_TOKEN }} + CONAN_REMOTE_URL: ${{ vars.CONAN_REMOTE_URL }} + CONAN_REMOTE_USERNAME: ${{ secrets.CONAN_REMOTE_USERNAME }} + CONAN_REMOTE_PASSWORD: ${{ secrets.CONAN_REMOTE_PASSWORD }} + # This part of the Conan configuration is specific to this workflow only; we + # do not want to pollute the 'conan/profiles' directory with settings that + # might not work for other workflows. 
CONAN_GLOBAL_CONF: | core.download:parallel={{ os.cpu_count() }} core.upload:parallel={{ os.cpu_count() }} diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 0b3fa5ff09..9e2322b119 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -18,12 +18,13 @@ on: concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true -# This part of Conan configuration is specific to this workflow only; we do not want -# to pollute conan/profiles directory with settings which might not work for others env: - CONAN_URL: https://conan.ripplex.io - CONAN_LOGIN_USERNAME_XRPLF: ${{ secrets.CONAN_USERNAME }} - CONAN_PASSWORD_XRPLF: ${{ secrets.CONAN_TOKEN }} + CONAN_REMOTE_URL: ${{ vars.CONAN_REMOTE_URL }} + CONAN_REMOTE_USERNAME: ${{ secrets.CONAN_REMOTE_USERNAME }} + CONAN_REMOTE_PASSWORD: ${{ secrets.CONAN_REMOTE_PASSWORD }} + # This part of the Conan configuration is specific to this workflow only; we + # do not want to pollute the 'conan/profiles' directory with settings that + # might not work for other workflows. CONAN_GLOBAL_CONF: | core.download:parallel={{os.cpu_count()}} core.upload:parallel={{os.cpu_count()}} From 86ef16dbebda2f4dc9c367a08a0f2ca85c22ce1f Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Fri, 8 Aug 2025 17:13:32 -0400 Subject: [PATCH 110/244] Fix: Don't flag consensus as stalled prematurely (#5627) Fix stalled consensus detection to prevent false positives in situations where there are no disputed transactions. Stalled consensus detection was added to 2.5.0 in response to a network consensus halt that caused a round to run for over an hour. However, it has a flaw that makes it very easy to have false positives. Those false positives are usually mitigated by other checks that prevent them from having an effect, but there have been several instances of validators "running ahead" because there are circumstances where the other checks are "successful", allowing the stall state to be checked. 
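A short aside on why rounds with no disputes could trip the detector: `std::ranges::all_of` over an empty range is vacuously true, so a stall predicate applied to an empty dispute set "passes" automatically. The snippet below (a standalone demonstration, not rippled code) shows that property and the shape of the guard the diff adds via `!result_->disputes.empty()`.

```cpp
// Demonstrates that all_of over an empty range is vacuously true, which is
// how a round with zero disputed transactions could look "stalled".
// Standalone example, not rippled code.
#include <algorithm>
#include <cassert>
#include <vector>

int main()
{
    std::vector<int> disputes;  // no disputed transactions this round

    // "Every dispute is stalled" holds trivially: there are no disputes.
    bool const allStalled =
        std::ranges::all_of(disputes, [](int) { return false; });
    assert(allStalled);

    // Requiring a non-empty dispute set, as the fix does, removes the
    // vacuous case.
    bool const stalled = !disputes.empty() && allStalled;
    assert(!stalled);
    return 0;
}
```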
--- src/test/consensus/Consensus_test.cpp | 183 ++++++++++++++++----- src/xrpld/app/consensus/RCLValidations.cpp | 2 +- src/xrpld/consensus/Consensus.cpp | 10 +- src/xrpld/consensus/Consensus.h | 24 ++- src/xrpld/consensus/DisputedTx.h | 27 ++- 5 files changed, 192 insertions(+), 54 deletions(-) diff --git a/src/test/consensus/Consensus_test.cpp b/src/test/consensus/Consensus_test.cpp index db56ab58c6..7899336a6f 100644 --- a/src/test/consensus/Consensus_test.cpp +++ b/src/test/consensus/Consensus_test.cpp @@ -1136,6 +1136,10 @@ public: ConsensusParms p; std::size_t peersUnchanged = 0; + auto logs = std::make_unique(beast::severities::kError); + auto j = logs->journal("Test"); + auto clog = std::make_unique(); + // Three cases: // 1 proposing, initial vote yes // 2 proposing, initial vote no @@ -1172,10 +1176,15 @@ public: BEAST_EXPECT(proposingFalse.getOurVote() == false); BEAST_EXPECT(followingTrue.getOurVote() == true); BEAST_EXPECT(followingFalse.getOurVote() == false); - BEAST_EXPECT(!proposingTrue.stalled(p, true, peersUnchanged)); - BEAST_EXPECT(!proposingFalse.stalled(p, true, peersUnchanged)); - BEAST_EXPECT(!followingTrue.stalled(p, false, peersUnchanged)); - BEAST_EXPECT(!followingFalse.stalled(p, false, peersUnchanged)); + BEAST_EXPECT( + !proposingTrue.stalled(p, true, peersUnchanged, j, clog)); + BEAST_EXPECT( + !proposingFalse.stalled(p, true, peersUnchanged, j, clog)); + BEAST_EXPECT( + !followingTrue.stalled(p, false, peersUnchanged, j, clog)); + BEAST_EXPECT( + !followingFalse.stalled(p, false, peersUnchanged, j, clog)); + BEAST_EXPECT(clog->str() == ""); // I'm in the majority, my vote should not change BEAST_EXPECT(!proposingTrue.updateVote(5, true, p)); @@ -1189,10 +1198,15 @@ public: BEAST_EXPECT(!followingFalse.updateVote(10, false, p)); peersUnchanged = 2; - BEAST_EXPECT(!proposingTrue.stalled(p, true, peersUnchanged)); - BEAST_EXPECT(!proposingFalse.stalled(p, true, peersUnchanged)); - BEAST_EXPECT(!followingTrue.stalled(p, false, peersUnchanged)); - BEAST_EXPECT(!followingFalse.stalled(p, false, peersUnchanged)); + BEAST_EXPECT( + !proposingTrue.stalled(p, true, peersUnchanged, j, clog)); + BEAST_EXPECT( + !proposingFalse.stalled(p, true, peersUnchanged, j, clog)); + BEAST_EXPECT( + !followingTrue.stalled(p, false, peersUnchanged, j, clog)); + BEAST_EXPECT( + !followingFalse.stalled(p, false, peersUnchanged, j, clog)); + BEAST_EXPECT(clog->str() == ""); // Right now, the vote is 51%. 
The requirement is about to jump to // 65% @@ -1282,10 +1296,15 @@ public: BEAST_EXPECT(followingFalse.getOurVote() == false); peersUnchanged = 3; - BEAST_EXPECT(!proposingTrue.stalled(p, true, peersUnchanged)); - BEAST_EXPECT(!proposingFalse.stalled(p, true, peersUnchanged)); - BEAST_EXPECT(!followingTrue.stalled(p, false, peersUnchanged)); - BEAST_EXPECT(!followingFalse.stalled(p, false, peersUnchanged)); + BEAST_EXPECT( + !proposingTrue.stalled(p, true, peersUnchanged, j, clog)); + BEAST_EXPECT( + !proposingFalse.stalled(p, true, peersUnchanged, j, clog)); + BEAST_EXPECT( + !followingTrue.stalled(p, false, peersUnchanged, j, clog)); + BEAST_EXPECT( + !followingFalse.stalled(p, false, peersUnchanged, j, clog)); + BEAST_EXPECT(clog->str() == ""); // Threshold jumps to 95% BEAST_EXPECT(proposingTrue.updateVote(220, true, p)); @@ -1322,12 +1341,60 @@ public: for (peersUnchanged = 0; peersUnchanged < 6; ++peersUnchanged) { - BEAST_EXPECT(!proposingTrue.stalled(p, true, peersUnchanged)); - BEAST_EXPECT(!proposingFalse.stalled(p, true, peersUnchanged)); - BEAST_EXPECT(!followingTrue.stalled(p, false, peersUnchanged)); - BEAST_EXPECT(!followingFalse.stalled(p, false, peersUnchanged)); + BEAST_EXPECT( + !proposingTrue.stalled(p, true, peersUnchanged, j, clog)); + BEAST_EXPECT( + !proposingFalse.stalled(p, true, peersUnchanged, j, clog)); + BEAST_EXPECT( + !followingTrue.stalled(p, false, peersUnchanged, j, clog)); + BEAST_EXPECT( + !followingFalse.stalled(p, false, peersUnchanged, j, clog)); + BEAST_EXPECT(clog->str() == ""); } + auto expectStalled = [this, &clog]( + int txid, + bool ourVote, + int ourTime, + int peerTime, + int support, + std::uint32_t line) { + using namespace std::string_literals; + + auto const s = clog->str(); + expect(s.find("stalled"), s, __FILE__, line); + expect( + s.starts_with("Transaction "s + std::to_string(txid)), + s, + __FILE__, + line); + expect( + s.find("voting "s + (ourVote ? "YES" : "NO")) != s.npos, + s, + __FILE__, + line); + expect( + s.find("for "s + std::to_string(ourTime) + " rounds."s) != + s.npos, + s, + __FILE__, + line); + expect( + s.find( + "votes in "s + std::to_string(peerTime) + " rounds.") != + s.npos, + s, + __FILE__, + line); + expect( + s.ends_with( + "has "s + std::to_string(support) + "% support. 
"s), + s, + __FILE__, + line); + clog = std::make_unique(); + }; + for (int i = 0; i < 1; ++i) { BEAST_EXPECT(!proposingTrue.updateVote(250 + 10 * i, true, p)); @@ -1342,22 +1409,34 @@ public: BEAST_EXPECT(followingFalse.getOurVote() == false); // true vote has changed recently, so not stalled - BEAST_EXPECT(!proposingTrue.stalled(p, true, 0)); + BEAST_EXPECT(!proposingTrue.stalled(p, true, 0, j, clog)); + BEAST_EXPECT(clog->str() == ""); // remaining votes have been unchanged in so long that we only // need to hit the second round at 95% to be stalled, regardless // of peers - BEAST_EXPECT(proposingFalse.stalled(p, true, 0)); - BEAST_EXPECT(followingTrue.stalled(p, false, 0)); - BEAST_EXPECT(followingFalse.stalled(p, false, 0)); + BEAST_EXPECT(proposingFalse.stalled(p, true, 0, j, clog)); + expectStalled(98, false, 11, 0, 2, __LINE__); + BEAST_EXPECT(followingTrue.stalled(p, false, 0, j, clog)); + expectStalled(97, true, 11, 0, 97, __LINE__); + BEAST_EXPECT(followingFalse.stalled(p, false, 0, j, clog)); + expectStalled(96, false, 11, 0, 3, __LINE__); // true vote has changed recently, so not stalled - BEAST_EXPECT(!proposingTrue.stalled(p, true, peersUnchanged)); + BEAST_EXPECT( + !proposingTrue.stalled(p, true, peersUnchanged, j, clog)); + BEAST_EXPECTS(clog->str() == "", clog->str()); // remaining votes have been unchanged in so long that we only // need to hit the second round at 95% to be stalled, regardless // of peers - BEAST_EXPECT(proposingFalse.stalled(p, true, peersUnchanged)); - BEAST_EXPECT(followingTrue.stalled(p, false, peersUnchanged)); - BEAST_EXPECT(followingFalse.stalled(p, false, peersUnchanged)); + BEAST_EXPECT( + proposingFalse.stalled(p, true, peersUnchanged, j, clog)); + expectStalled(98, false, 11, 6, 2, __LINE__); + BEAST_EXPECT( + followingTrue.stalled(p, false, peersUnchanged, j, clog)); + expectStalled(97, true, 11, 6, 97, __LINE__); + BEAST_EXPECT( + followingFalse.stalled(p, false, peersUnchanged, j, clog)); + expectStalled(96, false, 11, 6, 3, __LINE__); } for (int i = 1; i < 3; ++i) { @@ -1374,19 +1453,31 @@ public: // true vote changed 2 rounds ago, and peers are changing, so // not stalled - BEAST_EXPECT(!proposingTrue.stalled(p, true, 0)); + BEAST_EXPECT(!proposingTrue.stalled(p, true, 0, j, clog)); + BEAST_EXPECTS(clog->str() == "", clog->str()); // still stalled - BEAST_EXPECT(proposingFalse.stalled(p, true, 0)); - BEAST_EXPECT(followingTrue.stalled(p, false, 0)); - BEAST_EXPECT(followingFalse.stalled(p, false, 0)); + BEAST_EXPECT(proposingFalse.stalled(p, true, 0, j, clog)); + expectStalled(98, false, 11 + i, 0, 2, __LINE__); + BEAST_EXPECT(followingTrue.stalled(p, false, 0, j, clog)); + expectStalled(97, true, 11 + i, 0, 97, __LINE__); + BEAST_EXPECT(followingFalse.stalled(p, false, 0, j, clog)); + expectStalled(96, false, 11 + i, 0, 3, __LINE__); // true vote changed 2 rounds ago, and peers are NOT changing, // so stalled - BEAST_EXPECT(proposingTrue.stalled(p, true, peersUnchanged)); + BEAST_EXPECT( + proposingTrue.stalled(p, true, peersUnchanged, j, clog)); + expectStalled(99, true, 1 + i, 6, 97, __LINE__); // still stalled - BEAST_EXPECT(proposingFalse.stalled(p, true, peersUnchanged)); - BEAST_EXPECT(followingTrue.stalled(p, false, peersUnchanged)); - BEAST_EXPECT(followingFalse.stalled(p, false, peersUnchanged)); + BEAST_EXPECT( + proposingFalse.stalled(p, true, peersUnchanged, j, clog)); + expectStalled(98, false, 11 + i, 6, 2, __LINE__); + BEAST_EXPECT( + followingTrue.stalled(p, false, peersUnchanged, j, clog)); + expectStalled(97, true, 11 
+ i, 6, 97, __LINE__); + BEAST_EXPECT( + followingFalse.stalled(p, false, peersUnchanged, j, clog)); + expectStalled(96, false, 11 + i, 6, 3, __LINE__); } for (int i = 3; i < 5; ++i) { @@ -1401,15 +1492,27 @@ public: BEAST_EXPECT(followingTrue.getOurVote() == true); BEAST_EXPECT(followingFalse.getOurVote() == false); - BEAST_EXPECT(proposingTrue.stalled(p, true, 0)); - BEAST_EXPECT(proposingFalse.stalled(p, true, 0)); - BEAST_EXPECT(followingTrue.stalled(p, false, 0)); - BEAST_EXPECT(followingFalse.stalled(p, false, 0)); + BEAST_EXPECT(proposingTrue.stalled(p, true, 0, j, clog)); + expectStalled(99, true, 1 + i, 0, 97, __LINE__); + BEAST_EXPECT(proposingFalse.stalled(p, true, 0, j, clog)); + expectStalled(98, false, 11 + i, 0, 2, __LINE__); + BEAST_EXPECT(followingTrue.stalled(p, false, 0, j, clog)); + expectStalled(97, true, 11 + i, 0, 97, __LINE__); + BEAST_EXPECT(followingFalse.stalled(p, false, 0, j, clog)); + expectStalled(96, false, 11 + i, 0, 3, __LINE__); - BEAST_EXPECT(proposingTrue.stalled(p, true, peersUnchanged)); - BEAST_EXPECT(proposingFalse.stalled(p, true, peersUnchanged)); - BEAST_EXPECT(followingTrue.stalled(p, false, peersUnchanged)); - BEAST_EXPECT(followingFalse.stalled(p, false, peersUnchanged)); + BEAST_EXPECT( + proposingTrue.stalled(p, true, peersUnchanged, j, clog)); + expectStalled(99, true, 1 + i, 6, 97, __LINE__); + BEAST_EXPECT( + proposingFalse.stalled(p, true, peersUnchanged, j, clog)); + expectStalled(98, false, 11 + i, 6, 2, __LINE__); + BEAST_EXPECT( + followingTrue.stalled(p, false, peersUnchanged, j, clog)); + expectStalled(97, true, 11 + i, 6, 97, __LINE__); + BEAST_EXPECT( + followingFalse.stalled(p, false, peersUnchanged, j, clog)); + expectStalled(96, false, 11 + i, 6, 3, __LINE__); } } } diff --git a/src/xrpld/app/consensus/RCLValidations.cpp b/src/xrpld/app/consensus/RCLValidations.cpp index a04047c78a..5305c95357 100644 --- a/src/xrpld/app/consensus/RCLValidations.cpp +++ b/src/xrpld/app/consensus/RCLValidations.cpp @@ -136,7 +136,7 @@ RCLValidationsAdaptor::acquire(LedgerHash const& hash) if (!ledger) { - JLOG(j_.debug()) + JLOG(j_.warn()) << "Need validated ledger for preferred ledger analysis " << hash; Application* pApp = &app_; diff --git a/src/xrpld/consensus/Consensus.cpp b/src/xrpld/consensus/Consensus.cpp index fb57687df0..d4edb1445c 100644 --- a/src/xrpld/consensus/Consensus.cpp +++ b/src/xrpld/consensus/Consensus.cpp @@ -139,11 +139,11 @@ checkConsensusReached( return false; } - // We only get stalled when every disputed transaction unequivocally has 80% - // (minConsensusPct) agreement, either for or against. That is: either under - // 20% or over 80% consensus (repectively "nay" or "yay"). This prevents - // manipulation by a minority of byzantine peers of which transactions make - // the cut to get into the ledger. + // We only get stalled when there are disputed transactions and all of them + // unequivocally have 80% (minConsensusPct) agreement, either for or + // against. That is: either under 20% or over 80% consensus (repectively + // "nay" or "yay"). This prevents manipulation by a minority of byzantine + // peers of which transactions make the cut to get into the ledger. if (stalled) { CLOG(clog) << "consensus stalled. 
"; diff --git a/src/xrpld/consensus/Consensus.h b/src/xrpld/consensus/Consensus.h index f3265cf381..df6cedccff 100644 --- a/src/xrpld/consensus/Consensus.h +++ b/src/xrpld/consensus/Consensus.h @@ -84,8 +84,8 @@ shouldCloseLedger( agree @param stalled the network appears to be stalled, where neither we nor our peers have changed their vote on any disputes in a - while. This is undesirable, and will cause us to end consensus - without 80% agreement. + while. This is undesirable, and should be rare, and will cause us to + end consensus without 80% agreement. @param parms Consensus constant parameters @param proposing whether we should count ourselves @param j journal for logging @@ -1712,15 +1712,29 @@ Consensus::haveConsensus( << ", disagree=" << disagree; ConsensusParms const& parms = adaptor_.parms(); - // Stalling is BAD + // Stalling is BAD. It means that we have a consensus on the close time, so + // peers are talking, but we have disputed transactions that peers are + // unable or unwilling to come to agreement on one way or the other. bool const stalled = haveCloseTimeConsensus_ && + !result_->disputes.empty() && std::ranges::all_of(result_->disputes, - [this, &parms](auto const& dispute) { + [this, &parms, &clog](auto const& dispute) { return dispute.second.stalled( parms, mode_.get() == ConsensusMode::proposing, - peerUnchangedCounter_); + peerUnchangedCounter_, + j_, + clog); }); + if (stalled) + { + std::stringstream ss; + ss << "Consensus detects as stalled with " << (agree + disagree) << "/" + << prevProposers_ << " proposers, and " << result_->disputes.size() + << " stalled disputed transactions."; + JLOG(j_.error()) << ss.str(); + CLOG(clog) << ss.str(); + } // Determine if we actually have consensus or not result_->state = checkConsensus( diff --git a/src/xrpld/consensus/DisputedTx.h b/src/xrpld/consensus/DisputedTx.h index 4ed31b77ca..e774c8366c 100644 --- a/src/xrpld/consensus/DisputedTx.h +++ b/src/xrpld/consensus/DisputedTx.h @@ -85,7 +85,12 @@ public: //! Are we and our peers "stalled" where we probably won't change //! our vote? bool - stalled(ConsensusParms const& p, bool proposing, int peersUnchanged) const + stalled( + ConsensusParms const& p, + bool proposing, + int peersUnchanged, + beast::Journal j, + std::unique_ptr const& clog) const { // at() can throw, but the map is built by hand to ensure all valid // values are available. @@ -123,8 +128,24 @@ public: int const weight = support / total; // Returns true if the tx has more than minCONSENSUS_PCT (80) percent // agreement. Either voting for _or_ voting against the tx. - return weight > p.minCONSENSUS_PCT || - weight < (100 - p.minCONSENSUS_PCT); + bool const stalled = + weight > p.minCONSENSUS_PCT || weight < (100 - p.minCONSENSUS_PCT); + + if (stalled) + { + // stalling is an error condition for even a single + // transaction. + std::stringstream s; + s << "Transaction " << ID() << " is stalled. We have been voting " + << (getOurVote() ? "YES" : "NO") << " for " << currentVoteCounter_ + << " rounds. Peers have not changed their votes in " + << peersUnchanged << " rounds. The transaction has " << weight + << "% support. "; + JLOG(j_.error()) << s.str(); + CLOG(clog) << s.str(); + } + + return stalled; } //! The disputed transaction. 
From b40a3684ae6be57005b8916a8b13a2c4fa0d1b8a Mon Sep 17 00:00:00 2001 From: Jingchen Date: Mon, 11 Aug 2025 11:21:26 +0100 Subject: [PATCH 111/244] perf: Optimize hash performance by avoiding allocating hash state object (#5469) We're currently calling `XXH3_createState` and `XXH3_freeState` when hashing an object. However, this can be slow because they call `malloc` and `free`, which hurts performance. This change avoids the use of the streaming API as much as possible by using an internal buffer. --- include/xrpl/beast/hash/xxhasher.h | 120 ++++++++++++--- src/test/beast/xxhasher_test.cpp | 232 +++++++++++++++++++++++++++++ 2 files changed, 331 insertions(+), 21 deletions(-) create mode 100644 src/test/beast/xxhasher_test.cpp diff --git a/include/xrpl/beast/hash/xxhasher.h b/include/xrpl/beast/hash/xxhasher.h index 381980902a..9cd343f544 100644 --- a/include/xrpl/beast/hash/xxhasher.h +++ b/include/xrpl/beast/hash/xxhasher.h @@ -24,32 +24,110 @@ #include +#include #include -#include -#include +#include +#include +#include namespace beast { class xxhasher { -private: - // requires 64-bit std::size_t - static_assert(sizeof(std::size_t) == 8, ""); +public: + using result_type = std::size_t; - XXH3_state_t* state_; +private: + static_assert(sizeof(std::size_t) == 8, "requires 64-bit std::size_t"); + // Have an internal buffer to avoid the streaming API + // A 64-byte buffer should be big enough for us + static constexpr std::size_t INTERNAL_BUFFER_SIZE = 64; + + alignas(64) std::array buffer_; + std::span readBuffer_; + std::span writeBuffer_; + + std::optional seed_; + XXH3_state_t* state_ = nullptr; + + void + resetBuffers() + { + writeBuffer_ = std::span{buffer_}; + readBuffer_ = {}; + } + + void + updateHash(void const* data, std::size_t len) + { + if (writeBuffer_.size() < len) + { + flushToState(data, len); + } + else + { + std::memcpy(writeBuffer_.data(), data, len); + writeBuffer_ = writeBuffer_.subspan(len); + readBuffer_ = std::span{ + std::begin(buffer_), buffer_.size() - writeBuffer_.size()}; + } + } static XXH3_state_t* allocState() { auto ret = XXH3_createState(); if (ret == nullptr) - throw std::bad_alloc(); + throw std::bad_alloc(); // LCOV_EXCL_LINE return ret; } -public: - using result_type = std::size_t; + void + flushToState(void const* data, std::size_t len) + { + if (!state_) + { + state_ = allocState(); + if (seed_.has_value()) + { + XXH3_64bits_reset_withSeed(state_, *seed_); + } + else + { + XXH3_64bits_reset(state_); + } + } + XXH3_64bits_update(state_, readBuffer_.data(), readBuffer_.size()); + resetBuffers(); + if (data && len) + { + XXH3_64bits_update(state_, data, len); + } + } + result_type + retrieveHash() + { + if (state_) + { + flushToState(nullptr, 0); + return XXH3_64bits_digest(state_); + } + else + { + if (seed_.has_value()) + { + return XXH3_64bits_withSeed( + readBuffer_.data(), readBuffer_.size(), *seed_); + } + else + { + return XXH3_64bits(readBuffer_.data(), readBuffer_.size()); + } + } + } + +public: static constexpr auto const endian = boost::endian::order::native; xxhasher(xxhasher const&) = delete; @@ -58,43 +136,43 @@ public: xxhasher() { - state_ = allocState(); - XXH3_64bits_reset(state_); + resetBuffers(); } ~xxhasher() noexcept { - XXH3_freeState(state_); + if (state_) + { + XXH3_freeState(state_); + } } template < class Seed, std::enable_if_t::value>* = nullptr> - explicit xxhasher(Seed seed) + explicit xxhasher(Seed seed) : seed_(seed) { - state_ = allocState(); - XXH3_64bits_reset_withSeed(state_, seed); + resetBuffers(); }
template < class Seed, std::enable_if_t::value>* = nullptr> - xxhasher(Seed seed, Seed) + xxhasher(Seed seed, Seed) : seed_(seed) { - state_ = allocState(); - XXH3_64bits_reset_withSeed(state_, seed); + resetBuffers(); } void operator()(void const* key, std::size_t len) noexcept { - XXH3_64bits_update(state_, key, len); + updateHash(key, len); } explicit - operator std::size_t() noexcept + operator result_type() noexcept { - return XXH3_64bits_digest(state_); + return retrieveHash(); } }; diff --git a/src/test/beast/xxhasher_test.cpp b/src/test/beast/xxhasher_test.cpp new file mode 100644 index 0000000000..6c65fea601 --- /dev/null +++ b/src/test/beast/xxhasher_test.cpp @@ -0,0 +1,232 @@ +//------------------------------------------------------------------------------ +/* +This file is part of rippled: https://github.com/ripple/rippled +Copyright (c) 2025 Ripple Labs Inc. + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include + +namespace beast { + +class XXHasher_test : public unit_test::suite +{ +public: + void + testWithoutSeed() + { + testcase("Without seed"); + + xxhasher hasher{}; + + std::string objectToHash{"Hello, xxHash!"}; + hasher(objectToHash.data(), objectToHash.size()); + + BEAST_EXPECT( + static_cast(hasher) == + 16042857369214894119ULL); + } + + void + testWithSeed() + { + testcase("With seed"); + + xxhasher hasher{static_cast(102)}; + + std::string objectToHash{"Hello, xxHash!"}; + hasher(objectToHash.data(), objectToHash.size()); + + BEAST_EXPECT( + static_cast(hasher) == + 14440132435660934800ULL); + } + + void + testWithTwoSeeds() + { + testcase("With two seeds"); + xxhasher hasher{ + static_cast(102), static_cast(103)}; + + std::string objectToHash{"Hello, xxHash!"}; + hasher(objectToHash.data(), objectToHash.size()); + + BEAST_EXPECT( + static_cast(hasher) == + 14440132435660934800ULL); + } + + void + testBigObjectWithMultiupleSmallUpdatesWithoutSeed() + { + testcase("Big object with multiple small updates without seed"); + xxhasher hasher{}; + + std::string objectToHash{"Hello, xxHash!"}; + for (int i = 0; i < 100; i++) + { + hasher(objectToHash.data(), objectToHash.size()); + } + + BEAST_EXPECT( + static_cast(hasher) == + 15296278154063476002ULL); + } + + void + testBigObjectWithMultiupleSmallUpdatesWithSeed() + { + testcase("Big object with multiple small updates with seed"); + xxhasher hasher{static_cast(103)}; + + std::string objectToHash{"Hello, xxHash!"}; + for (int i = 0; i < 100; i++) + { + hasher(objectToHash.data(), objectToHash.size()); + } + + BEAST_EXPECT( + static_cast(hasher) == + 17285302196561698791ULL); + } + + void + testBigObjectWithSmallAndBigUpdatesWithoutSeed() + { + testcase("Big object with small and big updates without seed"); + xxhasher hasher{}; + + std::string objectToHash{"Hello, 
xxHash!"}; + std::string bigObject; + for (int i = 0; i < 20; i++) + { + bigObject += "Hello, xxHash!"; + } + hasher(objectToHash.data(), objectToHash.size()); + hasher(bigObject.data(), bigObject.size()); + hasher(objectToHash.data(), objectToHash.size()); + + BEAST_EXPECT( + static_cast(hasher) == + 1865045178324729219ULL); + } + + void + testBigObjectWithSmallAndBigUpdatesWithSeed() + { + testcase("Big object with small and big updates with seed"); + xxhasher hasher{static_cast(103)}; + + std::string objectToHash{"Hello, xxHash!"}; + std::string bigObject; + for (int i = 0; i < 20; i++) + { + bigObject += "Hello, xxHash!"; + } + hasher(objectToHash.data(), objectToHash.size()); + hasher(bigObject.data(), bigObject.size()); + hasher(objectToHash.data(), objectToHash.size()); + + BEAST_EXPECT( + static_cast(hasher) == + 16189862915636005281ULL); + } + + void + testBigObjectWithOneUpdateWithoutSeed() + { + testcase("Big object with one update without seed"); + xxhasher hasher{}; + + std::string objectToHash; + for (int i = 0; i < 100; i++) + { + objectToHash += "Hello, xxHash!"; + } + hasher(objectToHash.data(), objectToHash.size()); + + BEAST_EXPECT( + static_cast(hasher) == + 15296278154063476002ULL); + } + + void + testBigObjectWithOneUpdateWithSeed() + { + testcase("Big object with one update with seed"); + xxhasher hasher{static_cast(103)}; + + std::string objectToHash; + for (int i = 0; i < 100; i++) + { + objectToHash += "Hello, xxHash!"; + } + hasher(objectToHash.data(), objectToHash.size()); + + BEAST_EXPECT( + static_cast(hasher) == + 17285302196561698791ULL); + } + + void + testOperatorResultTypeDoesNotChangeInternalState() + { + testcase("Operator result type doesn't change the internal state"); + { + xxhasher hasher; + + std::string object{"Hello xxhash"}; + hasher(object.data(), object.size()); + auto xxhashResult1 = static_cast(hasher); + auto xxhashResult2 = static_cast(hasher); + + BEAST_EXPECT(xxhashResult1 == xxhashResult2); + } + { + xxhasher hasher; + + std::string object; + for (int i = 0; i < 100; i++) + { + object += "Hello, xxHash!"; + } + hasher(object.data(), object.size()); + auto xxhashResult1 = hasher.operator xxhasher::result_type(); + auto xxhashResult2 = hasher.operator xxhasher::result_type(); + + BEAST_EXPECT(xxhashResult1 == xxhashResult2); + } + } + + void + run() override + { + testWithoutSeed(); + testWithSeed(); + testWithTwoSeeds(); + testBigObjectWithMultiupleSmallUpdatesWithoutSeed(); + testBigObjectWithMultiupleSmallUpdatesWithSeed(); + testBigObjectWithSmallAndBigUpdatesWithoutSeed(); + testBigObjectWithSmallAndBigUpdatesWithSeed(); + testBigObjectWithOneUpdateWithoutSeed(); + testBigObjectWithOneUpdateWithSeed(); + testOperatorResultTypeDoesNotChangeInternalState(); + } +}; + +BEAST_DEFINE_TESTSUITE(XXHasher, beast_core, beast); +} // namespace beast From bdfc3769513d0b6735b97ce4f29cc0351db04dae Mon Sep 17 00:00:00 2001 From: Bart Date: Mon, 11 Aug 2025 07:24:24 -0400 Subject: [PATCH 112/244] chore: Cleanup bin/ directory (#5660) This change removes ancient and unused files from the `bin/` directory. 
--- bin/browser.js | 470 ---------------------------------------- bin/debug_local_sign.js | 64 ------ bin/email_hash.js | 18 -- bin/flash_policy.js | 31 --- bin/getRippledInfo | 150 ------------- bin/hexify.js | 23 -- bin/jsonrpc_request.js | 42 ---- bin/jsonrpc_server.js | 68 ------ bin/physical.sh | 218 ------------------- bin/rlint.js | 252 --------------------- bin/sh/install-vcpkg.sh | 51 ----- bin/sh/setup-msvc.sh | 40 ---- bin/start_sync_stop.py | 246 --------------------- bin/stop-test.js | 133 ------------ bin/update_binformat.js | 119 ---------- conanfile.py | 1 - 16 files changed, 1926 deletions(-) delete mode 100755 bin/browser.js delete mode 100644 bin/debug_local_sign.js delete mode 100755 bin/email_hash.js delete mode 100755 bin/flash_policy.js delete mode 100755 bin/getRippledInfo delete mode 100755 bin/hexify.js delete mode 100755 bin/jsonrpc_request.js delete mode 100755 bin/jsonrpc_server.js delete mode 100755 bin/physical.sh delete mode 100755 bin/rlint.js delete mode 100755 bin/sh/install-vcpkg.sh delete mode 100755 bin/sh/setup-msvc.sh delete mode 100644 bin/start_sync_stop.py delete mode 100644 bin/stop-test.js delete mode 100644 bin/update_binformat.js diff --git a/bin/browser.js b/bin/browser.js deleted file mode 100755 index 81618bd002..0000000000 --- a/bin/browser.js +++ /dev/null @@ -1,470 +0,0 @@ -#!/usr/bin/node -// -// ledger?l=L -// transaction?h=H -// ledger_entry?l=L&h=H -// account?l=L&a=A -// directory?l=L&dir_root=H&i=I -// directory?l=L&o=A&i=I // owner directory -// offer?l=L&offer=H -// offer?l=L&account=A&i=I -// ripple_state=l=L&a=A&b=A&c=C -// account_lines?l=L&a=A -// -// A=address -// C=currency 3 letter code -// H=hash -// I=index -// L=current | closed | validated | index | hash -// - -var async = require("async"); -var extend = require("extend"); -var http = require("http"); -var url = require("url"); - -var Remote = require("ripple-lib").Remote; - -var program = process.argv[1]; - -var httpd_response = function (res, opts) { - var self=this; - - res.statusCode = opts.statusCode; - res.end( - "" - + "Title" - + "" - + "State:" + self.state - + "" - + (opts.body || '') - + '
'
-      + (opts.url || '')
-      + '
' - + "" - + "" - ); -}; - -var html_link = function (generic) { - return '' + generic + ''; -}; - -// Build a link to a type. -var build_uri = function (params, opts) { - var c; - - if (params.type === 'account') { - c = { - pathname: 'account', - query: { - l: params.ledger, - a: params.account, - }, - }; - - } else if (params.type === 'ledger') { - c = { - pathname: 'ledger', - query: { - l: params.ledger, - }, - }; - - } else if (params.type === 'transaction') { - c = { - pathname: 'transaction', - query: { - h: params.hash, - }, - }; - } else { - c = {}; - } - - opts = opts || {}; - - c.protocol = "http"; - c.hostname = opts.hostname || self.base.hostname; - c.port = opts.port || self.base.port; - - return url.format(c); -}; - -var build_link = function (item, link) { -console.log(link); - return "" + item + ""; -}; - -var rewrite_field = function (type, obj, field, opts) { - if (field in obj) { - obj[field] = rewrite_type(type, obj[field], opts); - } -}; - -var rewrite_type = function (type, obj, opts) { - if ('amount' === type) { - if ('string' === typeof obj) { - // XRP. - return '' + obj + ''; - - } else { - rewrite_field('address', obj, 'issuer', opts); - - return obj; - } - return build_link( - obj, - build_uri({ - type: 'account', - account: obj - }, opts) - ); - } - if ('address' === type) { - return build_link( - obj, - build_uri({ - type: 'account', - account: obj - }, opts) - ); - } - else if ('ledger' === type) { - return build_link( - obj, - build_uri({ - type: 'ledger', - ledger: obj, - }, opts) - ); - } - else if ('node' === type) { - // A node - if ('PreviousTxnID' in obj) - obj.PreviousTxnID = rewrite_type('transaction', obj.PreviousTxnID, opts); - - if ('Offer' === obj.LedgerEntryType) { - if ('NewFields' in obj) { - if ('TakerGets' in obj.NewFields) - obj.NewFields.TakerGets = rewrite_type('amount', obj.NewFields.TakerGets, opts); - - if ('TakerPays' in obj.NewFields) - obj.NewFields.TakerPays = rewrite_type('amount', obj.NewFields.TakerPays, opts); - } - } - - obj.LedgerEntryType = '' + obj.LedgerEntryType + ''; - - return obj; - } - else if ('transaction' === type) { - // Reference to a transaction. - return build_link( - obj, - build_uri({ - type: 'transaction', - hash: obj - }, opts) - ); - } - - return 'ERROR: ' + type; -}; - -var rewrite_object = function (obj, opts) { - var out = extend({}, obj); - - rewrite_field('address', out, 'Account', opts); - - rewrite_field('ledger', out, 'parent_hash', opts); - rewrite_field('ledger', out, 'ledger_index', opts); - rewrite_field('ledger', out, 'ledger_current_index', opts); - rewrite_field('ledger', out, 'ledger_hash', opts); - - if ('ledger' in obj) { - // It's a ledger header. - out.ledger = rewrite_object(out.ledger, opts); - - if ('ledger_hash' in out.ledger) - out.ledger.ledger_hash = '' + out.ledger.ledger_hash + ''; - - delete out.ledger.hash; - delete out.ledger.totalCoins; - } - - if ('TransactionType' in obj) { - // It's a transaction. - out.TransactionType = '' + obj.TransactionType + ''; - - rewrite_field('amount', out, 'TakerGets', opts); - rewrite_field('amount', out, 'TakerPays', opts); - rewrite_field('ledger', out, 'inLedger', opts); - - out.meta.AffectedNodes = out.meta.AffectedNodes.map(function (node) { - var kind = 'CreatedNode' in node - ? 'CreatedNode' - : 'ModifiedNode' in node - ? 'ModifiedNode' - : 'DeletedNode' in node - ? 
'DeletedNode' - : undefined; - - if (kind) { - node[kind] = rewrite_type('node', node[kind], opts); - } - return node; - }); - } - else if ('node' in obj && 'LedgerEntryType' in obj.node) { - // Its a ledger entry. - - if (obj.node.LedgerEntryType === 'AccountRoot') { - rewrite_field('address', out.node, 'Account', opts); - rewrite_field('transaction', out.node, 'PreviousTxnID', opts); - rewrite_field('ledger', out.node, 'PreviousTxnLgrSeq', opts); - } - - out.node.LedgerEntryType = '' + out.node.LedgerEntryType + ''; - } - - return out; -}; - -var augment_object = function (obj, opts, done) { - if (obj.node.LedgerEntryType == 'AccountRoot') { - var tx_hash = obj.node.PreviousTxnID; - var tx_ledger = obj.node.PreviousTxnLgrSeq; - - obj.history = []; - - async.whilst( - function () { return tx_hash; }, - function (callback) { -// console.log("augment_object: request: %s %s", tx_hash, tx_ledger); - opts.remote.request_tx(tx_hash) - .on('success', function (m) { - tx_hash = undefined; - tx_ledger = undefined; - -//console.log("augment_object: ", JSON.stringify(m)); - m.meta.AffectedNodes.filter(function(n) { -// console.log("augment_object: ", JSON.stringify(n)); -// if (n.ModifiedNode) -// console.log("augment_object: %s %s %s %s %s %s/%s", 'ModifiedNode' in n, n.ModifiedNode && (n.ModifiedNode.LedgerEntryType === 'AccountRoot'), n.ModifiedNode && n.ModifiedNode.FinalFields && (n.ModifiedNode.FinalFields.Account === obj.node.Account), Object.keys(n)[0], n.ModifiedNode && (n.ModifiedNode.LedgerEntryType), obj.node.Account, n.ModifiedNode && n.ModifiedNode.FinalFields && n.ModifiedNode.FinalFields.Account); -// if ('ModifiedNode' in n && n.ModifiedNode.LedgerEntryType === 'AccountRoot') -// { -// console.log("***: ", JSON.stringify(m)); -// console.log("***: ", JSON.stringify(n)); -// } - return 'ModifiedNode' in n - && n.ModifiedNode.LedgerEntryType === 'AccountRoot' - && n.ModifiedNode.FinalFields - && n.ModifiedNode.FinalFields.Account === obj.node.Account; - }) - .forEach(function (n) { - tx_hash = n.ModifiedNode.PreviousTxnID; - tx_ledger = n.ModifiedNode.PreviousTxnLgrSeq; - - obj.history.push({ - tx_hash: tx_hash, - tx_ledger: tx_ledger - }); -console.log("augment_object: next: %s %s", tx_hash, tx_ledger); - }); - - callback(); - }) - .on('error', function (m) { - callback(m); - }) - .request(); - }, - function (err) { - if (err) { - done(); - } - else { - async.forEach(obj.history, function (o, callback) { - opts.remote.request_account_info(obj.node.Account) - .ledger_index(o.tx_ledger) - .on('success', function (m) { -//console.log("augment_object: ", JSON.stringify(m)); - o.Balance = m.account_data.Balance; -// o.account_data = m.account_data; - callback(); - }) - .on('error', function (m) { - o.error = m; - callback(); - }) - .request(); - }, - function (err) { - done(err); - }); - } - }); - } - else { - done(); - } -}; - -if (process.argv.length < 4 || process.argv.length > 7) { - console.log("Usage: %s ws_ip ws_port [ [ []]]", program); -} -else { - var ws_ip = process.argv[2]; - var ws_port = process.argv[3]; - var ip = process.argv.length > 4 ? process.argv[4] : "127.0.0.1"; - var port = process.argv.length > 5 ? 
process.argv[5] : "8080"; - -// console.log("START"); - var self = this; - - var remote = (new Remote({ - websocket_ip: ws_ip, - websocket_port: ws_port, - trace: false - })) - .on('state', function (m) { - console.log("STATE: %s", m); - - self.state = m; - }) -// .once('ledger_closed', callback) - .connect() - ; - - self.base = { - hostname: ip, - port: port, - remote: remote, - }; - -// console.log("SERVE"); - var server = http.createServer(function (req, res) { - var input = ""; - - req.setEncoding(); - - req.on('data', function (buffer) { - // console.log("DATA: %s", buffer); - input = input + buffer; - }); - - req.on('end', function () { - // console.log("URL: %s", req.url); - // console.log("HEADERS: %s", JSON.stringify(req.headers, undefined, 2)); - - var _parsed = url.parse(req.url, true); - var _url = JSON.stringify(_parsed, undefined, 2); - - // console.log("HEADERS: %s", JSON.stringify(_parsed, undefined, 2)); - if (_parsed.pathname === "/account") { - var request = remote - .request_ledger_entry('account_root') - .ledger_index(-1) - .account_root(_parsed.query.a) - .on('success', function (m) { - // console.log("account_root: %s", JSON.stringify(m, undefined, 2)); - - augment_object(m, self.base, function() { - httpd_response(res, - { - statusCode: 200, - url: _url, - body: "
"
-                              + JSON.stringify(rewrite_object(m, self.base), undefined, 2)
-                              + "
" - }); - }); - }) - .request(); - - } else if (_parsed.pathname === "/ledger") { - var request = remote - .request_ledger(undefined, { expand: true, transactions: true }) - .on('success', function (m) { - // console.log("Ledger: %s", JSON.stringify(m, undefined, 2)); - - httpd_response(res, - { - statusCode: 200, - url: _url, - body: "
"
-                          + JSON.stringify(rewrite_object(m, self.base), undefined, 2)
-                          +"
" - }); - }) - - if (_parsed.query.l && _parsed.query.l.length === 64) { - request.ledger_hash(_parsed.query.l); - } - else if (_parsed.query.l) { - request.ledger_index(Number(_parsed.query.l)); - } - else { - request.ledger_index(-1); - } - - request.request(); - - } else if (_parsed.pathname === "/transaction") { - var request = remote - .request_tx(_parsed.query.h) -// .request_transaction_entry(_parsed.query.h) -// .ledger_select(_parsed.query.l) - .on('success', function (m) { - // console.log("transaction: %s", JSON.stringify(m, undefined, 2)); - - httpd_response(res, - { - statusCode: 200, - url: _url, - body: "
"
-                            + JSON.stringify(rewrite_object(m, self.base), undefined, 2)
-                            +"
" - }); - }) - .on('error', function (m) { - httpd_response(res, - { - statusCode: 200, - url: _url, - body: "
"
-                            + 'ERROR: ' + JSON.stringify(m, undefined, 2)
-                            +"
" - }); - }) - .request(); - - } else { - var test = build_uri({ - type: 'account', - ledger: 'closed', - account: 'rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh', - }, self.base); - - httpd_response(res, - { - statusCode: req.url === "/" ? 200 : 404, - url: _url, - }); - } - }); - }); - - server.listen(port, ip, undefined, - function () { - console.log("Listening at: http://%s:%s", ip, port); - }); -} - -// vim:sw=2:sts=2:ts=8:et diff --git a/bin/debug_local_sign.js b/bin/debug_local_sign.js deleted file mode 100644 index 24f9aab481..0000000000 --- a/bin/debug_local_sign.js +++ /dev/null @@ -1,64 +0,0 @@ -var ripple = require('ripple-lib'); - -var v = { - seed: "snoPBrXtMeMyMHUVTgbuqAfg1SUTb", - addr: "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh" -}; - -var remote = ripple.Remote.from_config({ - "trusted" : true, - "websocket_ip" : "127.0.0.1", - "websocket_port" : 5006, - "websocket_ssl" : false, - "local_signing" : true -}); - -var tx_json = { - "Account" : v.addr, - "Amount" : "10000000", - "Destination" : "rEu2ULPiEQm1BAL8pYzmXnNX1aFX9sCks", - "Fee" : "10", - "Flags" : 0, - "Sequence" : 3, - "TransactionType" : "Payment" - - //"SigningPubKey": '0396941B22791A448E5877A44CE98434DB217D6FB97D63F0DAD23BE49ED45173C9' -}; - -remote.on('connected', function () { - var req = remote.request_sign(v.seed, tx_json); - req.message.debug_signing = true; - req.on('success', function (result) { - console.log("SERVER RESULT"); - console.log(result); - - var sim = {}; - var tx = remote.transaction(); - tx.tx_json = tx_json; - tx._secret = v.seed; - tx.complete(); - var unsigned = tx.serialize().to_hex(); - tx.sign(); - - sim.tx_blob = tx.serialize().to_hex(); - sim.tx_json = tx.tx_json; - sim.tx_signing_hash = tx.signing_hash().to_hex(); - sim.tx_unsigned = unsigned; - - console.log("\nLOCAL RESULT"); - console.log(sim); - - remote.connect(false); - }); - req.on('error', function (err) { - if (err.error === "remoteError" && err.remote.error === "srcActNotFound") { - console.log("Please fund account "+v.addr+" to run this test."); - } else { - console.log('error', err); - } - remote.connect(false); - }); - req.request(); - -}); -remote.connect(); diff --git a/bin/email_hash.js b/bin/email_hash.js deleted file mode 100755 index ab4f97c47b..0000000000 --- a/bin/email_hash.js +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/node -// -// Returns a Gravatar style hash as per: http://en.gravatar.com/site/implement/hash/ -// - -if (3 != process.argv.length) { - process.stderr.write("Usage: " + process.argv[1] + " email_address\n\nReturns gravatar style hash.\n"); - process.exit(1); - -} else { - var md5 = require('crypto').createHash('md5'); - - md5.update(process.argv[2].trim().toLowerCase()); - - process.stdout.write(md5.digest('hex') + "\n"); -} - -// vim:sw=2:sts=2:ts=8:et diff --git a/bin/flash_policy.js b/bin/flash_policy.js deleted file mode 100755 index e1361d46dc..0000000000 --- a/bin/flash_policy.js +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/node -// -// This program allows IE 9 ripple-clients to make websocket connections to -// rippled using flash. As IE 9 does not have websocket support, this required -// if you wish to support IE 9 ripple-clients. -// -// http://www.lightsphere.com/dev/articles/flash_socket_policy.html -// -// For better security, be sure to set the Port below to the port of your -// [websocket_public_port]. 
-// - -var net = require("net"), - port = "*", - domains = ["*:"+port]; // Domain:Port - -net.createServer( - function(socket) { - socket.write("\n"); - socket.write("\n"); - socket.write("\n"); - domains.forEach( - function(domain) { - var parts = domain.split(':'); - socket.write("\t\n"); - } - ); - socket.write("\n"); - socket.end(); - } -).listen(843); diff --git a/bin/getRippledInfo b/bin/getRippledInfo deleted file mode 100755 index abfa449bac..0000000000 --- a/bin/getRippledInfo +++ /dev/null @@ -1,150 +0,0 @@ -#!/usr/bin/env bash - -# This script generates information about your rippled installation -# and system. It can be used to help debug issues that you may face -# in your installation. While this script endeavors to not display any -# sensitive information, it is recommended that you read the output -# before sharing with any third parties. - - -rippled_exe=/opt/ripple/bin/rippled -conf_file=/etc/opt/ripple/rippled.cfg - -while getopts ":e:c:" opt; do - case $opt in - e) - rippled_exe=${OPTARG} - ;; - c) - conf_file=${OPTARG} - ;; - \?) - echo "Invalid option: -$OPTARG" - exit -1 - esac -done - -tmp_loc=$(mktemp -d --tmpdir ripple_info.XXXXX) -chmod 751 ${tmp_loc} -awk_prog=${tmp_loc}/cfg.awk -summary_out=${tmp_loc}/rippled_info.md -printf "# rippled report info\n\n> generated at %s\n" "$(date -R)" > ${summary_out} - -function log_section { - printf "\n## %s\n" "$*" >> ${summary_out} - - while read -r l; do - echo " $l" >> ${summary_out} - done > ${awk_prog} - BEGIN {FS="[[:space:]]*=[[:space:]]*"; skip=0; db_path=""; print > OUT_FILE; split(exl,exa,"|")} - /^#/ {next} - save==2 && /^[[:space:]]*$/ {next} - /^\[.+\]$/ { - section=tolower(gensub(/^\[[[:space:]]*([a-zA-Z_]+)[[:space:]]*\]$/, "\\1", "g")) - skip = 0 - for (i in exa) { - if (section == exa[i]) - skip = 1 - } - if (section == "database_path") - save = 1 - } - skip==1 {next} - save==2 {save=0; db_path=$0} - save==1 {save=2} - $1 ~ /password/ {$0=$1"="} - {print >> OUT_FILE} - END {print db_path} -EOP - - db=$(\ - sed -r -e 's/\//g;s/^[[:space:]]*//;s/[[:space:]]*$//' ${conf_file} |\ - awk -v OUT_FILE=${cleaned_conf} -v exl="$(join_by '|' "${exclude[@]}")" -f ${awk_prog}) - rm ${awk_prog} - cat ${cleaned_conf} | log_section "cleaned config file" - rm ${cleaned_conf} - echo "${db}" | log_section "database path" - df ${db} | log_section "df: database" -fi - -# Send output from this script to a log file -## this captures any messages -## or errors from the script itself - -log_file=${tmp_loc}/get_info.log -exec 3>&1 1>>${log_file} 2>&1 - -## Send all stdout files to /tmp - -if [[ -x ${rippled_exe} ]] ; then - pgrep rippled && \ - ${rippled_exe} --conf ${conf_file} \ - -- server_info | log_section "server info" -fi - -cat /proc/meminfo | log_section "meminfo" -cat /proc/swaps | log_section "swap space" -ulimit -a | log_section "ulimit" - -if command -v lshw >/dev/null 2>&1 ; then - lshw 2>/dev/null | log_section "hardware info" -else - lscpu > ${tmp_loc}/hw_info.txt - hwinfo >> ${tmp_loc}/hw_info.txt - lspci >> ${tmp_loc}/hw_info.txt - lsblk >> ${tmp_loc}/hw_info.txt - cat ${tmp_loc}/hw_info.txt | log_section "hardware info" - rm ${tmp_loc}/hw_info.txt -fi - -if command -v iostat >/dev/null 2>&1 ; then - iostat -t -d -x 2 6 | log_section "iostat" -fi - -df -h | log_section "free disk space" -drives=($(df | awk '$1 ~ /^\/dev\// {print $1}' | xargs -n 1 basename)) -block_devs=($(ls /sys/block/)) -for d in "${drives[@]}"; do - for dev in "${block_devs[@]}"; do - #echo "D: [$d], DEV: [$dev]" - if [[ $d =~ $dev ]]; then - # 
this file (if exists) has 0 for SSD and 1 for HDD - if [[ "$(cat /sys/block/${dev}/queue/rotational 2>/dev/null)" == 0 ]] ; then - echo "${d} : SSD" >> ${tmp_loc}/is_ssd.txt - else - echo "${d} : NO SSD" >> ${tmp_loc}/is_ssd.txt - fi - fi - done -done - -if [[ -f ${tmp_loc}/is_ssd.txt ]] ; then - cat ${tmp_loc}/is_ssd.txt | log_section "SSD" - rm ${tmp_loc}/is_ssd.txt -fi - -cat ${log_file} | log_section "script log" - -cat << MSG | tee /dev/fd/3 -#################################################### - rippled info has been gathered. Please copy the - contents of ${summary_out} - to a github gist at https://gist.github.com/ - - PLEASE REVIEW THIS FILE FOR ANY SENSITIVE DATA - BEFORE POSTING! We have tried our best to omit - any sensitive information from this file, but you - should verify before posting. -#################################################### -MSG - diff --git a/bin/hexify.js b/bin/hexify.js deleted file mode 100755 index 1e2fb70009..0000000000 --- a/bin/hexify.js +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/node -// -// Returns hex of lowercasing a string. -// - -var stringToHex = function (s) { - return Array.prototype.map.call(s, function (c) { - var b = c.charCodeAt(0); - - return b < 16 ? "0" + b.toString(16) : b.toString(16); - }).join(""); -}; - -if (3 != process.argv.length) { - process.stderr.write("Usage: " + process.argv[1] + " string\n\nReturns hex of lowercasing string.\n"); - process.exit(1); - -} else { - - process.stdout.write(stringToHex(process.argv[2].toLowerCase()) + "\n"); -} - -// vim:sw=2:sts=2:ts=8:et diff --git a/bin/jsonrpc_request.js b/bin/jsonrpc_request.js deleted file mode 100755 index 0b9c08666d..0000000000 --- a/bin/jsonrpc_request.js +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/node -// -// This is a tool to issue JSON-RPC requests from the command line. -// -// This can be used to test a JSON-RPC server. -// -// Requires: npm simple-jsonrpc -// - -var jsonrpc = require('simple-jsonrpc'); - -var program = process.argv[1]; - -if (5 !== process.argv.length) { - console.log("Usage: %s ", program); -} -else { - var url = process.argv[2]; - var method = process.argv[3]; - var json_raw = process.argv[4]; - var json; - - try { - json = JSON.parse(json_raw); - } - catch (e) { - console.log("JSON parse error: %s", e.message); - throw e; - } - - var client = jsonrpc.client(url); - - client.call(method, json, - function (result) { - console.log(JSON.stringify(result, undefined, 2)); - }, - function (error) { - console.log(JSON.stringify(error, undefined, 2)); - }); -} - -// vim:sw=2:sts=2:ts=8:et diff --git a/bin/jsonrpc_server.js b/bin/jsonrpc_server.js deleted file mode 100755 index 4cd3ffb95c..0000000000 --- a/bin/jsonrpc_server.js +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/node -// -// This is a tool to listen for JSON-RPC requests at an IP and port. -// -// This will report the request to console and echo back the request as the response. 
-// - -var http = require("http"); - -var program = process.argv[1]; - -if (4 !== process.argv.length) { - console.log("Usage: %s ", program); -} -else { - var ip = process.argv[2]; - var port = process.argv[3]; - - var server = http.createServer(function (req, res) { - console.log("CONNECT"); - var input = ""; - - req.setEncoding(); - - req.on('data', function (buffer) { - // console.log("DATA: %s", buffer); - input = input + buffer; - }); - - req.on('end', function () { - // console.log("END"); - - var json_req; - - console.log("URL: %s", req.url); - console.log("HEADERS: %s", JSON.stringify(req.headers, undefined, 2)); - - try { - json_req = JSON.parse(input); - - console.log("REQ: %s", JSON.stringify(json_req, undefined, 2)); - } - catch (e) { - console.log("BAD JSON: %s", e.message); - - json_req = { error : e.message } - } - - res.statusCode = 200; - res.end(JSON.stringify({ - jsonrpc: "2.0", - result: { request : json_req }, - id: req.id - })); - }); - - req.on('close', function () { - console.log("CLOSE"); - }); - }); - - server.listen(port, ip, undefined, - function () { - console.log("Listening at: %s:%s", ip, port); - }); -} - -// vim:sw=2:sts=2:ts=8:et diff --git a/bin/physical.sh b/bin/physical.sh deleted file mode 100755 index c2c5aad68d..0000000000 --- a/bin/physical.sh +++ /dev/null @@ -1,218 +0,0 @@ -#!/bin/bash - -set -o errexit - -marker_base=985c80fbc6131f3a8cedd0da7e8af98dfceb13c7 -marker_commit=${1:-${marker_base}} - -if [ $(git merge-base ${marker_commit} ${marker_base}) != ${marker_base} ]; then - echo "first marker commit not an ancestor: ${marker_commit}" - exit 1 -fi - -if [ $(git merge-base ${marker_commit} HEAD) != $(git rev-parse --verify ${marker_commit}) ]; then - echo "given marker commit not an ancestor: ${marker_commit}" - exit 1 -fi - -if [ -e Builds/CMake ]; then - echo move CMake - git mv Builds/CMake cmake - git add --update . - git commit -m 'Move CMake directory' --author 'Pretty Printer ' -fi - -if [ -e src/ripple ]; then - - echo move protocol buffers - mkdir -p include/xrpl - if [ -e src/ripple/proto ]; then - git mv src/ripple/proto include/xrpl - fi - - extract_list() { - git show ${marker_commit}:Builds/CMake/RippledCore.cmake | \ - awk "/END ${1}/ { p = 0 } p && /src\/ripple/; /BEGIN ${1}/ { p = 1 }" | \ - sed -e 's#src/ripple/##' -e 's#[^a-z]\+$##' - } - - move_files() { - oldroot="$1"; shift - newroot="$1"; shift - detail="$1"; shift - files=("$@") - for file in ${files[@]}; do - if [ ! 
-e ${oldroot}/${file} ]; then - continue - fi - dir=$(dirname ${file}) - if [ $(basename ${dir}) == 'details' ]; then - dir=$(dirname ${dir}) - fi - if [ $(basename ${dir}) == 'impl' ]; then - dir="$(dirname ${dir})/${detail}" - fi - mkdir -p ${newroot}/${dir} - git mv ${oldroot}/${file} ${newroot}/${dir} - done - } - - echo move libxrpl headers - files=$(extract_list 'LIBXRPL HEADERS') - files+=( - basics/SlabAllocator.h - - beast/asio/io_latency_probe.h - beast/container/aged_container.h - beast/container/aged_container_utility.h - beast/container/aged_map.h - beast/container/aged_multimap.h - beast/container/aged_multiset.h - beast/container/aged_set.h - beast/container/aged_unordered_map.h - beast/container/aged_unordered_multimap.h - beast/container/aged_unordered_multiset.h - beast/container/aged_unordered_set.h - beast/container/detail/aged_associative_container.h - beast/container/detail/aged_container_iterator.h - beast/container/detail/aged_ordered_container.h - beast/container/detail/aged_unordered_container.h - beast/container/detail/empty_base_optimization.h - beast/core/LockFreeStack.h - beast/insight/Collector.h - beast/insight/Counter.h - beast/insight/CounterImpl.h - beast/insight/Event.h - beast/insight/EventImpl.h - beast/insight/Gauge.h - beast/insight/GaugeImpl.h - beast/insight/Group.h - beast/insight/Groups.h - beast/insight/Hook.h - beast/insight/HookImpl.h - beast/insight/Insight.h - beast/insight/Meter.h - beast/insight/MeterImpl.h - beast/insight/NullCollector.h - beast/insight/StatsDCollector.h - beast/test/fail_counter.h - beast/test/fail_stream.h - beast/test/pipe_stream.h - beast/test/sig_wait.h - beast/test/string_iostream.h - beast/test/string_istream.h - beast/test/string_ostream.h - beast/test/test_allocator.h - beast/test/yield_to.h - beast/utility/hash_pair.h - beast/utility/maybe_const.h - beast/utility/temp_dir.h - - # included by only json/impl/json_assert.h - json/json_errors.h - - protocol/PayChan.h - protocol/RippleLedgerHash.h - protocol/messages.h - protocol/st.h - ) - files+=( - basics/README.md - crypto/README.md - json/README.md - protocol/README.md - resource/README.md - ) - move_files src/ripple include/xrpl detail ${files[@]} - - echo move libxrpl sources - files=$(extract_list 'LIBXRPL SOURCES') - move_files src/ripple src/libxrpl "" ${files[@]} - - echo check leftovers - dirs=$(cd include/xrpl; ls -d */) - dirs=$(cd src/ripple; ls -d ${dirs} 2>/dev/null || true) - files="$(cd src/ripple; find ${dirs} -type f)" - if [ -n "${files}" ]; then - echo "leftover files:" - echo ${files} - exit - fi - - echo remove empty directories - empty_dirs="$(cd src/ripple; find ${dirs} -depth -type d)" - for dir in ${empty_dirs[@]}; do - if [ -e ${dir} ]; then - rmdir ${dir} - fi - done - - echo move xrpld sources - files=$( - extract_list 'XRPLD SOURCES' - cd src/ripple - find * -regex '.*\.\(h\|ipp\|md\|pu\|uml\|png\)' - ) - move_files src/ripple src/xrpld detail ${files[@]} - - files="$(cd src/ripple; find . -type f)" - if [ -n "${files}" ]; then - echo "leftover files:" - echo ${files} - exit - fi - -fi - -rm -rf src/ripple - -echo rename .hpp to .h -find include src -name '*.hpp' -exec bash -c 'f="{}"; git mv "${f}" "${f%hpp}h"' \; - -echo move PerfLog.h -if [ -e include/xrpl/basics/PerfLog.h ]; then - git mv include/xrpl/basics/PerfLog.h src/xrpld/perflog -fi - -# Make sure all protobuf includes have the correct prefix. 
-protobuf_replace='s:^#include\s*["<].*org/xrpl\([^">]\+\)[">]:#include :' -# Make sure first-party includes use angle brackets and .h extension. -ripple_replace='s:include\s*["<]ripple/\(.*\)\.h\(pp\)\?[">]:include :' -beast_replace='s:include\s*:#include :" \ - -e "s:^#include ' -find include src -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.ipp' \) -exec clang-format-10 -i {} + -git add --update . -git commit -m 'Rewrite includes' --author 'Pretty Printer ' -./Builds/levelization/levelization.sh -git add --update . -git commit -m 'Recompute loops' --author 'Pretty Printer ' diff --git a/bin/rlint.js b/bin/rlint.js deleted file mode 100755 index ce12e9560a..0000000000 --- a/bin/rlint.js +++ /dev/null @@ -1,252 +0,0 @@ -#!/usr/bin/node - -var async = require('async'); -var Remote = require('ripple-lib').Remote; -var Transaction = require('ripple-lib').Transaction; -var UInt160 = require('ripple-lib').UInt160; -var Amount = require('ripple-lib').Amount; - -var book_key = function (book) { - return book.taker_pays.currency - + ":" + book.taker_pays.issuer - + ":" + book.taker_gets.currency - + ":" + book.taker_gets.issuer; -}; - -var book_key_cross = function (book) { - return book.taker_gets.currency - + ":" + book.taker_gets.issuer - + ":" + book.taker_pays.currency - + ":" + book.taker_pays.issuer; -}; - -var ledger_verify = function (ledger) { - var dir_nodes = ledger.accountState.filter(function (entry) { - return entry.LedgerEntryType === 'DirectoryNode' // Only directories - && entry.index === entry.RootIndex // Only root nodes - && 'TakerGetsCurrency' in entry; // Only offer directories - }); - - var books = {}; - - dir_nodes.forEach(function (node) { - var book = { - taker_gets: { - currency: UInt160.from_generic(node.TakerGetsCurrency).to_json(), - issuer: UInt160.from_generic(node.TakerGetsIssuer).to_json() - }, - taker_pays: { - currency: UInt160.from_generic(node.TakerPaysCurrency).to_json(), - issuer: UInt160.from_generic(node.TakerPaysIssuer).to_json() - }, - quality: Amount.from_quality(node.RootIndex), - index: node.RootIndex - }; - - books[book_key(book)] = book; - -// console.log(JSON.stringify(node, undefined, 2)); - }); - -// console.log(JSON.stringify(dir_entry, undefined, 2)); - console.log("#%s books: %s", ledger.ledger_index, Object.keys(books).length); - - Object.keys(books).forEach(function (key) { - var book = books[key]; - var key_cross = book_key_cross(book); - var book_cross = books[key_cross]; - - if (book && book_cross && !book_cross.done) - { - var book_cross_quality_inverted = Amount.from_json("1.0/1/1").divide(book_cross.quality); - - if (book_cross_quality_inverted.compareTo(book.quality) >= 0) - { - // Crossing books - console.log("crossing: #%s :: %s :: %s :: %s :: %s :: %s :: %s", ledger.ledger_index, key, book.quality.to_text(), book_cross.quality.to_text(), book_cross_quality_inverted.to_text(), - book.index, book_cross.index); - } - - book_cross.done = true; - } - }); - - var ripple_selfs = {}; - - var accounts = {}; - var counts = {}; - - ledger.accountState.forEach(function (entry) { - if (entry.LedgerEntryType === 'Offer') - { - counts[entry.Account] = (counts[entry.Account] || 0) + 1; - } - else if (entry.LedgerEntryType === 'RippleState') - { - if (entry.Flags & (0x10000 | 0x40000)) - { - counts[entry.LowLimit.issuer] = (counts[entry.LowLimit.issuer] || 0) + 1; - } - - if (entry.Flags & (0x20000 | 0x80000)) - { - counts[entry.HighLimit.issuer] = (counts[entry.HighLimit.issuer] || 0) + 1; - } - - if (entry.HighLimit.issuer === 
entry.LowLimit.issuer) - ripple_selfs[entry.Account] = entry; - } - else if (entry.LedgerEntryType == 'AccountRoot') - { - accounts[entry.Account] = entry; - } - }); - - var low = 0; // Accounts with too low a count. - var high = 0; - var missing_accounts = 0; // Objects with no referencing account. - var missing_objects = 0; // Accounts specifying an object but having none. - - Object.keys(counts).forEach(function (account) { - if (account in accounts) - { - if (counts[account] !== accounts[account].OwnerCount) - { - if (counts[account] < accounts[account].OwnerCount) - { - high += 1; - console.log("%s: high count %s/%s", account, counts[account], accounts[account].OwnerCount); - } - else - { - low += 1; - console.log("%s: low count %s/%s", account, counts[account], accounts[account].OwnerCount); - } - } - } - else - { - missing_accounts += 1; - - console.log("%s: missing : count %s", account, counts[account]); - } - }); - - Object.keys(accounts).forEach(function (account) { - if (!('OwnerCount' in accounts[account])) - { - console.log("%s: bad entry : %s", account, JSON.stringify(accounts[account], undefined, 2)); - } - else if (!(account in counts) && accounts[account].OwnerCount) - { - missing_objects += 1; - - console.log("%s: no objects : %s/%s", account, 0, accounts[account].OwnerCount); - } - }); - - if (low) - console.log("counts too low = %s", low); - - if (high) - console.log("counts too high = %s", high); - - if (missing_objects) - console.log("missing_objects = %s", missing_objects); - - if (missing_accounts) - console.log("missing_accounts = %s", missing_accounts); - - if (Object.keys(ripple_selfs).length) - console.log("RippleState selfs = %s", Object.keys(ripple_selfs).length); - -}; - -var ledger_request = function (remote, ledger_index, done) { - remote.request_ledger(undefined, { - accounts: true, - expand: true, - }) - .ledger_index(ledger_index) - .on('success', function (m) { - // console.log("ledger: ", ledger_index); - // console.log("ledger: ", JSON.stringify(m, undefined, 2)); - done(m.ledger); - }) - .on('error', function (m) { - console.log("error"); - done(); - }) - .request(); -}; - -var usage = function () { - console.log("rlint.js _websocket_ip_ _websocket_port_ "); -}; - -var finish = function (remote) { - remote.disconnect(); - - // XXX Because remote.disconnect() doesn't work: - process.exit(); -}; - -console.log("args: ", process.argv.length); -console.log("args: ", process.argv); - -if (process.argv.length < 4) { - usage(); -} -else { - var remote = Remote.from_config({ - websocket_ip: process.argv[2], - websocket_port: process.argv[3], - }) - .once('ledger_closed', function (m) { - console.log("ledger_closed: ", JSON.stringify(m, undefined, 2)); - - if (process.argv.length === 5) { - var ledger_index = process.argv[4]; - - ledger_request(remote, ledger_index, function (l) { - if (l) { - ledger_verify(l); - } - - finish(remote); - }); - - } else if (process.argv.length === 6) { - var ledger_start = Number(process.argv[4]); - var ledger_end = Number(process.argv[5]); - var ledger_cursor = ledger_end; - - async.whilst( - function () { - return ledger_start <= ledger_cursor && ledger_cursor <=ledger_end; - }, - function (callback) { - // console.log(ledger_cursor); - - ledger_request(remote, ledger_cursor, function (l) { - if (l) { - ledger_verify(l); - } - - --ledger_cursor; - - callback(); - }); - }, - function (error) { - finish(remote); - }); - - } else { - finish(remote); - } - }) - .connect(); -} - -// vim:sw=2:sts=2:ts=8:et diff --git 
a/bin/sh/install-vcpkg.sh b/bin/sh/install-vcpkg.sh deleted file mode 100755 index 8cf8f2d088..0000000000 --- a/bin/sh/install-vcpkg.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env bash -set -exu - -: ${TRAVIS_BUILD_DIR:=""} -: ${VCPKG_DIR:=".vcpkg"} -export VCPKG_ROOT=${VCPKG_DIR} -: ${VCPKG_DEFAULT_TRIPLET:="x64-windows-static"} - -export VCPKG_DEFAULT_TRIPLET - -EXE="vcpkg" -if [[ -z ${COMSPEC:-} ]]; then - EXE="${EXE}.exe" -fi - -if [[ -d "${VCPKG_DIR}" && -x "${VCPKG_DIR}/${EXE}" && -d "${VCPKG_DIR}/installed" ]] ; then - echo "Using cached vcpkg at ${VCPKG_DIR}" - ${VCPKG_DIR}/${EXE} list -else - if [[ -d "${VCPKG_DIR}" ]] ; then - rm -rf "${VCPKG_DIR}" - fi - git clone --branch 2021.04.30 https://github.com/Microsoft/vcpkg.git ${VCPKG_DIR} - pushd ${VCPKG_DIR} - BSARGS=() - if [[ "$(uname)" == "Darwin" ]] ; then - BSARGS+=(--allowAppleClang) - fi - if [[ -z ${COMSPEC:-} ]]; then - chmod +x ./bootstrap-vcpkg.sh - time ./bootstrap-vcpkg.sh "${BSARGS[@]}" - else - time ./bootstrap-vcpkg.bat - fi - popd -fi - -# TODO: bring boost in this way as well ? -# NOTE: can pin specific ports to a commit/version like this: -# git checkout ports/boost -if [ $# -eq 0 ]; then - echo "No extra packages specified..." - PKGS=() -else - PKGS=( "$@" ) -fi -for LIB in "${PKGS[@]}"; do - time ${VCPKG_DIR}/${EXE} --clean-after-build install ${LIB} -done - - diff --git a/bin/sh/setup-msvc.sh b/bin/sh/setup-msvc.sh deleted file mode 100755 index 8d61c9757f..0000000000 --- a/bin/sh/setup-msvc.sh +++ /dev/null @@ -1,40 +0,0 @@ - -# NOTE: must be sourced from a shell so it can export vars - -cat << BATCH > ./getenv.bat -CALL %* -ENV -BATCH - -while read line ; do - IFS='"' read x path arg <<<"${line}" - if [ -f "${path}" ] ; then - echo "FOUND: $path" - export VCINSTALLDIR=$(./getenv.bat "${path}" ${arg} | grep "^VCINSTALLDIR=" | sed -E "s/^VCINSTALLDIR=//g") - if [ "${VCINSTALLDIR}" != "" ] ; then - echo "USING ${VCINSTALLDIR}" - export LIB=$(./getenv.bat "${path}" ${arg} | grep "^LIB=" | sed -E "s/^LIB=//g") - export LIBPATH=$(./getenv.bat "${path}" ${arg} | grep "^LIBPATH=" | sed -E "s/^LIBPATH=//g") - export INCLUDE=$(./getenv.bat "${path}" ${arg} | grep "^INCLUDE=" | sed -E "s/^INCLUDE=//g") - ADDPATH=$(./getenv.bat "${path}" ${arg} | grep "^PATH=" | sed -E "s/^PATH=//g") - export PATH="${ADDPATH}:${PATH}" - break - fi - fi -done <= 7 - -import argparse -import asyncio -import configparser -import contextlib -import json -import logging -import os -from pathlib import Path -import platform -import subprocess -import time -import urllib.error -import urllib.request - -# Enable asynchronous subprocesses on Windows. The default changed in 3.8. -# https://docs.python.org/3.7/library/asyncio-platforms.html#subprocess-support-on-windows -if (platform.system() == 'Windows' and sys.version_info.major == 3 - and sys.version_info.minor < 8): - asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy()) - -DEFAULT_EXE = 'rippled' -DEFAULT_CONFIGURATION_FILE = 'rippled.cfg' -# Number of seconds to wait before forcefully terminating. -PATIENCE = 120 -# Number of contiguous seconds in a sync state to be considered synced. -DEFAULT_SYNC_DURATION = 60 -# Number of seconds between polls of state. -DEFAULT_POLL_INTERVAL = 5 -SYNC_STATES = ('full', 'validating', 'proposing') - - -def read_config(config_file): - # strict = False: Allow duplicate keys, e.g. [rpc_startup]. - # allow_no_value = True: Allow keys with no values. 
Generally, these - # instances use the "key" as the value, and the section name is the key, - # e.g. [debug_logfile]. - # delimiters = ('='): Allow ':' as a character in Windows paths. Some of - # our "keys" are actually values, and we don't want to split them on ':'. - config = configparser.ConfigParser( - strict=False, - allow_no_value=True, - delimiters=('='), - ) - config.read(config_file) - return config - - -def to_list(value, separator=','): - """Parse a list from a delimited string value.""" - return [s.strip() for s in value.split(separator) if s] - - -def find_log_file(config_file): - """Try to figure out what log file the user has chosen. Raises all kinds - of exceptions if there is any possibility of ambiguity.""" - config = read_config(config_file) - values = list(config['debug_logfile'].keys()) - if len(values) < 1: - raise ValueError( - f'no [debug_logfile] in configuration file: {config_file}') - if len(values) > 1: - raise ValueError( - f'too many [debug_logfile] in configuration file: {config_file}') - return values[0] - - -def find_http_port(config_file): - config = read_config(config_file) - names = list(config['server'].keys()) - for name in names: - server = config[name] - if 'http' in to_list(server.get('protocol', '')): - return int(server['port']) - raise ValueError(f'no server in [server] for "http" protocol') - - -@contextlib.asynccontextmanager -async def rippled(exe=DEFAULT_EXE, config_file=DEFAULT_CONFIGURATION_FILE): - """A context manager for a rippled process.""" - # Start the server. - process = await asyncio.create_subprocess_exec( - str(exe), - '--conf', - str(config_file), - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL, - ) - logging.info(f'rippled started with pid {process.pid}') - try: - yield process - finally: - # Ask it to stop. - logging.info(f'asking rippled (pid: {process.pid}) to stop') - start = time.time() - process.terminate() - - # Wait nicely. - try: - await asyncio.wait_for(process.wait(), PATIENCE) - except asyncio.TimeoutError: - # Ask the operating system to kill it. - logging.warning(f'killing rippled ({process.pid})') - try: - process.kill() - except ProcessLookupError: - pass - - code = await process.wait() - end = time.time() - logging.info( - f'rippled stopped after {end - start:.1f} seconds with code {code}' - ) - - -async def sync( - port, - *, - duration=DEFAULT_SYNC_DURATION, - interval=DEFAULT_POLL_INTERVAL, -): - """Poll rippled on an interval until it has been synced for a duration.""" - start = time.perf_counter() - while (time.perf_counter() - start) < duration: - await asyncio.sleep(interval) - - request = urllib.request.Request( - f'http://127.0.0.1:{port}', - data=json.dumps({ - 'method': 'server_state' - }).encode(), - headers={'Content-Type': 'application/json'}, - ) - with urllib.request.urlopen(request) as response: - try: - body = json.loads(response.read()) - except urllib.error.HTTPError as cause: - logging.warning(f'server_state returned not JSON: {cause}') - start = time.perf_counter() - continue - - try: - state = body['result']['state']['server_state'] - except KeyError as cause: - logging.warning(f'server_state response missing key: {cause.key}') - start = time.perf_counter() - continue - logging.info(f'server_state: {state}') - if state not in SYNC_STATES: - # Require a contiguous sync state. - start = time.perf_counter() - - -async def loop(test, - *, - exe=DEFAULT_EXE, - config_file=DEFAULT_CONFIGURATION_FILE): - """ - Start-test-stop rippled in an infinite loop. 
- - Moves log to a different file after each iteration. - """ - log_file = find_log_file(config_file) - id = 0 - while True: - logging.info(f'iteration: {id}') - async with rippled(exe, config_file) as process: - start = time.perf_counter() - exited = asyncio.create_task(process.wait()) - tested = asyncio.create_task(test()) - # Try to sync as long as the process is running. - done, pending = await asyncio.wait( - {exited, tested}, - return_when=asyncio.FIRST_COMPLETED, - ) - if done == {exited}: - code = exited.result() - logging.warning( - f'server halted for unknown reason with code {code}') - else: - assert done == {tested} - assert tested.exception() is None - end = time.perf_counter() - logging.info(f'synced after {end - start:.0f} seconds') - os.replace(log_file, f'debug.{id}.log') - id += 1 - - -logging.basicConfig( - format='%(asctime)s %(levelname)-8s %(message)s', - level=logging.INFO, - datefmt='%Y-%m-%d %H:%M:%S', -) - -parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter) -parser.add_argument( - 'rippled', - type=Path, - nargs='?', - default=DEFAULT_EXE, - help='Path to rippled.', -) -parser.add_argument( - '--conf', - type=Path, - default=DEFAULT_CONFIGURATION_FILE, - help='Path to configuration file.', -) -parser.add_argument( - '--duration', - type=int, - default=DEFAULT_SYNC_DURATION, - help='Number of contiguous seconds required in a synchronized state.', -) -parser.add_argument( - '--interval', - type=int, - default=DEFAULT_POLL_INTERVAL, - help='Number of seconds to wait between polls of state.', -) -args = parser.parse_args() - -port = find_http_port(args.conf) - - -def test(): - return sync(port, duration=args.duration, interval=args.interval) - - -try: - asyncio.run(loop(test, exe=args.rippled, config_file=args.conf)) -except KeyboardInterrupt: - # Squelch the message. This is a normal mode of exit. 
- pass diff --git a/bin/stop-test.js b/bin/stop-test.js deleted file mode 100644 index 45aa15e271..0000000000 --- a/bin/stop-test.js +++ /dev/null @@ -1,133 +0,0 @@ -/* -------------------------------- REQUIRES -------------------------------- */ - -var child = require("child_process"); -var assert = require("assert"); - -/* --------------------------------- CONFIG --------------------------------- */ - -if (process.argv[2] == null) { - [ - 'Usage: ', - '', - ' `node bin/stop-test.js i,j [rippled_path] [rippled_conf]`', - '', - ' Launch rippled and stop it after n seconds for all n in [i, j}', - ' For all even values of n launch rippled with `--fg`', - ' For values of n where n % 3 == 0 launch rippled with `--fg`\n', - 'Examples: ', - '', - ' $ node bin/stop-test.js 5,10', - (' $ node bin/stop-test.js 1,4 ' + - 'build/clang.debug/rippled $HOME/.confs/rippled.cfg') - ] - .forEach(function(l){console.log(l)}); - - process.exit(); -} else { - var testRange = process.argv[2].split(',').map(Number); - var rippledPath = process.argv[3] || 'build/rippled' - var rippledConf = process.argv[4] || 'rippled.cfg' -} - -var options = { - env: process.env, - stdio: 'ignore' // we could dump the child io when it fails abnormally -}; - -// default args -var conf_args = ['--conf='+rippledConf]; -var start_args = conf_args.concat([/*'--net'*/]) -var stop_args = conf_args.concat(['stop']); - -/* --------------------------------- HELPERS -------------------------------- */ - -function start(args) { - return child.spawn(rippledPath, args, options); -} -function stop(rippled) { child.execFile(rippledPath, stop_args, options)} -function secs_l8r(ms, f) {setTimeout(f, ms * 1000); } - -function show_results_and_exit(results) { - console.log(JSON.stringify(results, undefined, 2)); - process.exit(); -} - -var timeTakes = function (range) { - function sumRange(n) {return (n+1) * n /2} - var ret = sumRange(range[1]); - if (range[0] > 1) { - ret = ret - sumRange(range[0] - 1) - } - var stopping = (range[1] - range[0]) * 0.5; - return ret + stopping; -} - -/* ---------------------------------- TEST ---------------------------------- */ - -console.log("Test will take ~%s seconds", timeTakes(testRange)); - -(function oneTest(n /* seconds */, results) { - if (n >= testRange[1]) { - // show_results_and_exit(results); - console.log(JSON.stringify(results, undefined, 2)); - oneTest(testRange[0], []); - return; - } - - var args = start_args; - if (n % 2 == 0) {args = args.concat(['--fg'])} - if (n % 3 == 0) {args = args.concat(['--net'])} - - var result = {args: args, alive_for: n}; - results.push(result); - - console.log("\nLaunching `%s` with `%s` for %d seconds", - rippledPath, JSON.stringify(args), n); - - rippled = start(args); - console.log("Rippled pid: %d", rippled.pid); - - // defaults - var b4StopSent = false; - var stopSent = false; - var stop_took = null; - - rippled.once('exit', function(){ - if (!stopSent && !b4StopSent) { - console.warn('\nRippled exited itself b4 stop issued'); - process.exit(); - }; - - // The io handles close AFTER exit, may have implications for - // `stdio:'inherit'` option to `child.spawn`. - rippled.once('close', function() { - result.stop_took = (+new Date() - stop_took) / 1000; // seconds - console.log("Stopping after %d seconds took %s seconds", - n, result.stop_took); - oneTest(n+1, results); - }); - }); - - secs_l8r(n, function(){ - console.log("Stopping rippled after %d seconds", n); - - // possible race here ? 
- // seems highly unlikely, but I was having issues at one point - b4StopSent=true; - stop_took = (+new Date()); - // when does `exit` actually get sent? - stop(); - stopSent=true; - - // Sometimes we want to attach with a debugger. - if (process.env.ABORT_TESTS_ON_STALL != null) { - // We wait 30 seconds, and if it hasn't stopped, we abort the process - secs_l8r(30, function() { - if (result.stop_took == null) { - console.log("rippled has stalled"); - process.exit(); - }; - }); - } - }) -}(testRange[0], [])); \ No newline at end of file diff --git a/bin/update_binformat.js b/bin/update_binformat.js deleted file mode 100644 index 7987f72c82..0000000000 --- a/bin/update_binformat.js +++ /dev/null @@ -1,119 +0,0 @@ -/** - * bin/update_bintypes.js - * - * This unholy abomination of a script generates the JavaScript file - * src/js/bintypes.js from various parts of the C++ source code. - * - * This should *NOT* be part of any automatic build process unless the C++ - * source data are brought into a more easily parseable format. Until then, - * simply run this script manually and fix as needed. - */ - -// XXX: Process LedgerFormats.(h|cpp) as well. - -var filenameProto = __dirname + '/../src/cpp/ripple/SerializeProto.h', - filenameTxFormatsH = __dirname + '/../src/cpp/ripple/TransactionFormats.h', - filenameTxFormats = __dirname + '/../src/cpp/ripple/TransactionFormats.cpp'; - -var fs = require('fs'); - -var output = []; - -// Stage 1: Get the field types and codes from SerializeProto.h -var types = {}, - fields = {}; -String(fs.readFileSync(filenameProto)).split('\n').forEach(function (line) { - line = line.replace(/^\s+|\s+$/g, '').replace(/\s+/g, ''); - if (!line.length || line.slice(0, 2) === '//' || line.slice(-1) !== ')') return; - - var tmp = line.slice(0, -1).split('('), - type = tmp[0], - opts = tmp[1].split(','); - - if (type === 'TYPE') types[opts[1]] = [opts[0], +opts[2]]; - else if (type === 'FIELD') fields[opts[0]] = [types[opts[1]][0], +opts[2]]; -}); - -output.push('var ST = require("./serializedtypes");'); -output.push(''); -output.push('var REQUIRED = exports.REQUIRED = 0,'); -output.push(' OPTIONAL = exports.OPTIONAL = 1,'); -output.push(' DEFAULT = exports.DEFAULT = 2;'); -output.push(''); - -function pad(s, n) { while (s.length < n) s += ' '; return s; } -function padl(s, n) { while (s.length < n) s = ' '+s; return s; } - -Object.keys(types).forEach(function (type) { - output.push(pad('ST.'+types[type][0]+'.id', 25) + ' = '+types[type][1]+';'); -}); -output.push(''); - -// Stage 2: Get the transaction type IDs from TransactionFormats.h -var ttConsts = {}; -String(fs.readFileSync(filenameTxFormatsH)).split('\n').forEach(function (line) { - var regex = /tt([A-Z_]+)\s+=\s+([0-9-]+)/; - var match = line.match(regex); - if (match) ttConsts[match[1]] = +match[2]; -}); - -// Stage 3: Get the transaction formats from TransactionFormats.cpp -var base = [], - sections = [], - current = base; -String(fs.readFileSync(filenameTxFormats)).split('\n').forEach(function (line) { - line = line.replace(/^\s+|\s+$/g, '').replace(/\s+/g, ''); - - var d_regex = /DECLARE_TF\(([A-Za-z]+),tt([A-Z_]+)/; - var d_match = line.match(d_regex); - - var s_regex = /SOElement\(sf([a-z]+),SOE_(REQUIRED|OPTIONAL|DEFAULT)/i; - var s_match = line.match(s_regex); - - if (d_match) sections.push(current = [d_match[1], ttConsts[d_match[2]]]); - else if (s_match) current.push([s_match[1], s_match[2]]); -}); - -function removeFinalComma(arr) { - arr[arr.length-1] = arr[arr.length-1].slice(0, -1); -} - 
-output.push('var base = ['); -base.forEach(function (field) { - var spec = fields[field[0]]; - output.push(' [ '+ - pad("'"+field[0]+"'", 21)+', '+ - pad(field[1], 8)+', '+ - padl(""+spec[1], 2)+', '+ - 'ST.'+pad(spec[0], 3)+ - ' ],'); -}); -removeFinalComma(output); -output.push('];'); -output.push(''); - - -output.push('exports.tx = {'); -sections.forEach(function (section) { - var name = section.shift(), - ttid = section.shift(); - - output.push(' '+name+': ['+ttid+'].concat(base, ['); - section.forEach(function (field) { - var spec = fields[field[0]]; - output.push(' [ '+ - pad("'"+field[0]+"'", 21)+', '+ - pad(field[1], 8)+', '+ - padl(""+spec[1], 2)+', '+ - 'ST.'+pad(spec[0], 3)+ - ' ],'); - }); - removeFinalComma(output); - output.push(' ]),'); -}); -removeFinalComma(output); -output.push('};'); -output.push(''); - -console.log(output.join('\n')); - diff --git a/conanfile.py b/conanfile.py index 399c9d6e1f..ab4657277c 100644 --- a/conanfile.py +++ b/conanfile.py @@ -117,7 +117,6 @@ class Xrpl(ConanFile): exports_sources = ( 'CMakeLists.txt', - 'bin/getRippledInfo', 'cfg/*', 'cmake/*', 'external/*', From abf12db788ccb6b5cae439872bf1d09b98a6c24b Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Mon, 11 Aug 2025 14:02:03 +0100 Subject: [PATCH 113/244] chore: Set CONAN_REMOTE_URL also for forks (#5662) This change replaces the configuration variable with the hardcoded `https://conan.ripplex.io`, making it possible for PRs from forks to use our Conan remote containing workarounds. --- .github/workflows/libxrpl.yml | 2 +- .github/workflows/macos.yml | 2 +- .github/workflows/nix.yml | 2 +- .github/workflows/windows.yml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/libxrpl.yml b/.github/workflows/libxrpl.yml index 8223b7996f..40797ddee5 100644 --- a/.github/workflows/libxrpl.yml +++ b/.github/workflows/libxrpl.yml @@ -1,6 +1,6 @@ name: Check libXRPL compatibility with Clio env: - CONAN_REMOTE_URL: ${{ vars.CONAN_REMOTE_URL }} + CONAN_REMOTE_URL: https://conan.ripplex.io CONAN_LOGIN_USERNAME_XRPLF: ${{ secrets.CONAN_REMOTE_USERNAME }} CONAN_PASSWORD_XRPLF: ${{ secrets.CONAN_REMOTE_PASSWORD }} on: diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 6c6091cc2e..3cbe4c4197 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -18,7 +18,7 @@ concurrency: # This part of Conan configuration is specific to this workflow only; we do not want # to pollute conan/profiles directory with settings which might not work for others env: - CONAN_REMOTE_URL: ${{ vars.CONAN_REMOTE_URL }} + CONAN_REMOTE_URL: https://conan.ripplex.io CONAN_REMOTE_USERNAME: ${{ secrets.CONAN_REMOTE_USERNAME }} CONAN_REMOTE_PASSWORD: ${{ secrets.CONAN_REMOTE_PASSWORD }} # This part of the Conan configuration is specific to this workflow only; we diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml index 5beb5d291d..5ee5f317f2 100644 --- a/.github/workflows/nix.yml +++ b/.github/workflows/nix.yml @@ -17,7 +17,7 @@ concurrency: cancel-in-progress: true env: - CONAN_REMOTE_URL: ${{ vars.CONAN_REMOTE_URL }} + CONAN_REMOTE_URL: https://conan.ripplex.io CONAN_REMOTE_USERNAME: ${{ secrets.CONAN_REMOTE_USERNAME }} CONAN_REMOTE_PASSWORD: ${{ secrets.CONAN_REMOTE_PASSWORD }} # This part of the Conan configuration is specific to this workflow only; we diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 9e2322b119..90446e8135 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ 
-19,7 +19,7 @@ concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true env: - CONAN_REMOTE_URL: ${{ vars.CONAN_REMOTE_URL }} + CONAN_REMOTE_URL: https://conan.ripplex.io CONAN_REMOTE_USERNAME: ${{ secrets.CONAN_REMOTE_USERNAME }} CONAN_REMOTE_PASSWORD: ${{ secrets.CONAN_REMOTE_PASSWORD }} # This part of the Conan configuration is specific to this workflow only; we From 97f0747e103f13e26e45b731731059b32f7679ac Mon Sep 17 00:00:00 2001 From: Mayukha Vadari Date: Mon, 11 Aug 2025 12:15:42 -0400 Subject: [PATCH 114/244] chore: Run prettier on all files (#5657) --- .clang-format | 81 +- .codecov.yml | 2 +- .github/ISSUE_TEMPLATE/bug_report.md | 13 +- .github/ISSUE_TEMPLATE/feature_request.md | 8 +- .github/workflows/libxrpl.yml | 6 +- .github/workflows/macos.yml | 5 +- .github/workflows/missing-commits.yml | 84 +- .github/workflows/nix.yml | 96 +- .github/workflows/windows.yml | 11 +- .pre-commit-config.yaml | 8 +- BUILD.md | 179 +- Builds/levelization/README.md | 68 +- CONTRIBUTING.md | 292 +- LICENSE.md | 3 +- README.md | 22 +- SECURITY.md | 14 +- docs/0001-negative-unl/README.md | 358 +- docs/0010-ledger-replay/README.md | 2 + docs/CheatSheet.md | 4 +- docs/CodingStyle.md | 66 +- docs/HeapProfiling.md | 3 +- docs/README.md | 22 +- docs/build/conan.md | 14 +- docs/build/depend.md | 5 +- docs/build/environment.md | 4 +- docs/build/install.md | 42 +- docs/consensus.md | 232 +- external/README.md | 10 +- external/antithesis-sdk/README.md | 7 +- external/ed25519-donna/README.md | 97 +- external/ed25519-donna/fuzz/README.md | 99 +- external/secp256k1/CHANGELOG.md | 144 +- external/secp256k1/CMakePresets.json | 6 +- external/secp256k1/CONTRIBUTING.md | 74 +- external/secp256k1/README.md | 130 +- external/secp256k1/SECURITY.md | 10 +- external/secp256k1/doc/ellswift.md | 410 +- external/secp256k1/doc/musig.md | 3 +- external/secp256k1/doc/release-process.md | 40 +- .../secp256k1/doc/safegcd_implementation.md | 301 +- .../ecdsa_secp256k1_sha256_bitcoin_test.json | 8086 ++++++++--------- include/xrpl/basics/README.md | 43 +- include/xrpl/proto/org/xrpl/rpc/v1/README.md | 1 - include/xrpl/protocol/README.md | 6 +- include/xrpl/resource/README.md | 32 +- src/test/README.md | 5 +- src/test/csf/README.md | 27 +- src/tests/README.md | 1 + src/xrpld/app/consensus/README.md | 15 +- src/xrpld/app/ledger/README.md | 249 +- src/xrpld/app/misc/FeeEscalation.md | 279 +- src/xrpld/app/misc/README.md | 34 +- src/xrpld/app/rdb/README.md | 46 +- src/xrpld/consensus/README.md | 5 +- src/xrpld/nodestore/README.md | 154 +- src/xrpld/overlay/README.md | 146 +- src/xrpld/peerfinder/README.md | 76 +- src/xrpld/rpc/README.md | 17 +- src/xrpld/shamap/README.md | 173 +- tests/README.md | 1 + 60 files changed, 6244 insertions(+), 6127 deletions(-) diff --git a/.clang-format b/.clang-format index 7b0fda27c9..cfd991e64b 100644 --- a/.clang-format +++ b/.clang-format @@ -1,5 +1,5 @@ --- -Language: Cpp +Language: Cpp AccessModifierOffset: -4 AlignAfterOpenBracket: AlwaysBreak AlignConsecutiveAssignments: false @@ -19,52 +19,52 @@ AlwaysBreakTemplateDeclarations: true BinPackArguments: false BinPackParameters: false BraceWrapping: - AfterClass: true + AfterClass: true AfterControlStatement: true - AfterEnum: false - AfterFunction: true - AfterNamespace: false + AfterEnum: false + AfterFunction: true + AfterNamespace: false AfterObjCDeclaration: true - AfterStruct: true - AfterUnion: true - BeforeCatch: true - BeforeElse: true - IndentBraces: false + AfterStruct: true + AfterUnion: true + BeforeCatch: 
true + BeforeElse: true + IndentBraces: false BreakBeforeBinaryOperators: false BreakBeforeBraces: Custom BreakBeforeTernaryOperators: true BreakConstructorInitializersBeforeComma: true -ColumnLimit: 80 -CommentPragmas: '^ IWYU pragma:' +ColumnLimit: 80 +CommentPragmas: "^ IWYU pragma:" ConstructorInitializerAllOnOneLineOrOnePerLine: true ConstructorInitializerIndentWidth: 4 ContinuationIndentWidth: 4 Cpp11BracedListStyle: true DerivePointerAlignment: false -DisableFormat: false +DisableFormat: false ExperimentalAutoDetectBinPacking: false -ForEachMacros: [ Q_FOREACH, BOOST_FOREACH ] -IncludeBlocks: Regroup +ForEachMacros: [Q_FOREACH, BOOST_FOREACH] +IncludeBlocks: Regroup IncludeCategories: - - Regex: '^<(test)/' - Priority: 0 - - Regex: '^<(xrpld)/' - Priority: 1 - - Regex: '^<(xrpl)/' - Priority: 2 - - Regex: '^<(boost)/' - Priority: 3 - - Regex: '^.*/' - Priority: 4 - - Regex: '^.*\.h' - Priority: 5 - - Regex: '.*' - Priority: 6 -IncludeIsMainRegex: '$' + - Regex: "^<(test)/" + Priority: 0 + - Regex: "^<(xrpld)/" + Priority: 1 + - Regex: "^<(xrpl)/" + Priority: 2 + - Regex: "^<(boost)/" + Priority: 3 + - Regex: "^.*/" + Priority: 4 + - Regex: '^.*\.h' + Priority: 5 + - Regex: ".*" + Priority: 6 +IncludeIsMainRegex: "$" IndentCaseLabels: true IndentFunctionDeclarationAfterType: false IndentRequiresClause: true -IndentWidth: 4 +IndentWidth: 4 IndentWrappedFunctionNames: false KeepEmptyLinesAtTheStartOfBlocks: false MaxEmptyLinesToKeep: 1 @@ -78,20 +78,25 @@ PenaltyBreakString: 1000 PenaltyExcessCharacter: 1000000 PenaltyReturnTypeOnItsOwnLine: 200 PointerAlignment: Left -ReflowComments: true +ReflowComments: true RequiresClausePosition: OwnLine -SortIncludes: true +SortIncludes: true SpaceAfterCStyleCast: false SpaceBeforeAssignmentOperators: true SpaceBeforeParens: ControlStatements SpaceInEmptyParentheses: false SpacesBeforeTrailingComments: 2 -SpacesInAngles: false +SpacesInAngles: false SpacesInContainerLiterals: true SpacesInCStyleCastParentheses: false SpacesInParentheses: false SpacesInSquareBrackets: false -Standard: Cpp11 -TabWidth: 8 -UseTab: Never -QualifierAlignment: Right \ No newline at end of file +Standard: Cpp11 +TabWidth: 8 +UseTab: Never +QualifierAlignment: Right +--- +Language: JavaScript +--- +Language: Json +IndentWidth: 2 diff --git a/.codecov.yml b/.codecov.yml index b97039e8b6..d28d7c80df 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -27,7 +27,7 @@ github_checks: parsers: cobertura: partials_as_hits: true - handle_missing_conditions : true + handle_missing_conditions: true slack_app: false diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index e2df996005..cc921f5a55 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -2,30 +2,35 @@ name: Bug Report about: Create a report to help us improve rippled title: "[Title with short description] (Version: [rippled version])" -labels: '' -assignees: '' - +labels: "" +assignees: "" --- + ## Issue Description + ## Steps to Reproduce + ## Expected Result + ## Actual Result + ## Environment + ## Supporting Files + - diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 2e19746f52..967b3c1817 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -3,19 +3,23 @@ name: Feature Request about: Suggest a new feature for the rippled project title: "[Title with short description] (Version: [rippled version])" labels: Feature Request -assignees: '' - 
+assignees: "" --- + ## Summary + ## Motivation + ## Solution + ## Paths Not Taken + diff --git a/.github/workflows/libxrpl.yml b/.github/workflows/libxrpl.yml index 40797ddee5..5880c03d71 100644 --- a/.github/workflows/libxrpl.yml +++ b/.github/workflows/libxrpl.yml @@ -6,8 +6,8 @@ env: on: pull_request: paths: - - 'src/libxrpl/protocol/BuildInfo.cpp' - - '.github/workflows/libxrpl.yml' + - "src/libxrpl/protocol/BuildInfo.cpp" + - ".github/workflows/libxrpl.yml" types: [opened, reopened, synchronize, ready_for_review] concurrency: group: ${{ github.workflow }}-${{ github.ref }} @@ -29,7 +29,7 @@ jobs: with: ref: ${{ github.event.pull_request.head.sha || github.sha }} running-workflow-name: wait-for-check-regexp - check-regexp: '(dependencies|test).*linux.*' # Ignore windows and mac tests but make sure linux passes + check-regexp: "(dependencies|test).*linux.*" # Ignore windows and mac tests but make sure linux passes repo-token: ${{ secrets.GITHUB_TOKEN }} wait-interval: 10 - name: Checkout diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 3cbe4c4197..73e25c357f 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -11,7 +11,7 @@ on: - release - master # Branches that opt-in to running - - 'ci/**' + - "ci/**" concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true @@ -32,7 +32,6 @@ env: tools.compilation:verbosity=verbose jobs: - test: if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }} strategy: @@ -89,7 +88,7 @@ jobs: sysctl -n hw.logicalcpu clang --version - name: configure Conan - run : | + run: | echo "${CONAN_GLOBAL_CONF}" > $(conan config home)/global.conf conan config install conan/profiles/ -tf $(conan config home)/profiles/ conan profile show diff --git a/.github/workflows/missing-commits.yml b/.github/workflows/missing-commits.yml index 8715671f33..ed478a2327 100644 --- a/.github/workflows/missing-commits.yml +++ b/.github/workflows/missing-commits.yml @@ -12,49 +12,49 @@ jobs: up_to_date: runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - name: Check for missing commits - id: commits - env: - SUGGESTION: | + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Check for missing commits + id: commits + env: + SUGGESTION: | - If you are reading this, then the commits indicated above are - missing from "develop" and/or "release". Do a reverse-merge - as soon as possible. See CONTRIBUTING.md for instructions. - run: | - set -o pipefail - # Branches ordered by how "canonical" they are. Every commit in - # one branch should be in all the branches behind it - order=( master release develop ) - branches=() - for branch in "${order[@]}" - do - # Check that the branches exist so that this job will work on - # forked repos, which don't necessarily have master and - # release branches. - if git ls-remote --exit-code --heads origin \ - refs/heads/${branch} > /dev/null - then - branches+=( origin/${branch} ) - fi - done + If you are reading this, then the commits indicated above are + missing from "develop" and/or "release". Do a reverse-merge + as soon as possible. See CONTRIBUTING.md for instructions. + run: | + set -o pipefail + # Branches ordered by how "canonical" they are. 
Every commit in + # one branch should be in all the branches behind it + order=( master release develop ) + branches=() + for branch in "${order[@]}" + do + # Check that the branches exist so that this job will work on + # forked repos, which don't necessarily have master and + # release branches. + if git ls-remote --exit-code --heads origin \ + refs/heads/${branch} > /dev/null + then + branches+=( origin/${branch} ) + fi + done - prior=() - for branch in "${branches[@]}" - do - if [[ ${#prior[@]} -ne 0 ]] + prior=() + for branch in "${branches[@]}" + do + if [[ ${#prior[@]} -ne 0 ]] + then + echo "Checking ${prior[@]} for commits missing from ${branch}" + git log --oneline --no-merges "${prior[@]}" \ + ^$branch | tee -a "missing-commits.txt" + echo + fi + prior+=( "${branch}" ) + done + if [[ $( cat missing-commits.txt | wc -l ) -ne 0 ]] then - echo "Checking ${prior[@]} for commits missing from ${branch}" - git log --oneline --no-merges "${prior[@]}" \ - ^$branch | tee -a "missing-commits.txt" - echo + echo "${SUGGESTION}" + exit 1 fi - prior+=( "${branch}" ) - done - if [[ $( cat missing-commits.txt | wc -l ) -ne 0 ]] - then - echo "${SUGGESTION}" - exit 1 - fi diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml index 5ee5f317f2..395bd72b8d 100644 --- a/.github/workflows/nix.yml +++ b/.github/workflows/nix.yml @@ -364,59 +364,59 @@ jobs: env: build_dir: .build steps: - - name: download cache - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 - with: - name: linux-clang-Debug + - name: download cache + uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 + with: + name: linux-clang-Debug - - name: extract cache - run: | - mkdir -p ${CONAN_HOME} - tar -xzf conan.tar.gz -C ${CONAN_HOME} + - name: extract cache + run: | + mkdir -p ${CONAN_HOME} + tar -xzf conan.tar.gz -C ${CONAN_HOME} - - name: check environment - run: | - echo ${PATH} | tr ':' '\n' - conan --version - cmake --version - env | sort - ls ${CONAN_HOME} + - name: check environment + run: | + echo ${PATH} | tr ':' '\n' + conan --version + cmake --version + env | sort + ls ${CONAN_HOME} - - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + - name: checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - - name: dependencies - uses: ./.github/actions/dependencies - with: - configuration: Debug + - name: dependencies + uses: ./.github/actions/dependencies + with: + configuration: Debug - - name: prepare environment - run: | - mkdir -p ${build_dir} - echo "SOURCE_DIR=$(pwd)" >> $GITHUB_ENV - echo "BUILD_DIR=$(pwd)/${build_dir}" >> $GITHUB_ENV + - name: prepare environment + run: | + mkdir -p ${build_dir} + echo "SOURCE_DIR=$(pwd)" >> $GITHUB_ENV + echo "BUILD_DIR=$(pwd)/${build_dir}" >> $GITHUB_ENV - - name: build with instrumentation - run: | - cd ${BUILD_DIR} - cmake -S ${SOURCE_DIR} -B ${BUILD_DIR} \ - -Dvoidstar=ON \ - -Dtests=ON \ - -Dxrpld=ON \ - -DCMAKE_BUILD_TYPE=Debug \ - -DSECP256K1_BUILD_BENCHMARK=OFF \ - -DSECP256K1_BUILD_TESTS=OFF \ - -DSECP256K1_BUILD_EXHAUSTIVE_TESTS=OFF \ - -DCMAKE_TOOLCHAIN_FILE=${BUILD_DIR}/build/generators/conan_toolchain.cmake - cmake --build . 
--parallel $(nproc) + - name: build with instrumentation + run: | + cd ${BUILD_DIR} + cmake -S ${SOURCE_DIR} -B ${BUILD_DIR} \ + -Dvoidstar=ON \ + -Dtests=ON \ + -Dxrpld=ON \ + -DCMAKE_BUILD_TYPE=Debug \ + -DSECP256K1_BUILD_BENCHMARK=OFF \ + -DSECP256K1_BUILD_TESTS=OFF \ + -DSECP256K1_BUILD_EXHAUSTIVE_TESTS=OFF \ + -DCMAKE_TOOLCHAIN_FILE=${BUILD_DIR}/build/generators/conan_toolchain.cmake + cmake --build . --parallel $(nproc) - - name: verify instrumentation enabled - run: | - cd ${BUILD_DIR} - ./rippled --version | grep libvoidstar + - name: verify instrumentation enabled + run: | + cd ${BUILD_DIR} + ./rippled --version | grep libvoidstar - - name: run unit tests - run: | - cd ${BUILD_DIR} - ./rippled -u --unittest-jobs $(( $(nproc)/4 )) - ctest -j $(nproc) --output-on-failure + - name: run unit tests + run: | + cd ${BUILD_DIR} + ./rippled -u --unittest-jobs $(( $(nproc)/4 )) + ctest -j $(nproc) --output-on-failure diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 90446e8135..b81ffc8d3a 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -12,7 +12,7 @@ on: - release - master # Branches that opt-in to running - - 'ci/**' + - "ci/**" # https://docs.github.com/en/actions/using-jobs/using-concurrency concurrency: @@ -33,7 +33,6 @@ env: tools.compilation:verbosity=verbose jobs: - test: if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }} strategy: @@ -69,8 +68,8 @@ jobs: - name: restore Python cache directory uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 with: - path: ${{ steps.pip-cache.outputs.dir }} - key: ${{ runner.os }}-${{ hashFiles('.github/workflows/windows.yml') }} + path: ${{ steps.pip-cache.outputs.dir }} + key: ${{ runner.os }}-${{ hashFiles('.github/workflows/windows.yml') }} - name: install Conan run: pip install wheel conan - name: check environment @@ -93,10 +92,10 @@ jobs: - name: build uses: ./.github/actions/build with: - generator: '${{ matrix.version.generator }}' + generator: "${{ matrix.version.generator }}" configuration: ${{ matrix.configuration.type }} # Hard code for now. Move to the matrix if varied options are needed - cmake-args: '-Dassert=TRUE -Dwerr=TRUE -Dreporting=OFF -Dunity=ON' + cmake-args: "-Dassert=TRUE -Dwerr=TRUE -Dreporting=OFF -Dunity=ON" cmake-target: install - name: test shell: bash diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index abfbd887c7..7daecdb5ec 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ # .pre-commit-config.yaml repos: -- repo: https://github.com/pre-commit/mirrors-clang-format - rev: v18.1.8 - hooks: - - id: clang-format + - repo: https://github.com/pre-commit/mirrors-clang-format + rev: v18.1.8 + hooks: + - id: clang-format diff --git a/BUILD.md b/BUILD.md index e1567e297c..958bf19b8c 100644 --- a/BUILD.md +++ b/BUILD.md @@ -41,21 +41,23 @@ found here](./docs/build/environment.md). - [Conan 2.17](https://conan.io/downloads.html)[^1], or higher - [CMake 3.22](https://cmake.org/download/)[^2], or higher -[^1]: It is possible to build with Conan 1.60+, but the instructions are -significantly different, which is why we are not recommending it. +[^1]: + It is possible to build with Conan 1.60+, but the instructions are + significantly different, which is why we are not recommending it. -[^2]: CMake 4 is not yet supported by all dependencies required by this project. 
-If you are affected by this issue, follow [conan workaround for cmake -4](#workaround-for-cmake-4) +[^2]: + CMake 4 is not yet supported by all dependencies required by this project. + If you are affected by this issue, follow [conan workaround for cmake + 4](#workaround-for-cmake-4) `rippled` is written in the C++20 dialect and includes the `` header. The [minimum compiler versions][2] required are: -| Compiler | Version | -|-------------|-----| -| GCC | 12 | -| Clang | 16 | -| Apple Clang | 16 | +| Compiler | Version | +| ----------- | --------- | +| GCC | 12 | +| Clang | 16 | +| Apple Clang | 16 | | MSVC | 19.44[^3] | ### Linux @@ -92,6 +94,7 @@ unfamiliar with Conan, then please read [this crash course](./docs/build/conan.m [Getting Started][3] walkthrough. #### Default profile + We recommend that you import the provided `conan/profiles/default` profile: ```bash @@ -173,10 +176,34 @@ you need to amend the list of compiler versions in to the `version` array specific for your compiler. For example: ```yaml - apple-clang: - version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", - "9.1", "10.0", "11.0", "12.0", "13", "13.0", "13.1", "14", - "14.0", "15", "15.0", "16", "16.0", "17", "17.0"] +apple-clang: + version: + [ + "5.0", + "5.1", + "6.0", + "6.1", + "7.0", + "7.3", + "8.0", + "8.1", + "9.0", + "9.1", + "10.0", + "11.0", + "12.0", + "13", + "13.0", + "13.1", + "14", + "14.0", + "15", + "15.0", + "16", + "16.0", + "17", + "17.0", + ] ``` #### Multiple compilers @@ -189,11 +216,11 @@ For example, if you are running MacOS and have [homebrew LLVM@18](https://formulae.brew.sh/formula/llvm@18), and want to use it as a compiler in the new Conan profile: - ```bash - export CC=$(brew --prefix llvm@18)/bin/clang - export CXX=$(brew --prefix llvm@18)/bin/clang++ - conan profile detect - ``` +```bash +export CC=$(brew --prefix llvm@18)/bin/clang +export CXX=$(brew --prefix llvm@18)/bin/clang++ +conan profile detect +``` You should also explicitly set the path to the compiler in the profile file, which helps to avoid errors when `CC` and/or `CXX` are set and disagree with the @@ -343,61 +370,61 @@ tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS'] 2. Use conan to generate CMake files for every configuration you want to build: - ``` - conan install .. --output-folder . --build missing --settings build_type=Release - conan install .. --output-folder . --build missing --settings build_type=Debug - ``` + ``` + conan install .. --output-folder . --build missing --settings build_type=Release + conan install .. --output-folder . --build missing --settings build_type=Debug + ``` - To build Debug, in the next step, be sure to set `-DCMAKE_BUILD_TYPE=Debug` + To build Debug, in the next step, be sure to set `-DCMAKE_BUILD_TYPE=Debug` - For a single-configuration generator, e.g. `Unix Makefiles` or `Ninja`, - you only need to run this command once. - For a multi-configuration generator, e.g. `Visual Studio`, you may want to - run it more than once. + For a single-configuration generator, e.g. `Unix Makefiles` or `Ninja`, + you only need to run this command once. + For a multi-configuration generator, e.g. `Visual Studio`, you may want to + run it more than once. - Each of these commands should also have a different `build_type` setting. - A second command with the same `build_type` setting will overwrite the files - generated by the first. 
You can pass the build type on the command line with - `--settings build_type=$BUILD_TYPE` or in the profile itself, - under the section `[settings]` with the key `build_type`. + Each of these commands should also have a different `build_type` setting. + A second command with the same `build_type` setting will overwrite the files + generated by the first. You can pass the build type on the command line with + `--settings build_type=$BUILD_TYPE` or in the profile itself, + under the section `[settings]` with the key `build_type`. - If you are using a Microsoft Visual C++ compiler, - then you will need to ensure consistency between the `build_type` setting - and the `compiler.runtime` setting. + If you are using a Microsoft Visual C++ compiler, + then you will need to ensure consistency between the `build_type` setting + and the `compiler.runtime` setting. - When `build_type` is `Release`, `compiler.runtime` should be `MT`. + When `build_type` is `Release`, `compiler.runtime` should be `MT`. - When `build_type` is `Debug`, `compiler.runtime` should be `MTd`. + When `build_type` is `Debug`, `compiler.runtime` should be `MTd`. - ``` - conan install .. --output-folder . --build missing --settings build_type=Release --settings compiler.runtime=MT - conan install .. --output-folder . --build missing --settings build_type=Debug --settings compiler.runtime=MTd - ``` + ``` + conan install .. --output-folder . --build missing --settings build_type=Release --settings compiler.runtime=MT + conan install .. --output-folder . --build missing --settings build_type=Debug --settings compiler.runtime=MTd + ``` 3. Configure CMake and pass the toolchain file generated by Conan, located at `$OUTPUT_FOLDER/build/generators/conan_toolchain.cmake`. - Single-config generators: + Single-config generators: - Pass the CMake variable [`CMAKE_BUILD_TYPE`][build_type] - and make sure it matches the one of the `build_type` settings - you chose in the previous step. + Pass the CMake variable [`CMAKE_BUILD_TYPE`][build_type] + and make sure it matches the one of the `build_type` settings + you chose in the previous step. - For example, to build Debug, in the next command, replace "Release" with "Debug" + For example, to build Debug, in the next command, replace "Release" with "Debug" - ``` - cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release -Dxrpld=ON -Dtests=ON .. - ``` + ``` + cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release -Dxrpld=ON -Dtests=ON .. + ``` - Multi-config generators: + Multi-config generators: - ``` - cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -Dxrpld=ON -Dtests=ON .. - ``` + ``` + cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -Dxrpld=ON -Dtests=ON .. + ``` - **Note:** You can pass build options for `rippled` in this step. + **Note:** You can pass build options for `rippled` in this step. -5. Build `rippled`. +4. Build `rippled`. For a single-configuration generator, it will build whatever configuration you passed for `CMAKE_BUILD_TYPE`. For a multi-configuration generator, you @@ -416,7 +443,7 @@ tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS'] cmake --build . --config Debug ``` -6. Test rippled. +5. Test rippled. Single-config generators: @@ -438,7 +465,6 @@ tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS'] The location of `rippled` binary in your build directory depends on your CMake generator. 
Pass `--help` to see the rest of the command line options. - ## Coverage report The coverage report is intended for developers using compilers GCC @@ -478,7 +504,7 @@ variable in `cmake`. The specific command line used to run the `gcovr` tool will displayed if the `CODE_COVERAGE_VERBOSE` variable is set. By default, the code coverage tool runs parallel unit tests with `--unittest-jobs` - set to the number of available CPU cores. This may cause spurious test +set to the number of available CPU cores. This may cause spurious test errors on Apple. Developers can override the number of unit test jobs with the `coverage_test_parallelism` variable in `cmake`. @@ -497,26 +523,24 @@ stored inside the build directory, as either of: - file named `coverage.`_extension_, with a suitable extension for the report format, or - directory named `coverage`, with the `index.html` and other files inside, for the `html-details` or `html-nested` report formats. - ## Options -| Option | Default Value | Description | -| --- | ---| ---| -| `assert` | OFF | Enable assertions. | -| `coverage` | OFF | Prepare the coverage report. | -| `san` | N/A | Enable a sanitizer with Clang. Choices are `thread` and `address`. | -| `tests` | OFF | Build tests. | -| `unity` | OFF | Configure a unity build. | -| `xrpld` | OFF | Build the xrpld (`rippled`) application, and not just the libxrpl library. | -| `werr` | OFF | Treat compilation warnings as errors | -| `wextra` | OFF | Enable additional compilation warnings | +| Option | Default Value | Description | +| ---------- | ------------- | -------------------------------------------------------------------------- | +| `assert` | OFF | Enable assertions. | +| `coverage` | OFF | Prepare the coverage report. | +| `san` | N/A | Enable a sanitizer with Clang. Choices are `thread` and `address`. | +| `tests` | OFF | Build tests. | +| `unity` | OFF | Configure a unity build. | +| `xrpld` | OFF | Build the xrpld (`rippled`) application, and not just the libxrpl library. | +| `werr` | OFF | Treat compilation warnings as errors | +| `wextra` | OFF | Enable additional compilation warnings | [Unity builds][5] may be faster for the first build (at the cost of much more memory) since they concatenate sources into fewer translation units. Non-unity builds may be faster for incremental builds, and can be helpful for detecting `#include` omissions. - ## Troubleshooting ### Conan @@ -565,16 +589,15 @@ If you want to experiment with a new package, follow these steps: 1. Search for the package on [Conan Center](https://conan.io/center/). 2. Modify [`conanfile.py`](./conanfile.py): - - Add a version of the package to the `requires` property. - - Change any default options for the package by adding them to the - `default_options` property (with syntax `'$package:$option': $value`). + - Add a version of the package to the `requires` property. + - Change any default options for the package by adding them to the + `default_options` property (with syntax `'$package:$option': $value`). 3. Modify [`CMakeLists.txt`](./CMakeLists.txt): - - Add a call to `find_package($package REQUIRED)`. - - Link a library from the package to the target `ripple_libs` - (search for the existing call to `target_link_libraries(ripple_libs INTERFACE ...)`). + - Add a call to `find_package($package REQUIRED)`. + - Link a library from the package to the target `ripple_libs` + (search for the existing call to `target_link_libraries(ripple_libs INTERFACE ...)`). 4. Start coding! 
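As a rough illustration of the coverage workflow described above (a sketch only, not part of this patch: the toolchain file path and the `coverage` build target name are assumptions, and your generator or Conan output folder may differ), the configure-and-report steps might look like:

```
# Sketch: configure a Debug build with coverage instrumentation enabled,
# limiting the coverage run to two parallel unit-test jobs.
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
      -DCMAKE_BUILD_TYPE=Debug \
      -Dcoverage=ON -Dtests=ON -Dxrpld=ON \
      -Dcoverage_test_parallelism=2 ..
# Build the (assumed) coverage target, which runs the unit tests and
# invokes gcovr to write the report into the build directory.
cmake --build . --target coverage
```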
Don't forget to include whatever headers you need from the package. - [1]: https://github.com/conan-io/conan-center-index/issues/13168 [2]: https://en.cppreference.com/w/cpp/compiler_support/20 [3]: https://docs.conan.io/en/latest/getting_started.html diff --git a/Builds/levelization/README.md b/Builds/levelization/README.md index 4ff3a54236..93aa316b61 100644 --- a/Builds/levelization/README.md +++ b/Builds/levelization/README.md @@ -25,28 +25,28 @@ more dependencies listed later. **tl;dr:** The modules listed first are more independent than the modules listed later. -| Level / Tier | Module(s) | -|--------------|-----------------------------------------------| -| 01 | ripple/beast ripple/unity -| 02 | ripple/basics -| 03 | ripple/json ripple/crypto -| 04 | ripple/protocol -| 05 | ripple/core ripple/conditions ripple/consensus ripple/resource ripple/server -| 06 | ripple/peerfinder ripple/ledger ripple/nodestore ripple/net -| 07 | ripple/shamap ripple/overlay -| 08 | ripple/app -| 09 | ripple/rpc -| 10 | ripple/perflog -| 11 | test/jtx test/beast test/csf -| 12 | test/unit_test -| 13 | test/crypto test/conditions test/json test/resource test/shamap test/peerfinder test/basics test/overlay -| 14 | test -| 15 | test/net test/protocol test/ledger test/consensus test/core test/server test/nodestore -| 16 | test/rpc test/app +| Level / Tier | Module(s) | +| ------------ | -------------------------------------------------------------------------------------------------------- | +| 01 | ripple/beast ripple/unity | +| 02 | ripple/basics | +| 03 | ripple/json ripple/crypto | +| 04 | ripple/protocol | +| 05 | ripple/core ripple/conditions ripple/consensus ripple/resource ripple/server | +| 06 | ripple/peerfinder ripple/ledger ripple/nodestore ripple/net | +| 07 | ripple/shamap ripple/overlay | +| 08 | ripple/app | +| 09 | ripple/rpc | +| 10 | ripple/perflog | +| 11 | test/jtx test/beast test/csf | +| 12 | test/unit_test | +| 13 | test/crypto test/conditions test/json test/resource test/shamap test/peerfinder test/basics test/overlay | +| 14 | test | +| 15 | test/net test/protocol test/ledger test/consensus test/core test/server test/nodestore | +| 16 | test/rpc test/app | -(Note that `test` levelization is *much* less important and *much* less +(Note that `test` levelization is _much_ less important and _much_ less strictly enforced than `ripple` levelization, other than the requirement -that `test` code should *never* be included in `ripple` code.) +that `test` code should _never_ be included in `ripple` code.) ## Validation @@ -59,48 +59,48 @@ the rippled source. The only caveat is that it runs much slower under Windows than in Linux. It hasn't yet been tested under MacOS. It generates many files of [results](results): -* `rawincludes.txt`: The raw dump of the `#includes` -* `paths.txt`: A second dump grouping the source module +- `rawincludes.txt`: The raw dump of the `#includes` +- `paths.txt`: A second dump grouping the source module to the destination module, deduped, and with frequency counts. -* `includes/`: A directory where each file represents a module and +- `includes/`: A directory where each file represents a module and contains a list of modules and counts that the module _includes_. -* `includedby/`: Similar to `includes/`, but the other way around. Each +- `includedby/`: Similar to `includes/`, but the other way around. Each file represents a module and contains a list of modules and counts that _include_ the module. 
-* [`loops.txt`](results/loops.txt): A list of direct loops detected +- [`loops.txt`](results/loops.txt): A list of direct loops detected between modules as they actually exist, as opposed to how they are desired as described above. In a perfect repo, this file will be empty. This file is committed to the repo, and is used by the [levelization Github workflow](../../.github/workflows/levelization.yml) to validate that nothing changed. -* [`ordering.txt`](results/ordering.txt): A list showing relationships +- [`ordering.txt`](results/ordering.txt): A list showing relationships between modules where there are no loops as they actually exist, as opposed to how they are desired as described above. This file is committed to the repo, and is used by the [levelization Github workflow](../../.github/workflows/levelization.yml) to validate that nothing changed. -* [`levelization.yml`](../../.github/workflows/levelization.yml) +- [`levelization.yml`](../../.github/workflows/levelization.yml) Github Actions workflow to test that levelization loops haven't - changed. Unfortunately, if changes are detected, it can't tell if + changed. Unfortunately, if changes are detected, it can't tell if they are improvements or not, so if you have resolved any issues or done anything else to improve levelization, run `levelization.sh`, and commit the updated results. -The `loops.txt` and `ordering.txt` files relate the modules +The `loops.txt` and `ordering.txt` files relate the modules using comparison signs, which indicate the number of times each module is included in the other. -* `A > B` means that A should probably be at a higher level than B, +- `A > B` means that A should probably be at a higher level than B, because B is included in A significantly more than A is included in B. These results can be included in both `loops.txt` and `ordering.txt`. Because `ordering.txt`only includes relationships where B is not included in A at all, it will only include these types of results. -* `A ~= B` means that A and B are included in each other a different +- `A ~= B` means that A and B are included in each other a different number of times, but the values are so close that the script can't definitively say that one should be above the other. These results will only be included in `loops.txt`. -* `A == B` means that A and B include each other the same number of +- `A == B` means that A and B include each other the same number of times, so the script has no clue which should be higher. These results will only be included in `loops.txt`. @@ -110,5 +110,5 @@ get those details locally. 1. Run `levelization.sh` 2. Grep the modules in `paths.txt`. - * For example, if a cycle is found `A ~= B`, simply `grep -w - A Builds/levelization/results/paths.txt | grep -w B` + - For example, if a cycle is found `A ~= B`, simply `grep -w +A Builds/levelization/results/paths.txt | grep -w B` diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index cb3eb6f048..fb29de5b7e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -8,13 +8,12 @@ We assume you are familiar with the general practice of [making contributions on GitHub][contrib]. This file includes only special instructions specific to this project. - ## Before you start The following branches exist in the main project repository: - `develop`: The latest set of unreleased features, and the most common - starting point for contributions. + starting point for contributions. - `release`: The latest beta release or release candidate. - `master`: The latest stable release. 
- `gh-pages`: The documentation for this project, built by Doxygen. @@ -27,18 +26,18 @@ In general, external contributions should be developed in your personal [fork][forking]. Contributions from developers with write permissions should be done in [the main repository][rippled] in a branch with a permitted prefix. Permitted prefixes are: -* XLS-[a-zA-Z0-9]+/.+ - * e.g. XLS-0033d/mpt-clarify-STEitherAmount -* [GitHub username]/.+ - * e.g. JoelKatz/fix-rpc-webhook-queue -* [Organization name]/.+ - * e.g. ripple/antithesis -Regardless of where the branch is created, please open a *draft* pull +- XLS-[a-zA-Z0-9]+/.+ + - e.g. XLS-0033d/mpt-clarify-STEitherAmount +- [GitHub username]/.+ + - e.g. JoelKatz/fix-rpc-webhook-queue +- [Organization name]/.+ + - e.g. ripple/antithesis + +Regardless of where the branch is created, please open a _draft_ pull request as soon as possible after pushing the branch to Github, to increase visibility, and ease feedback during the development process. - ## Major contributions If your contribution is a major feature or breaking change, then you @@ -55,8 +54,8 @@ responsibility of the XLS author to update the draft to match the final implementation when its corresponding pull request is merged, unless the author delegates that responsibility to others. - ## Before making a pull request + (Or marking a draft pull request as ready.) Changes that alter transaction processing must be guarded by an @@ -73,11 +72,12 @@ automatic test run by `rippled --unittest`. Otherwise, it must be a manual test. If you create new source files, they must be organized as follows: -* If the files are in any of the `libxrpl` modules, the headers (`.h`) must go + +- If the files are in any of the `libxrpl` modules, the headers (`.h`) must go under `include/xrpl`, and source (`.cpp`) files must go under `src/libxrpl`. -* All other non-test files must go under `src/xrpld`. -* All test source files must go under `src/test`. +- All other non-test files must go under `src/xrpld`. +- All test source files must go under `src/test`. The source must be formatted according to the style guide below. @@ -87,16 +87,17 @@ Changes should be usually squashed down into a single commit. Some larger or more complicated change sets make more sense, and are easier to review if organized into multiple logical commits. Either way, all commits should fit the following criteria: -* Changes should be presented in a single commit or a logical + +- Changes should be presented in a single commit or a logical sequence of commits. Specifically, chronological commits that simply reflect the history of how the author implemented the change, "warts and all", are not useful to reviewers. -* Every commit should have a [good message](#good-commit-messages). +- Every commit should have a [good message](#good-commit-messages). to explain a specific aspects of the change. -* Every commit should be signed. -* Every commit should be well-formed (builds successfully, +- Every commit should be signed. +- Every commit should be well-formed (builds successfully, unit tests passing), as this helps to resolve merge conflicts, and makes it easier to use `git bisect` to find bugs. @@ -108,13 +109,14 @@ Refer to for general rules on writing a good commit message. tl;dr + > 1. Separate subject from body with a blank line. > 2. Limit the subject line to 50 characters. -> * [...]shoot for 50 characters, but consider 72 the hard limit. +> - [...]shoot for 50 characters, but consider 72 the hard limit. > 3. Capitalize the subject line. > 4. 
Do not end the subject line with a period. > 5. Use the imperative mood in the subject line. -> * A properly formed Git commit subject line should always be able +> - A properly formed Git commit subject line should always be able > to complete the following sentence: "If applied, this commit will > _your subject line here_". > 6. Wrap the body at 72 characters. @@ -122,16 +124,17 @@ tl;dr In addition to those guidelines, please add one of the following prefixes to the subject line if appropriate. -* `fix:` - The primary purpose is to fix an existing bug. -* `perf:` - The primary purpose is performance improvements. -* `refactor:` - The changes refactor code without affecting + +- `fix:` - The primary purpose is to fix an existing bug. +- `perf:` - The primary purpose is performance improvements. +- `refactor:` - The changes refactor code without affecting functionality. -* `test:` - The changes _only_ affect unit tests. -* `docs:` - The changes _only_ affect documentation. This can +- `test:` - The changes _only_ affect unit tests. +- `docs:` - The changes _only_ affect documentation. This can include code comments in addition to `.md` files like this one. -* `build:` - The changes _only_ affect the build process, +- `build:` - The changes _only_ affect the build process, including CMake and/or Conan settings. -* `chore:` - Other tasks that don't affect the binary, but don't fit +- `chore:` - Other tasks that don't affect the binary, but don't fit any of the other cases. e.g. formatting, git settings, updating Github Actions jobs. @@ -143,9 +146,10 @@ unit tests for Feature X (#1234)`. In general, pull requests use `develop` as the base branch. The exceptions are -* Fixes and improvements to a release candidate use `release` as the + +- Fixes and improvements to a release candidate use `release` as the base. -* Hotfixes use `master` as the base. +- Hotfixes use `master` as the base. If your changes are not quite ready, but you want to make it easily available for preliminary examination or review, you can create a "Draft" pull request. @@ -182,11 +186,11 @@ meets a few criteria: 2. All CI checks must be complete and passed. (One-off failures may be acceptable if they are related to a known issue.) 3. The PR must have a [good commit message](#good-commit-messages). - * If the PR started with a good commit message, and it doesn't + - If the PR started with a good commit message, and it doesn't need to be updated, the author can indicate that in a comment. - * Any contributor, preferably the author, can leave a comment + - Any contributor, preferably the author, can leave a comment suggesting a commit message. - * If the author squashes and rebases the code in preparation for + - If the author squashes and rebases the code in preparation for merge, they should also ensure the commit message(s) are updated as well. 4. The PR branch must be up to date with the base branch (usually @@ -208,7 +212,6 @@ This is a non-exhaustive list of recommended style guidelines. These are not always strictly enforced and serve as a way to keep the codebase coherent rather than a set of _thou shalt not_ commandments. - ## Formatting All code must conform to `clang-format` version 18, @@ -237,6 +240,7 @@ To download the patch file: 5. Commit and push. 
You can install a pre-commit hook to automatically run `clang-format` before every commit: + ``` pip3 install pre-commit pre-commit install @@ -267,49 +271,51 @@ locations, where the reporting of contract violations on the Antithesis platform is either not possible or not useful. For this reason: -* The locations where `assert` or `assert(false)` contracts should continue to be used: - * `constexpr` functions - * unit tests i.e. files under `src/test` - * unit tests-related modules (files under `beast/test` and `beast/unit_test`) -* Outside of the listed locations, do not use `assert`; use `XRPL_ASSERT` instead, + +- The locations where `assert` or `assert(false)` contracts should continue to be used: + - `constexpr` functions + - unit tests i.e. files under `src/test` + - unit tests-related modules (files under `beast/test` and `beast/unit_test`) +- Outside of the listed locations, do not use `assert`; use `XRPL_ASSERT` instead, giving it unique name, with the short description of the contract. -* Outside of the listed locations, do not use `assert(false)`; use +- Outside of the listed locations, do not use `assert(false)`; use `UNREACHABLE` instead, giving it unique name, with the description of the condition being violated -* The contract name should start with a full name (including scope) of the - function, optionally a named lambda, followed by a colon ` : ` and a brief +- The contract name should start with a full name (including scope) of the + function, optionally a named lambda, followed by a colon `:` and a brief (typically at most five words) description. `UNREACHABLE` contracts can use slightly longer descriptions. If there are multiple overloads of the function, use common sense to balance both brevity and unambiguity of the function name. NOTE: the purpose of name is to provide stable means of unique identification of every contract; for this reason try to avoid elements which can change in some obvious refactors or when reinforcing the condition. -* Contract description typically (except for `UNREACHABLE`) should describe the +- Contract description typically (except for `UNREACHABLE`) should describe the _expected_ condition, as in "I assert that _expected_ is true". -* Contract description for `UNREACHABLE` should describe the _unexpected_ +- Contract description for `UNREACHABLE` should describe the _unexpected_ situation which caused the line to have been reached. -* Example good name for an +- Example good name for an `UNREACHABLE` macro `"Json::operator==(Value, Value) : invalid type"`; example good name for an `XRPL_ASSERT` macro `"Json::Value::asCString : valid type"`. -* Example **bad** name +- Example **bad** name `"RFC1751::insert(char* s, int x, int start, int length) : length is greater than or equal zero"` (missing namespace, unnecessary full function signature, description too verbose). Good name: `"ripple::RFC1751::insert : minimum length"`. -* In **few** well-justified cases a non-standard name can be used, in which case a +- In **few** well-justified cases a non-standard name can be used, in which case a comment should be placed to explain the rationale (example in `contract.cpp`) -* Do **not** rename a contract without a good reason (e.g. the name no longer +- Do **not** rename a contract without a good reason (e.g. 
the name no longer reflects the location or the condition being checked) -* Do not use `std::unreachable` -* Do not put contracts where they can be violated by an external condition +- Do not use `std::unreachable` +- Do not put contracts where they can be violated by an external condition (e.g. timing, data payload before mandatory validation etc.) as this creates bogus bug reports (and causes crashes of Debug builds) ## Unit Tests + To execute all unit tests: -```rippled --unittest --unittest-jobs=``` +`rippled --unittest --unittest-jobs=` -(Note: Using multiple cores on a Mac M1 can cause spurious test failures. The +(Note: Using multiple cores on a Mac M1 can cause spurious test failures. The cause is still under investigation. If you observe this problem, try specifying fewer jobs.) To run a specific set of test suites: @@ -317,10 +323,11 @@ To run a specific set of test suites: ``` rippled --unittest TestSuiteName ``` + Note: In this example, all tests with prefix `TestSuiteName` will be run, so if -`TestSuiteName1` and `TestSuiteName2` both exist, then both tests will run. -Alternatively, if the unit test name finds an exact match, it will stop -doing partial matches, i.e. if a unit test with a title of `TestSuiteName` +`TestSuiteName1` and `TestSuiteName2` both exist, then both tests will run. +Alternatively, if the unit test name finds an exact match, it will stop +doing partial matches, i.e. if a unit test with a title of `TestSuiteName` exists, then no other unit test will be executed, apart from `TestSuiteName`. ## Avoid @@ -336,7 +343,6 @@ exists, then no other unit test will be executed, apart from `TestSuiteName`. explanatory comments. 8. Importing new libraries unless there is a very good reason to do so. - ## Seek to 9. Extend functionality of existing code rather than creating new code. @@ -351,14 +357,12 @@ exists, then no other unit test will be executed, apart from `TestSuiteName`. 14. Provide as many comments as you feel that a competent programmer would need to understand what your code does. - # Maintainers Maintainers are ecosystem participants with elevated access to the repository. They are able to push new code, make decisions on when a release should be made, etc. - ## Adding and removing New maintainers can be proposed by two existing maintainers, subject to a vote @@ -373,47 +377,45 @@ A minimum of 60% agreement and 50% participation are required. The XRP Ledger Foundation will have the ability, for cause, to remove an existing maintainer without a vote. - ## Current Maintainers Maintainers are users with maintain or admin access to the repo. 
-* [bthomee](https://github.com/bthomee) (Ripple) -* [intelliot](https://github.com/intelliot) (Ripple) -* [JoelKatz](https://github.com/JoelKatz) (Ripple) -* [nixer89](https://github.com/nixer89) (XRP Ledger Foundation) -* [RichardAH](https://github.com/RichardAH) (XRP Ledger Foundation) -* [Silkjaer](https://github.com/Silkjaer) (XRP Ledger Foundation) -* [WietseWind](https://github.com/WietseWind) (XRPL Labs + XRP Ledger Foundation) -* [ximinez](https://github.com/ximinez) (Ripple) - +- [bthomee](https://github.com/bthomee) (Ripple) +- [intelliot](https://github.com/intelliot) (Ripple) +- [JoelKatz](https://github.com/JoelKatz) (Ripple) +- [nixer89](https://github.com/nixer89) (XRP Ledger Foundation) +- [RichardAH](https://github.com/RichardAH) (XRP Ledger Foundation) +- [Silkjaer](https://github.com/Silkjaer) (XRP Ledger Foundation) +- [WietseWind](https://github.com/WietseWind) (XRPL Labs + XRP Ledger Foundation) +- [ximinez](https://github.com/ximinez) (Ripple) ## Current Code Reviewers Code Reviewers are developers who have the ability to review, approve, and in some cases merge source code changes. -* [HowardHinnant](https://github.com/HowardHinnant) (Ripple) -* [scottschurr](https://github.com/scottschurr) (Ripple) -* [seelabs](https://github.com/seelabs) (Ripple) -* [Ed Hennis](https://github.com/ximinez) (Ripple) -* [mvadari](https://github.com/mvadari) (Ripple) -* [thejohnfreeman](https://github.com/thejohnfreeman) (Ripple) -* [Bronek](https://github.com/Bronek) (Ripple) -* [manojsdoshi](https://github.com/manojsdoshi) (Ripple) -* [godexsoft](https://github.com/godexsoft) (Ripple) -* [mDuo13](https://github.com/mDuo13) (Ripple) -* [ckniffen](https://github.com/ckniffen) (Ripple) -* [arihantkothari](https://github.com/arihantkothari) (Ripple) -* [pwang200](https://github.com/pwang200) (Ripple) -* [sophiax851](https://github.com/sophiax851) (Ripple) -* [shawnxie999](https://github.com/shawnxie999) (Ripple) -* [gregtatcam](https://github.com/gregtatcam) (Ripple) -* [mtrippled](https://github.com/mtrippled) (Ripple) -* [ckeshava](https://github.com/ckeshava) (Ripple) -* [nbougalis](https://github.com/nbougalis) None -* [RichardAH](https://github.com/RichardAH) (XRPL Labs + XRP Ledger Foundation) -* [dangell7](https://github.com/dangell7) (XRPL Labs) +- [HowardHinnant](https://github.com/HowardHinnant) (Ripple) +- [scottschurr](https://github.com/scottschurr) (Ripple) +- [seelabs](https://github.com/seelabs) (Ripple) +- [Ed Hennis](https://github.com/ximinez) (Ripple) +- [mvadari](https://github.com/mvadari) (Ripple) +- [thejohnfreeman](https://github.com/thejohnfreeman) (Ripple) +- [Bronek](https://github.com/Bronek) (Ripple) +- [manojsdoshi](https://github.com/manojsdoshi) (Ripple) +- [godexsoft](https://github.com/godexsoft) (Ripple) +- [mDuo13](https://github.com/mDuo13) (Ripple) +- [ckniffen](https://github.com/ckniffen) (Ripple) +- [arihantkothari](https://github.com/arihantkothari) (Ripple) +- [pwang200](https://github.com/pwang200) (Ripple) +- [sophiax851](https://github.com/sophiax851) (Ripple) +- [shawnxie999](https://github.com/shawnxie999) (Ripple) +- [gregtatcam](https://github.com/gregtatcam) (Ripple) +- [mtrippled](https://github.com/mtrippled) (Ripple) +- [ckeshava](https://github.com/ckeshava) (Ripple) +- [nbougalis](https://github.com/nbougalis) None +- [RichardAH](https://github.com/RichardAH) (XRPL Labs + XRP Ledger Foundation) +- [dangell7](https://github.com/dangell7) (XRPL Labs) Developers not on this list are able and encouraged to submit feedback on pending 
code changes (open pull requests). @@ -423,6 +425,7 @@ on pending code changes (open pull requests). These instructions assume you have your git upstream remotes configured to avoid accidental pushes to the main repo, and a remote group specifying both of them. e.g. + ``` $ git remote -v | grep upstream upstream https://github.com/XRPLF/rippled.git (fetch) @@ -437,6 +440,7 @@ upstream upstream-push You can use the [setup-upstreams] script to set this up. It also assumes you have a default gpg signing key set up in git. e.g. + ``` $ git config user.signingkey 968479A1AFF927E37D1A566BB5690EEEBB952194 @@ -461,8 +465,8 @@ the suggested commit message, or modify it as needed. #### Slightly more complicated pull requests Some pull requests need to be pushed to `develop` as more than one -commit. A PR author may *request* to merge as separate commits. They -must *justify* why separate commits are needed, and *specify* how they +commit. A PR author may _request_ to merge as separate commits. They +must _justify_ why separate commits are needed, and _specify_ how they would like the commits to be merged. If you disagree with the author, discuss it with them directly. @@ -471,20 +475,22 @@ fast forward only merge (`--ff-only`) on the command line and push to `develop`. Some examples of when separate commits are worthwhile are: + 1. PRs where source files are reorganized in multiple steps. -2. PRs where the commits are mostly independent and *could* be separate +2. PRs where the commits are mostly independent and _could_ be separate PRs, but are pulled together into one PR under a commit theme or issue. 3. PRs that are complicated enough that `git bisect` would not be much help if it determined this PR introduced a problem. Either way, check that: -* The commits are based on the current tip of `develop`. -* The commits are clean: No merge commits (except when reverse + +- The commits are based on the current tip of `develop`. +- The commits are clean: No merge commits (except when reverse merging), no "[FOLD]" or "fixup!" messages. -* All commits are signed. If the commits are not signed by the author, use +- All commits are signed. If the commits are not signed by the author, use `git commit --amend -S` to sign them yourself. -* At least one (but preferably all) of the commits has the PR number +- At least one (but preferably all) of the commits has the PR number in the commit message. The "Create a merge commit" and "Rebase and merge" options should be @@ -502,13 +508,13 @@ Rippled uses a linear workflow model that can be summarized as: 1. In between releases, developers work against the `develop` branch. 2. Periodically, a maintainer will build and tag a beta version from `develop`, which is pushed to `release`. - * Betas are usually released every two to three weeks, though that + - Betas are usually released every two to three weeks, though that schedule can vary depending on progress, availability, and other factors. 3. When the changes in `develop` are considered stable and mature enough to be ready to release, a release candidate (RC) is built and tagged from `develop`, and merged to `release`. - * Further development for that release (primarily fixes) then + - Further development for that release (primarily fixes) then continues against `release`, while other development continues on `develop`. Effectively, `release` is forked from `develop`. Changes to `release` must be reverse merged to `develop`. 
@@ -543,6 +549,7 @@ Rippled uses a linear workflow model that can be summarized as: the version number, etc. The workflow may look something like: + ``` git fetch --multiple upstreams user1 user2 user3 [...] git checkout -B release-next --no-track upstream/develop @@ -581,8 +588,9 @@ This includes, betas, and the first release candidate (RC). 1. If you didn't create one [preparing the `develop` branch](#preparing-the-develop-branch), Ensure there is no old - `release-next` branch hanging around. Then make a `release-next` + `release-next` branch hanging around. Then make a `release-next` branch that only changes the version number. e.g. + ``` git fetch upstreams @@ -603,25 +611,30 @@ git push upstream-push git fetch upstreams git branch --set-upstream-to=upstream/release-next ``` - You can also use the [update-version] script. -2. Create a Pull Request for `release-next` with **`develop`** as - the base branch. - 1. Use the title "[TRIVIAL] Set version to X.X.X-bX". - 2. Instead of the default description template, use the following: + +You can also use the [update-version] script. 2. Create a Pull Request for `release-next` with **`develop`** as +the base branch. + +1. Use the title "[TRIVIAL] Set version to X.X.X-bX". +2. Instead of the default description template, use the following: + ``` ## High Level Overview of Change This PR only changes the version number. It will be merged as soon as Github CI actions successfully complete. ``` + 3. Wait for CI to successfully complete, and get someone to approve the PR. (It is safe to ignore known CI issues.) 4. Push the updated `develop` branch using your `release-next` branch. **Do not use the Github UI. It's important to preserve commit IDs.** + ``` git push upstream-push release-next:develop ``` + 5. In the unlikely event that the push fails because someone has merged something else in the meantime, rebase your branch onto the updated `develop` branch, push again, and go back to step 3. @@ -630,22 +643,25 @@ git push upstream-push release-next:develop 7. Once this is done, forward progress on `develop` can continue (other PRs may be merged). 8. Now create a Pull Request for `release-next` with **`release`** as - the base branch. Instead of the default template, reuse and update + the base branch. Instead of the default template, reuse and update the message from the previous release. Include the following verbiage somewhere in the description: + ``` The base branch is `release`. [All releases (including betas)](https://github.com/XRPLF/rippled/blob/develop/CONTRIBUTING.md#before-you-start) go in `release`. This PR branch will be pushed directly to `release` (not squashed or rebased, and not using the GitHub UI). ``` + 7. Sign-offs for the three platforms (Linux, Mac, Windows) usually occur offline, but at least one approval will be needed on the PR. - * If issues are discovered during testing, simply abandon the - release. It's easy to start a new release, it should be easy to + - If issues are discovered during testing, simply abandon the + release. It's easy to start a new release, it should be easy to abandon one. **DO NOT REUSE THE VERSION NUMBER.** e.g. If you abandon 2.4.0-b1, the next attempt will be 2.4.0-b2. 8. Once everything is ready to go, push to `release`. + ``` git fetch upstreams @@ -666,23 +682,28 @@ git log -1 --oneline # Other branches, including some from upstream-push, may also be # present. ``` + 9. Tag the release, too. + ``` git tag git push upstream-push ``` + 10. Delete the `release-next` branch on the repo. 
Use the Github UI or: + ``` git push --delete upstream-push release-next ``` + 11. Finally [create a new release on Github](https://github.com/XRPLF/rippled/releases). #### Release candidates after the first Once the first release candidate is [merged into -release](#making-the-release), then `release` and `develop` *are allowed -to diverge*. +release](#making-the-release), then `release` and `develop` _are allowed +to diverge_. If a bug or issue is discovered in a version that has a release candidate being tested, any fix and new version will need to be applied @@ -690,7 +711,7 @@ against `release`, then reverse-merged to `develop`. This helps keep git history as linear as possible. A `release-next` branch will be created from `release`, and any further -work for that release must be based on `release-next`. Specifically, +work for that release must be based on `release-next`. Specifically, PRs must use `release-next` as the base, and those PRs will be merged directly to `release-next` when approved. Changes should be restricted to bug fixes, but other changes may be necessary from time to time. @@ -713,17 +734,21 @@ Once the RC is merged and tagged, it needs to be reverse merged into 1. Create a branch, based on `upstream/develop`. The branch name is not important, but could include "mergeNNNrcN". E.g. For release A.B.C-rcD, use `mergeABCrcD`. + ``` git fetch upstreams git checkout --no-track -b mergeABCrcD upstream/develop ``` + 2. Merge `release` into your branch. + ``` # I like the "--edit --log --verbose" parameters, but they are # not required. git merge upstream/release ``` + 3. `BuildInfo.cpp` will have a conflict with the version number. Resolve it with the version from `develop` - the higher version. 4. Push your branch to your repo (or `upstream` if you have permission), @@ -731,22 +756,27 @@ git merge upstream/release simply indicate that this is a merge of the RC. The "Context" should summarize the changes from the RC. Include the following text prominently: + ``` This PR must be merged manually using a push. Do not use the Github UI. ``` + 5. Depending on the complexity of the changes, and/or merge conflicts, the PR may need a thorough review, or just a sign-off that the merge was done correctly. 6. If `develop` is updated before this PR is merged, do not merge `develop` back into your branch. Instead rebase preserving merges, or do the merge again. (See also the `rerere` git config setting.) + ``` git rebase --rebase-merges upstream/develop # OR git reset --hard upstream/develop git merge upstream/release ``` + 7. When the PR is ready, push it to `develop`. + ``` git fetch upstreams @@ -757,8 +787,8 @@ git push upstream-push mergeABCrcD:develop git fetch upstreams ``` -Development on `develop` can proceed as normal. +Development on `develop` can proceed as normal. #### Final releases @@ -773,7 +803,7 @@ internally as if they were RCs (at minimum, ensuring unit tests pass, and the app starts, syncs, and stops cleanly across all three platforms.) -*If in doubt, make an RC first.* +_If in doubt, make an RC first._ The process for building a final release is very similar to [the process for building a beta](#making-the-release), except the code will be @@ -785,20 +815,23 @@ moving from `release` to `master` instead of from `develop` to number. As above, or using the [update-version] script. 2. Create a Pull Request for `master-next` with **`master`** as - the base branch. Instead of the default template, reuse and update + the base branch. 
Instead of the default template, reuse and update the message from the previous final release. Include the following verbiage somewhere in the description: + ``` The base branch is `master`. This PR branch will be pushed directly to `release` and `master` (not squashed or rebased, and not using the GitHub UI). ``` + 7. Sign-offs for the three platforms (Linux, Mac, Windows) usually occur offline, but at least one approval will be needed on the PR. - * If issues are discovered during testing, close the PR, delete + - If issues are discovered during testing, close the PR, delete `master-next`, and move development back to `release`, [issuing more RCs as necessary](#release-candidates-after-the-first) 8. Once everything is ready to go, push to `release` and `master`. + ``` git fetch upstreams @@ -821,15 +854,20 @@ git log -1 --oneline # Other branches, including some from upstream-push, may also be # present. ``` + 9. Tag the release, too. + ``` git tag git push upstream-push ``` + 10. Delete the `master-next` branch on the repo. Use the Github UI or: + ``` git push --delete upstream-push master-next ``` + 11. [Create a new release on Github](https://github.com/XRPLF/rippled/releases). Be sure that "Set as the latest release" is checked. @@ -856,11 +894,13 @@ any branch. When it's ready to merge, jump to step 3 using your branch instead of `master-next`. 1. Create a `master-next` branch from `master`. + ``` git checkout --no-track -b master-next upstream/master git push upstream-push git fetch upstreams ``` + 2. Open any PRs for the pending hotfix using `master-next` as the base, so they can be merged directly in to it. Unlike `develop`, though, `master-next` can be thrown away and recreated if necessary. @@ -868,19 +908,22 @@ git fetch upstreams steps as above, or use the [update-version] script. 4. Create a Pull Request for `master-next` with **`master`** as - the base branch. Instead of the default template, reuse and update + the base branch. Instead of the default template, reuse and update the message from the previous final release. Include the following verbiage somewhere in the description: + ``` The base branch is `master`. This PR branch will be pushed directly to `master` (not squashed or rebased, and not using the GitHub UI). ``` + 7. Sign-offs for the three platforms (Linux, Mac, Windows) usually occur offline, but at least one approval will be needed on the PR. - * If issues are discovered during testing, update `master-next` as + - If issues are discovered during testing, update `master-next` as needed, but ensure that the changes are properly squashed, and the version setting commit remains last 8. Once everything is ready to go, push to `master` **only**. + ``` git fetch upstreams @@ -901,15 +944,20 @@ git log -1 --oneline # Other branches, including some from upstream-push, may also be # present. ``` + 9. Tag the release, too. + ``` git tag git push upstream-push ``` + 9. Delete the `master-next` branch on the repo. + ``` git push --delete upstream-push master-next ``` + 10. [Create a new release on Github](https://github.com/XRPLF/rippled/releases). Be sure that "Set as the latest release" is checked. @@ -921,17 +969,21 @@ Once the hotfix is released, it needs to be reverse merged into 1. Create a branch in your own repo, based on `upstream/develop`. The branch name is not important, but could include "mergeNNN". E.g. For release 2.2.3, use `merge223`. + ``` git fetch upstreams git checkout --no-track -b merge223 upstream/develop ``` + 2. Merge master into your branch. 
+ ``` # I like the "--edit --log --verbose" parameters, but they are # not required. git merge upstream/master ``` + 3. `BuildInfo.cpp` will have a conflict with the version number. Resolve it with the version from `develop` - the higher version. 4. Push your branch to your repo, and open a normal PR against @@ -939,22 +991,27 @@ git merge upstream/master is a merge of the hotfix version. The "Context" should summarize the changes from the hotfix. Include the following text prominently: + ``` This PR must be merged manually using a --ff-only merge. Do not use the Github UI. ``` + 5. Depending on the complexity of the hotfix, and/or merge conflicts, the PR may need a thorough review, or just a sign-off that the merge was done correctly. 6. If `develop` is updated before this PR is merged, do not merge `develop` back into your branch. Instead rebase preserving merges, or do the merge again. (See also the `rerere` git config setting.) + ``` git rebase --rebase-merges upstream/develop # OR git reset --hard upstream/develop git merge upstream/master ``` + 7. When the PR is ready, push it to `develop`. + ``` git fetch upstreams @@ -963,6 +1020,7 @@ git log --show-signature "upstream/develop..HEAD" git push upstream-push HEAD:develop ``` + Development on `develop` can proceed as normal. It is recommended to create a beta (or RC) immediately to ensure that everything worked as expected. @@ -977,12 +1035,13 @@ a significant fraction of users, which would necessitate a hotfix / point release to that version as well as any later versions. This scenario would follow the same basic procedure as above, -except that *none* of `develop`, `release`, or `master` +except that _none_ of `develop`, `release`, or `master` would be touched during the release process. In this example, consider if version 2.1.1 needed to be patched. 1. Create two branches in the main (`upstream`) repo. + ``` git fetch upstreams @@ -996,6 +1055,7 @@ git push upstream-push git fetch upstreams ``` + 2. Work continues as above, except using `master-2.1.2`as the base branch for any merging, packaging, etc. 3. After the release is tagged and packages are built, you could diff --git a/LICENSE.md b/LICENSE.md index 9282ed78ba..8aca84866f 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,4 +1,4 @@ -ISC License +ISC License Copyright (c) 2011, Arthur Britto, David Schwartz, Jed McCaleb, Vinnie Falco, Bob Way, Eric Lombrozo, Nikolaos D. Bougalis, Howard Hinnant. Copyright (c) 2012-2020, the XRP Ledger developers. @@ -14,4 +14,3 @@ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - diff --git a/README.md b/README.md index 0315c37428..4fdb89dffa 100644 --- a/README.md +++ b/README.md @@ -5,17 +5,19 @@ The [XRP Ledger](https://xrpl.org/) is a decentralized cryptographic ledger powered by a network of peer-to-peer nodes. The XRP Ledger uses a novel Byzantine Fault Tolerant consensus algorithm to settle and record transactions in a secure distributed database without a central operator. ## XRP + [XRP](https://xrpl.org/xrp.html) is a public, counterparty-free asset native to the XRP Ledger, and is designed to bridge the many different currencies in use worldwide. XRP is traded on the open-market and is available for anyone to access. The XRP Ledger was created in 2012 with a finite supply of 100 billion units of XRP. 
## rippled + The server software that powers the XRP Ledger is called `rippled` and is available in this repository under the permissive [ISC open-source license](LICENSE.md). The `rippled` server software is written primarily in C++ and runs on a variety of platforms. The `rippled` server software can run in several modes depending on its [configuration](https://xrpl.org/rippled-server-modes.html). If you are interested in running an **API Server** (including a **Full History Server**), take a look at [Clio](https://github.com/XRPLF/clio). (rippled Reporting Mode has been replaced by Clio.) ### Build from Source -* [Read the build instructions in `BUILD.md`](BUILD.md) -* If you encounter any issues, please [open an issue](https://github.com/XRPLF/rippled/issues) +- [Read the build instructions in `BUILD.md`](BUILD.md) +- If you encounter any issues, please [open an issue](https://github.com/XRPLF/rippled/issues) ## Key Features of the XRP Ledger @@ -35,7 +37,6 @@ If you are interested in running an **API Server** (including a **Full History S [Modern Features for Smart Contracts]: https://xrpl.org/xrp-ledger-overview.html#modern-features-for-smart-contracts [On-Ledger Decentralized Exchange]: https://xrpl.org/xrp-ledger-overview.html#on-ledger-decentralized-exchange - ## Source Code Here are some good places to start learning the source code: @@ -47,7 +48,7 @@ Here are some good places to start learning the source code: ### Repository Contents | Folder | Contents | -|:-----------|:-------------------------------------------------| +| :--------- | :----------------------------------------------- | | `./bin` | Scripts and data files for Ripple integrators. | | `./Builds` | Platform-specific guides for building `rippled`. | | `./docs` | Source documentation files and doxygen config. | @@ -57,15 +58,14 @@ Here are some good places to start learning the source code: Some of the directories under `src` are external repositories included using git-subtree. See those directories' README files for more details. - ## Additional Documentation -* [XRP Ledger Dev Portal](https://xrpl.org/) -* [Setup and Installation](https://xrpl.org/install-rippled.html) -* [Source Documentation (Doxygen)](https://xrplf.github.io/rippled/) +- [XRP Ledger Dev Portal](https://xrpl.org/) +- [Setup and Installation](https://xrpl.org/install-rippled.html) +- [Source Documentation (Doxygen)](https://xrplf.github.io/rippled/) ## See Also -* [Clio API Server for the XRP Ledger](https://github.com/XRPLF/clio) -* [Mailing List for Release Announcements](https://groups.google.com/g/ripple-server) -* [Learn more about the XRP Ledger (YouTube)](https://www.youtube.com/playlist?list=PLJQ55Tj1hIVZtJ_JdTvSum2qMTsedWkNi) +- [Clio API Server for the XRP Ledger](https://github.com/XRPLF/clio) +- [Mailing List for Release Announcements](https://groups.google.com/g/ripple-server) +- [Learn more about the XRP Ledger (YouTube)](https://www.youtube.com/playlist?list=PLJQ55Tj1hIVZtJ_JdTvSum2qMTsedWkNi) diff --git a/SECURITY.md b/SECURITY.md index eb7437d2f9..3fd85bad0a 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -2,7 +2,6 @@ For more details on operating an XRP Ledger server securely, please visit https://xrpl.org/manage-the-rippled-server.html. - # Security Policy ## Supported Versions @@ -77,13 +76,14 @@ The amount paid varies dramatically. 
Vulnerabilities that are harmless on their To report a qualifying bug, please send a detailed report to: -|Email Address|bugs@ripple.com | -|:-----------:|:----------------------------------------------------| -|Short Key ID | `0xC57929BE` | -|Long Key ID | `0xCD49A0AFC57929BE` | -|Fingerprint | `24E6 3B02 37E0 FA9C 5E96 8974 CD49 A0AF C579 29BE` | +| Email Address | bugs@ripple.com | +| :-----------: | :-------------------------------------------------- | +| Short Key ID | `0xC57929BE` | +| Long Key ID | `0xCD49A0AFC57929BE` | +| Fingerprint | `24E6 3B02 37E0 FA9C 5E96 8974 CD49 A0AF C579 29BE` | + +The full PGP key for this address, which is also available on several key servers (e.g. on [keyserver.ubuntu.com](https://keyserver.ubuntu.com)), is: -The full PGP key for this address, which is also available on several key servers (e.g. on [keyserver.ubuntu.com](https://keyserver.ubuntu.com)), is: ``` -----BEGIN PGP PUBLIC KEY BLOCK----- mQINBFUwGHYBEAC0wpGpBPkd8W1UdQjg9+cEFzeIEJRaoZoeuJD8mofwI5Ejnjdt diff --git a/docs/0001-negative-unl/README.md b/docs/0001-negative-unl/README.md index 606b30aab1..f28ff63c6f 100644 --- a/docs/0001-negative-unl/README.md +++ b/docs/0001-negative-unl/README.md @@ -30,7 +30,7 @@ the ledger (so the entire network has the same view). This will help the network see which validators are **currently** unreliable, and adjust their quorum calculation accordingly. -*Improving the liveness of the network is the main motivation for the negative UNL.* +_Improving the liveness of the network is the main motivation for the negative UNL._ ### Targeted Faults @@ -53,16 +53,17 @@ even if the number of remaining validators gets to 60%. Say we have a network with 10 validators on the UNL and everything is operating correctly. The quorum required for this network would be 8 (80% of 10). When validators fail, the quorum required would be as low as 6 (60% of 10), which is the absolute -***minimum quorum***. We need the absolute minimum quorum to be strictly greater +**_minimum quorum_**. We need the absolute minimum quorum to be strictly greater than 50% of the original UNL so that there cannot be two partitions of well-behaved nodes headed in different directions. We arbitrarily choose 60% as the minimum quorum to give a margin of safety. Consider these events in the absence of negative UNL: + 1. 1:00pm - validator1 fails, votes vs. quorum: 9 >= 8, we have quorum 1. 3:00pm - validator2 fails, votes vs. quorum: 8 >= 8, we have quorum 1. 5:00pm - validator3 fails, votes vs. quorum: 7 < 8, we don’t have quorum - * **network cannot validate new ledgers with 3 failed validators** + - **network cannot validate new ledgers with 3 failed validators** We're below 80% agreement, so new ledgers cannot be validated. This is how the XRP Ledger operates today, but if the negative UNL was enabled, the events would @@ -70,18 +71,20 @@ happen as follows. (Please note that the events below are from a simplified version of our protocol.) 1. 1:00pm - validator1 fails, votes vs. quorum: 9 >= 8, we have quorum -1. 1:40pm - network adds validator1 to negative UNL, quorum changes to ceil(9 * 0.8), or 8 +1. 1:40pm - network adds validator1 to negative UNL, quorum changes to ceil(9 \* 0.8), or 8 1. 3:00pm - validator2 fails, votes vs. quorum: 8 >= 8, we have quorum -1. 3:40pm - network adds validator2 to negative UNL, quorum changes to ceil(8 * 0.8), or 7 +1. 3:40pm - network adds validator2 to negative UNL, quorum changes to ceil(8 \* 0.8), or 7 1. 5:00pm - validator3 fails, votes vs. 
quorum: 7 >= 7, we have quorum -1. 5:40pm - network adds validator3 to negative UNL, quorum changes to ceil(7 * 0.8), or 6 +1. 5:40pm - network adds validator3 to negative UNL, quorum changes to ceil(7 \* 0.8), or 6 1. 7:00pm - validator4 fails, votes vs. quorum: 6 >= 6, we have quorum - * **network can still validate new ledgers with 4 failed validators** + - **network can still validate new ledgers with 4 failed validators** ## External Interactions ### Message Format Changes + This proposal will: + 1. add a new pseudo-transaction type 1. add the negative UNL to the ledger data structure. @@ -89,19 +92,20 @@ Any tools or systems that rely on the format of this data will have to be updated. ### Amendment + This feature **will** need an amendment to activate. ## Design This section discusses the following topics about the Negative UNL design: -* [Negative UNL protocol overview](#Negative-UNL-Protocol-Overview) -* [Validator reliability measurement](#Validator-Reliability-Measurement) -* [Format Changes](#Format-Changes) -* [Negative UNL maintenance](#Negative-UNL-Maintenance) -* [Quorum size calculation](#Quorum-Size-Calculation) -* [Filter validation messages](#Filter-Validation-Messages) -* [High level sequence diagram of code +- [Negative UNL protocol overview](#Negative-UNL-Protocol-Overview) +- [Validator reliability measurement](#Validator-Reliability-Measurement) +- [Format Changes](#Format-Changes) +- [Negative UNL maintenance](#Negative-UNL-Maintenance) +- [Quorum size calculation](#Quorum-Size-Calculation) +- [Filter validation messages](#Filter-Validation-Messages) +- [High level sequence diagram of code changes](#High-Level-Sequence-Diagram-of-Code-Changes) ### Negative UNL Protocol Overview @@ -114,9 +118,9 @@ with V in their UNL adjust the quorum and V’s validation message is not counte when verifying if a ledger is fully validated. V’s flow of messages and network interactions, however, will remain the same. -We define the ***effective UNL** = original UNL - negative UNL*, and the -***effective quorum*** as the quorum of the *effective UNL*. And we set -*effective quorum = Ceiling(80% * effective UNL)*. +We define the **\*effective UNL** = original UNL - negative UNL\*, and the +**_effective quorum_** as the quorum of the _effective UNL_. And we set +_effective quorum = Ceiling(80% _ effective UNL)\*. ### Validator Reliability Measurement @@ -126,16 +130,16 @@ measure about its validators, but we have chosen ledger validation messages. This is because every validator shall send one and only one signed validation message per ledger. This keeps the measurement simple and removes timing/clock-sync issues. A node will measure the percentage of agreeing -validation messages (*PAV*) received from each validator on the node's UNL. Note +validation messages (_PAV_) received from each validator on the node's UNL. Note that the node will only count the validation messages that agree with its own validations. We define the **PAV** as the **P**ercentage of **A**greed **V**alidation messages received for the last N ledgers, where N = 256 by default. -When the PAV drops below the ***low-water mark***, the validator is considered +When the PAV drops below the **_low-water mark_**, the validator is considered unreliable, and is a candidate to be disabled by being added to the negative -UNL. A validator must have a PAV higher than the ***high-water mark*** to be +UNL. A validator must have a PAV higher than the **_high-water mark_** to be re-enabled. 
The validator is re-enabled by removing it from the negative UNL. In the implementation, we plan to set the low-water mark as 50% and the high-water mark as 80%. @@ -143,22 +147,24 @@ mark as 80%. ### Format Changes The negative UNL component in a ledger contains three fields. -* ***NegativeUNL***: The current negative UNL, a list of unreliable validators. -* ***ToDisable***: The validator to be added to the negative UNL on the next + +- **_NegativeUNL_**: The current negative UNL, a list of unreliable validators. +- **_ToDisable_**: The validator to be added to the negative UNL on the next flag ledger. -* ***ToReEnable***: The validator to be removed from the negative UNL on the +- **_ToReEnable_**: The validator to be removed from the negative UNL on the next flag ledger. -All three fields are optional. When the *ToReEnable* field exists, the -*NegativeUNL* field cannot be empty. +All three fields are optional. When the _ToReEnable_ field exists, the +_NegativeUNL_ field cannot be empty. -A new pseudo-transaction, ***UNLModify***, is added. It has three fields -* ***Disabling***: A flag indicating whether the modification is to disable or +A new pseudo-transaction, **_UNLModify_**, is added. It has three fields + +- **_Disabling_**: A flag indicating whether the modification is to disable or to re-enable a validator. -* ***Seq***: The ledger sequence number. -* ***Validator***: The validator to be disabled or re-enabled. +- **_Seq_**: The ledger sequence number. +- **_Validator_**: The validator to be disabled or re-enabled. -There would be at most one *disable* `UNLModify` and one *re-enable* `UNLModify` +There would be at most one _disable_ `UNLModify` and one _re-enable_ `UNLModify` transaction per flag ledger. The full machinery is described further on. ### Negative UNL Maintenance @@ -167,19 +173,19 @@ The negative UNL can only be modified on the flag ledgers. If a validator's reliability status changes, it takes two flag ledgers to modify the negative UNL. Let's see an example of the algorithm: -* Ledger seq = 100: A validator V goes offline. -* Ledger seq = 256: This is a flag ledger, and V's reliability measurement *PAV* +- Ledger seq = 100: A validator V goes offline. +- Ledger seq = 256: This is a flag ledger, and V's reliability measurement _PAV_ is lower than the low-water mark. Other validators add `UNLModify` pseudo-transactions `{true, 256, V}` to the transaction set which goes through the consensus. Then the pseudo-transaction is applied to the negative UNL ledger component by setting `ToDisable = V`. -* Ledger seq = 257 ~ 511: The negative UNL ledger component is copied from the +- Ledger seq = 257 ~ 511: The negative UNL ledger component is copied from the parent ledger. -* Ledger seq=512: This is a flag ledger, and the negative UNL is updated +- Ledger seq=512: This is a flag ledger, and the negative UNL is updated `NegativeUNL = NegativeUNL + ToDisable`. The negative UNL may have up to `MaxNegativeListed = floor(original UNL * 25%)` -validators. The 25% is because of 75% * 80% = 60%, where 75% = 100% - 25%, 80% +validators. The 25% is because of 75% \* 80% = 60%, where 75% = 100% - 25%, 80% is the quorum of the effective UNL, and 60% is the absolute minimum quorum of the original UNL. Adding more than 25% validators to the negative UNL does not improve the liveness of the network, because adding more validators to the @@ -187,52 +193,43 @@ negative UNL cannot lower the effective quorum. 
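Taken together, the size cap and the quorum rules can be restated compactly
(this is only a summary of rules already stated in this document, writing
|X| for the size of list X):

$$
\begin{aligned}
\text{effective UNL} &= \text{original UNL} - \text{negative UNL},\\
\text{quorum} &= \max\bigl(\lceil 0.8\,\lvert\text{effective UNL}\rvert\rceil,\ \lceil 0.6\,\lvert\text{original UNL}\rvert\rceil\bigr),\\
\lvert\text{negative UNL}\rvert &\le \lfloor 0.25\,\lvert\text{original UNL}\rvert\rfloor.
\end{aligned}
$$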
The following is the detailed algorithm: -* **If** the ledger seq = x is a flag ledger +- **If** the ledger seq = x is a flag ledger + 1. Compute `NegativeUNL = NegativeUNL + ToDisable - ToReEnable` if they + exist in the parent ledger - 1. Compute `NegativeUNL = NegativeUNL + ToDisable - ToReEnable` if they - exist in the parent ledger + 1. Try to find a candidate to disable if `sizeof NegativeUNL < MaxNegativeListed` - 1. Try to find a candidate to disable if `sizeof NegativeUNL < MaxNegativeListed` + 1. Find a validator V that has a _PAV_ lower than the low-water + mark, but is not in `NegativeUNL`. - 1. Find a validator V that has a *PAV* lower than the low-water - mark, but is not in `NegativeUNL`. + 1. If two or more are found, their public keys are XORed with the hash + of the parent ledger and the one with the lowest XOR result is chosen. + 1. If V is found, create a `UNLModify` pseudo-transaction + `TxDisableValidator = {true, x, V}` + 1. Try to find a candidate to re-enable if `sizeof NegativeUNL > 0`: + 1. Find a validator U that is in `NegativeUNL` and has a _PAV_ higher + than the high-water mark. + 1. If U is not found, try to find one in `NegativeUNL` but not in the + local _UNL_. + 1. If two or more are found, their public keys are XORed with the hash + of the parent ledger and the one with the lowest XOR result is chosen. + 1. If U is found, create a `UNLModify` pseudo-transaction + `TxReEnableValidator = {false, x, U}` - 1. If two or more are found, their public keys are XORed with the hash - of the parent ledger and the one with the lowest XOR result is chosen. - - 1. If V is found, create a `UNLModify` pseudo-transaction - `TxDisableValidator = {true, x, V}` - - 1. Try to find a candidate to re-enable if `sizeof NegativeUNL > 0`: - - 1. Find a validator U that is in `NegativeUNL` and has a *PAV* higher - than the high-water mark. - - 1. If U is not found, try to find one in `NegativeUNL` but not in the - local *UNL*. - - 1. If two or more are found, their public keys are XORed with the hash - of the parent ledger and the one with the lowest XOR result is chosen. - - 1. If U is found, create a `UNLModify` pseudo-transaction - `TxReEnableValidator = {false, x, U}` - - 1. If any `UNLModify` pseudo-transactions are created, add them to the - transaction set. The transaction set goes through the consensus algorithm. - - 1. If have enough support, the `UNLModify` pseudo-transactions remain in the - transaction set agreed by the validators. Then the pseudo-transactions are - applied to the ledger: - - 1. If have `TxDisableValidator`, set `ToDisable=TxDisableValidator.V`. - Else clear `ToDisable`. - - 1. If have `TxReEnableValidator`, set - `ToReEnable=TxReEnableValidator.U`. Else clear `ToReEnable`. - -* **Else** (not a flag ledger) + 1. If any `UNLModify` pseudo-transactions are created, add them to the + transaction set. The transaction set goes through the consensus algorithm. + 1. If have enough support, the `UNLModify` pseudo-transactions remain in the + transaction set agreed by the validators. Then the pseudo-transactions are + applied to the ledger: - 1. Copy the negative UNL ledger component from the parent ledger + 1. If have `TxDisableValidator`, set `ToDisable=TxDisableValidator.V`. + Else clear `ToDisable`. + + 1. If have `TxReEnableValidator`, set + `ToReEnable=TxReEnableValidator.U`. Else clear `ToReEnable`. + +- **Else** (not a flag ledger) + 1. 
Copy the negative UNL ledger component from the parent ledger The negative UNL is stored on each ledger because we don't know when a validator may reconnect to the network. If the negative UNL was stored only on every flag @@ -273,31 +270,26 @@ not counted when checking if the ledger is fully validated. The diagram below is the sequence of one round of consensus. Classes and components with non-trivial changes are colored green. -* The `ValidatorList` class is modified to compute the quorum of the effective +- The `ValidatorList` class is modified to compute the quorum of the effective UNL. -* The `Validations` class provides an interface for querying the validation +- The `Validations` class provides an interface for querying the validation messages from trusted validators. -* The `ConsensusAdaptor` component: - - * The `RCLConsensus::Adaptor` class is modified for creating `UNLModify` - Pseudo-Transactions. - - * The `Change` class is modified for applying `UNLModify` - Pseudo-Transactions. - - * The `Ledger` class is modified for creating and adjusting the negative UNL - ledger component. - - * The `LedgerMaster` class is modified for filtering out validation messages - from negative UNL validators when verifying if a ledger is fully - validated. +- The `ConsensusAdaptor` component: + - The `RCLConsensus::Adaptor` class is modified for creating `UNLModify` + Pseudo-Transactions. + - The `Change` class is modified for applying `UNLModify` + Pseudo-Transactions. + - The `Ledger` class is modified for creating and adjusting the negative UNL + ledger component. + - The `LedgerMaster` class is modified for filtering out validation messages + from negative UNL validators when verifying if a ledger is fully + validated. ![Sequence diagram](./negativeUNL_highLevel_sequence.png?raw=true "Negative UNL Changes") - ## Roads Not Taken ### Use a Mechanism Like Fee Voting to Process UNLModify Pseudo-Transactions @@ -311,7 +303,7 @@ and different quorums for the same ledger. As a result, the network's safety is impacted. This updated version does not impact safety though operates a bit more slowly. -The negative UNL modifications in the *UNLModify* pseudo-transaction approved by +The negative UNL modifications in the _UNLModify_ pseudo-transaction approved by the consensus will take effect at the next flag ledger. The extra time of the 256 ledgers should be enough for nodes to be in sync of the negative UNL modifications. @@ -334,29 +326,28 @@ expiration approach cannot be simply applied. ### Validator Reliability Measurement and Flag Ledger Frequency If the ledger time is about 4.5 seconds and the low-water mark is 50%, then in -the worst case, it takes 48 minutes *((0.5 * 256 + 256 + 256) * 4.5 / 60 = 48)* +the worst case, it takes 48 minutes _((0.5 _ 256 + 256 + 256) _ 4.5 / 60 = 48)_ to put an offline validator on the negative UNL. We considered lowering the flag ledger frequency so that the negative UNL can be more responsive. We also considered decoupling the reliability measurement and flag ledger frequency to be more flexible. In practice, however, their benefits are not clear. - ## New Attack Vectors A group of malicious validators may try to frame a reliable validator and put it on the negative UNL. But they cannot succeed. Because: 1. A reliable validator sends a signed validation message every ledger. A -sufficient peer-to-peer network will propagate the validation messages to other -validators. 
The validators will decide if another validator is reliable or not -only by its local observation of the validation messages received. So an honest -validator’s vote on another validator’s reliability is accurate. + sufficient peer-to-peer network will propagate the validation messages to other + validators. The validators will decide if another validator is reliable or not + only by its local observation of the validation messages received. So an honest + validator’s vote on another validator’s reliability is accurate. 1. Given the votes are accurate, and one vote per validator, an honest validator -will not create a UNLModify transaction of a reliable validator. + will not create a UNLModify transaction of a reliable validator. 1. A validator can be added to a negative UNL only through a UNLModify -transaction. + transaction. Assuming the group of malicious validators is less than the quorum, they cannot frame a reliable validator. @@ -365,32 +356,32 @@ frame a reliable validator. The bullet points below briefly summarize the current proposal: -* The motivation of the negative UNL is to improve the liveness of the network. +- The motivation of the negative UNL is to improve the liveness of the network. -* The targeted faults are the ones frequently observed in the production +- The targeted faults are the ones frequently observed in the production network. -* Validators propose negative UNL candidates based on their local measurements. +- Validators propose negative UNL candidates based on their local measurements. -* The absolute minimum quorum is 60% of the original UNL. +- The absolute minimum quorum is 60% of the original UNL. -* The format of the ledger is changed, and a new *UNLModify* pseudo-transaction +- The format of the ledger is changed, and a new _UNLModify_ pseudo-transaction is added. Any tools or systems that rely on the format of these data will have to be updated. -* The negative UNL can only be modified on the flag ledgers. +- The negative UNL can only be modified on the flag ledgers. -* At most one validator can be added to the negative UNL at a flag ledger. +- At most one validator can be added to the negative UNL at a flag ledger. -* At most one validator can be removed from the negative UNL at a flag ledger. +- At most one validator can be removed from the negative UNL at a flag ledger. -* If a validator's reliability status changes, it takes two flag ledgers to +- If a validator's reliability status changes, it takes two flag ledgers to modify the negative UNL. -* The quorum is the larger of 80% of the effective UNL and 60% of the original +- The quorum is the larger of 80% of the effective UNL and 60% of the original UNL. -* If a validator is on the negative UNL, its validation messages are ignored +- If a validator is on the negative UNL, its validation messages are ignored when the local node verifies if a ledger is fully validated. ## FAQ @@ -415,7 +406,7 @@ lower quorum size while keeping the network safe. validator removed from the negative UNL? A validator’s reliability is measured by other validators. If a validator -becomes unreliable, at a flag ledger, other validators propose *UNLModify* +becomes unreliable, at a flag ledger, other validators propose _UNLModify_ pseudo-transactions which vote the validator to add to the negative UNL during the consensus session. If agreed, the validator is added to the negative UNL at the next flag ledger. The mechanism of removing a validator from the negative @@ -423,32 +414,32 @@ UNL is the same. 
### Question: Given a negative UNL, what happens if the UNL changes? -Answer: Let’s consider the cases: +Answer: Let’s consider the cases: -1. A validator is added to the UNL, and it is already in the negative UNL. This -case could happen when not all the nodes have the same UNL. Note that the -negative UNL on the ledger lists unreliable nodes that are not necessarily the -validators for everyone. +1. A validator is added to the UNL, and it is already in the negative UNL. This + case could happen when not all the nodes have the same UNL. Note that the + negative UNL on the ledger lists unreliable nodes that are not necessarily the + validators for everyone. - In this case, the liveness is affected negatively. Because the minimum - quorum could be larger but the usable validators are not increased. + In this case, the liveness is affected negatively. Because the minimum + quorum could be larger but the usable validators are not increased. -1. A validator is removed from the UNL, and it is in the negative UNL. +1. A validator is removed from the UNL, and it is in the negative UNL. In this case, the liveness is affected positively. Because the quorum could be smaller but the usable validators are not reduced. -1. A validator is added to the UNL, and it is not in the negative UNL. -1. A validator is removed from the UNL, and it is not in the negative UNL. - +1. A validator is added to the UNL, and it is not in the negative UNL. +1. A validator is removed from the UNL, and it is not in the negative UNL. + Case 3 and 4 are not affected by the negative UNL protocol. -### Question: Can we simply lower the quorum to 60% without the negative UNL? +### Question: Can we simply lower the quorum to 60% without the negative UNL? Answer: No, because the negative UNL approach is safer. -First let’s compare the two approaches intuitively, (1) the *negative UNL* -approach, and (2) *lower quorum*: simply lowering the quorum from 80% to 60% +First let’s compare the two approaches intuitively, (1) the _negative UNL_ +approach, and (2) _lower quorum_: simply lowering the quorum from 80% to 60% without the negative UNL. The negative UNL approach uses consensus to come up with a list of unreliable validators, which are then removed from the effective UNL temporarily. With this approach, the list of unreliable validators is agreed @@ -462,75 +453,75 @@ Next we compare the two approaches quantitatively with examples, and apply Theorem 8 of [Analysis of the XRP Ledger Consensus Protocol](https://arxiv.org/abs/1802.07242) paper: -*XRP LCP guarantees fork safety if **Oi,j > nj / 2 + +_XRP LCP guarantees fork safety if **Oi,j > nj / 2 + ni − qi + ti,j** for every pair of nodes -Pi, Pj,* +Pi, Pj,_ -where *Oi,j* is the overlapping requirement, nj and +where _Oi,j_ is the overlapping requirement, nj and ni are UNL sizes, qi is the quorum size of Pi, -*ti,j = min(ti, tj, Oi,j)*, and +_ti,j = min(ti, tj, Oi,j)_, and ti and tj are the number of faults can be tolerated by Pi and Pj. -We denote *UNLi* as *Pi's UNL*, and *|UNLi|* as -the size of *Pi's UNL*. +We denote _UNLi_ as _Pi's UNL_, and _|UNLi|_ as +the size of _Pi's UNL_. -Assuming *|UNLi| = |UNLj|*, let's consider the following +Assuming _|UNLi| = |UNLj|_, let's consider the following three cases: -1. With 80% quorum and 20% faults, *Oi,j > 100% / 2 + 100% - 80% + -20% = 90%*. I.e. fork safety requires > 90% UNL overlaps. This is one of the -results in the analysis paper. +1. With 80% quorum and 20% faults, _Oi,j > 100% / 2 + 100% - 80% + + 20% = 90%_. I.e. 
fork safety requires > 90% UNL overlaps. This is one of the + results in the analysis paper. -1. If the quorum is 60%, the relationship between the overlapping requirement -and the faults that can be tolerated is *Oi,j > 90% + -ti,j*. Under the same overlapping condition (i.e. 90%), to guarantee -the fork safety, the network cannot tolerate any faults. So under the same -overlapping condition, if the quorum is simply lowered, the network can tolerate -fewer faults. +1. If the quorum is 60%, the relationship between the overlapping requirement + and the faults that can be tolerated is _Oi,j > 90% + + ti,j_. Under the same overlapping condition (i.e. 90%), to guarantee + the fork safety, the network cannot tolerate any faults. So under the same + overlapping condition, if the quorum is simply lowered, the network can tolerate + fewer faults. -1. With the negative UNL approach, we want to argue that the inequation -*Oi,j > nj / 2 + ni − qi + -ti,j* is always true to guarantee fork safety, while the negative UNL -protocol runs, i.e. the effective quorum is lowered without weakening the -network's fault tolerance. To make the discussion easier, we rewrite the -inequation as *Oi,j > nj / 2 + (ni − -qi) + min(ti, tj)*, where Oi,j is -dropped from the definition of ti,j because *Oi,j > -min(ti, tj)* always holds under the parameters we will -use. Assuming a validator V is added to the negative UNL, now let's consider the -4 cases: +1. With the negative UNL approach, we want to argue that the inequation + _Oi,j > nj / 2 + ni − qi + + ti,j_ is always true to guarantee fork safety, while the negative UNL + protocol runs, i.e. the effective quorum is lowered without weakening the + network's fault tolerance. To make the discussion easier, we rewrite the + inequation as _Oi,j > nj / 2 + (ni − + qi) + min(ti, tj)_, where Oi,j is + dropped from the definition of ti,j because _Oi,j > + min(ti, tj)_ always holds under the parameters we will + use. Assuming a validator V is added to the negative UNL, now let's consider the + 4 cases: - 1. V is not on UNLi nor UNLj + 1. V is not on UNLi nor UNLj - The inequation holds because none of the variables change. + The inequation holds because none of the variables change. - 1. V is on UNLi but not on UNLj + 1. V is on UNLi but not on UNLj - The value of *(ni − qi)* is smaller. The value of - *min(ti, tj)* could be smaller too. Other - variables do not change. Overall, the left side of the inequation does - not change, but the right side is smaller. So the inequation holds. - - 1. V is not on UNLi but on UNLj + The value of *(ni − qi)* is smaller. The value of + *min(ti, tj)* could be smaller too. Other + variables do not change. Overall, the left side of the inequation does + not change, but the right side is smaller. So the inequation holds. - The value of *nj / 2* is smaller. The value of - *min(ti, tj)* could be smaller too. Other - variables do not change. Overall, the left side of the inequation does - not change, but the right side is smaller. So the inequation holds. - - 1. V is on both UNLi and UNLj + 1. V is not on UNLi but on UNLj - The value of *Oi,j* is reduced by 1. The values of - *nj / 2*, *(ni − qi)*, and - *min(ti, tj)* are reduced by 0.5, 0.2, and 1 - respectively. The right side is reduced by 1.7. Overall, the left side - of the inequation is reduced by 1, and the right side is reduced by 1.7. - So the inequation holds. + The value of *nj / 2* is smaller. The value of + *min(ti, tj)* could be smaller too. Other + variables do not change. 
Overall, the left side of the inequation does + not change, but the right side is smaller. So the inequation holds. - The inequation holds for all the cases. So with the negative UNL approach, - the network's fork safety is preserved, while the quorum is lowered that - increases the network's liveness. + 1. V is on both UNLi and UNLj + + The value of *Oi,j* is reduced by 1. The values of + *nj / 2*, *(ni − qi)*, and + *min(ti, tj)* are reduced by 0.5, 0.2, and 1 + respectively. The right side is reduced by 1.7. Overall, the left side + of the inequation is reduced by 1, and the right side is reduced by 1.7. + So the inequation holds. + + The inequation holds for all the cases. So with the negative UNL approach, + the network's fork safety is preserved, while the quorum is lowered that + increases the network's liveness.
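For reference, the overlap condition from Theorem 8 that each of the four
cases above is checked against can be written in standard notation as

$$
O_{i,j} > \frac{n_j}{2} + n_i - q_i + t_{i,j},
\qquad t_{i,j} = \min\bigl(t_i,\ t_j,\ O_{i,j}\bigr),
$$

where $n_i$, $q_i$, and $t_i$ are the UNL size, quorum, and tolerated fault
count of node $P_i$, and $O_{i,j}$ is the required overlap between the UNLs
of $P_i$ and $P_j$.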

### Question: We have observed that occasionally a validator wanders off on its own chain. How is this case handled by the negative UNL algorithm?

@@ -565,11 +556,11 @@ will be used after that. We want to see the test cases still pass with real network delay. A test case specifies: 1. a UNL with different number of validators for different test cases, -1. a network with zero or more non-validator nodes, +1. a network with zero or more non-validator nodes, 1. a sequence of validator reliability change events (by killing/restarting nodes, or by running modified rippled that does not send all validation messages), -1. the correct outcomes. +1. the correct outcomes. For all the test cases, the correct outcomes are verified by examining logs. We will grep the log to see if the correct negative UNLs are generated, and whether @@ -579,6 +570,7 @@ timing parameters of rippled will be changed to have faster ledger time. Most if not all test cases do not need client transactions. For example, the test cases for the prototype: + 1. A 10-validator UNL. 1. The network does not have other nodes. 1. The validators will be started from the genesis. Once they start to produce @@ -587,11 +579,11 @@ For example, the test cases for the prototype: 1. A sequence of events (or the lack of events) such as a killed validator is added to the negative UNL. -#### Roads Not Taken: Test with Extended CSF +#### Roads Not Taken: Test with Extended CSF We considered testing with the current unit test framework, specifically the [Consensus Simulation Framework](https://github.com/ripple/rippled/blob/develop/src/test/csf/README.md) (CSF). However, the CSF currently can only test the generic consensus algorithm as in the paper: [Analysis of the XRP Ledger Consensus -Protocol](https://arxiv.org/abs/1802.07242). \ No newline at end of file +Protocol](https://arxiv.org/abs/1802.07242). diff --git a/docs/0010-ledger-replay/README.md b/docs/0010-ledger-replay/README.md index 170fd15c43..c82d9b1906 100644 --- a/docs/0010-ledger-replay/README.md +++ b/docs/0010-ledger-replay/README.md @@ -82,7 +82,9 @@ pattern and the way coroutines are implemented, where every yield saves the spot in the code where it left off and every resume jumps back to that spot. ### Sequence Diagram + ![Sequence diagram](./ledger_replay_sequence.png?raw=true "A successful ledger replay") ### Class Diagram + ![Class diagram](./ledger_replay_classes.png?raw=true "Ledger replay classes") diff --git a/docs/CheatSheet.md b/docs/CheatSheet.md index 3b70c7c8f7..60a99f587a 100644 --- a/docs/CheatSheet.md +++ b/docs/CheatSheet.md @@ -16,5 +16,5 @@ ## Function - Minimize external dependencies - * Pass options in the ctor instead of using theConfig - * Use as few other classes as possible + - Pass options in the ctor instead of using theConfig + - Use as few other classes as possible diff --git a/docs/CodingStyle.md b/docs/CodingStyle.md index 0ff50c780d..3c26709047 100644 --- a/docs/CodingStyle.md +++ b/docs/CodingStyle.md @@ -1,18 +1,18 @@ # Coding Standards -Coding standards used here gradually evolve and propagate through +Coding standards used here gradually evolve and propagate through code reviews. Some aspects are enforced more strictly than others. ## Rules -These rules only apply to our own code. We can't enforce any sort of +These rules only apply to our own code. We can't enforce any sort of style on the external repositories and libraries we include. The best guideline is to maintain the standards that are used in those libraries. -* Tab inserts 4 spaces. No tab characters. -* Braces are indented in the [Allman style][1]. -* Modern C++ principles. No naked ```new``` or ```delete```. 
-* Line lengths limited to 80 characters. Exceptions limited to data and tables. +- Tab inserts 4 spaces. No tab characters. +- Braces are indented in the [Allman style][1]. +- Modern C++ principles. No naked `new` or `delete`. +- Line lengths limited to 80 characters. Exceptions limited to data and tables. ## Guidelines @@ -21,17 +21,17 @@ why you're doing it. Think, use common sense, and consider that this your changes will probably need to be maintained long after you've moved on to other projects. -* Use white space and blank lines to guide the eye and keep your intent clear. -* Put private data members at the top of a class, and the 6 public special -members immediately after, in the following order: - * Destructor - * Default constructor - * Copy constructor - * Copy assignment - * Move constructor - * Move assignment -* Don't over-inline by defining large functions within the class -declaration, not even for template classes. +- Use white space and blank lines to guide the eye and keep your intent clear. +- Put private data members at the top of a class, and the 6 public special + members immediately after, in the following order: + - Destructor + - Default constructor + - Copy constructor + - Copy assignment + - Move constructor + - Move assignment +- Don't over-inline by defining large functions within the class + declaration, not even for template classes. ## Formatting @@ -39,44 +39,44 @@ The goal of source code formatting should always be to make things as easy to read as possible. White space is used to guide the eye so that details are not overlooked. Blank lines are used to separate code into "paragraphs." -* Always place a space before and after all binary operators, +- Always place a space before and after all binary operators, especially assignments (`operator=`). -* The `!` operator should be preceded by a space, but not followed by one. -* The `~` operator should be preceded by a space, but not followed by one. -* The `++` and `--` operators should have no spaces between the operator and +- The `!` operator should be preceded by a space, but not followed by one. +- The `~` operator should be preceded by a space, but not followed by one. +- The `++` and `--` operators should have no spaces between the operator and the operand. -* A space never appears before a comma, and always appears after a comma. -* Don't put spaces after a parenthesis. A typical member function call might +- A space never appears before a comma, and always appears after a comma. +- Don't put spaces after a parenthesis. A typical member function call might look like this: `foobar (1, 2, 3);` -* In general, leave a blank line before an `if` statement. -* In general, leave a blank line after a closing brace `}`. -* Do not place code on the same line as any opening or +- In general, leave a blank line before an `if` statement. +- In general, leave a blank line after a closing brace `}`. +- Do not place code on the same line as any opening or closing brace. -* Do not write `if` statements all-on-one-line. The exception to this is when +- Do not write `if` statements all-on-one-line. The exception to this is when you've got a sequence of similar `if` statements, and are aligning them all vertically to highlight their similarities. -* In an `if-else` statement, if you surround one half of the statement with +- In an `if-else` statement, if you surround one half of the statement with braces, you also need to put braces around the other half, to match. 
-* When writing a pointer type, use this spacing: `SomeObject* myObject`. +- When writing a pointer type, use this spacing: `SomeObject* myObject`. Technically, a more correct spacing would be `SomeObject *myObject`, but it makes more sense for the asterisk to be grouped with the type name, since being a pointer is part of the type, not the variable name. The only time that this can lead to any problems is when you're declaring multiple pointers of the same type in the same statement - which leads on to the next rule: -* When declaring multiple pointers, never do so in a single statement, e.g. +- When declaring multiple pointers, never do so in a single statement, e.g. `SomeObject* p1, *p2;` - instead, always split them out onto separate lines and write the type name again, to make it quite clear what's going on, and avoid the danger of missing out any vital asterisks. -* The previous point also applies to references, so always put the `&` next to +- The previous point also applies to references, so always put the `&` next to the type rather than the variable, e.g. `void foo (Thing const& thing)`. And don't put a space on both sides of the `*` or `&` - always put a space after it, but never before it. -* The word `const` should be placed to the right of the thing that it modifies, +- The word `const` should be placed to the right of the thing that it modifies, for consistency. For example `int const` refers to an int which is const. `int const*` is a pointer to an int which is const. `int *const` is a const pointer to an int. -* Always place a space in between the template angle brackets and the type +- Always place a space in between the template angle brackets and the type name. Template code is already hard enough to read! [1]: http://en.wikipedia.org/wiki/Indent_style#Allman_style diff --git a/docs/HeapProfiling.md b/docs/HeapProfiling.md index c8de1eb26f..2871cccaba 100644 --- a/docs/HeapProfiling.md +++ b/docs/HeapProfiling.md @@ -31,7 +31,7 @@ and header under /opt/local/include: $ scons clang profile-jemalloc=/opt/local ----------------------- +--- ## Using the jemalloc library from within the code @@ -60,4 +60,3 @@ Linking against the jemalloc library will override the system's default `malloc()` and related functions with jemalloc's implementation. This is the case even if the code is not instrumented to use jemalloc's specific API. - diff --git a/docs/README.md b/docs/README.md index 55b9e30e04..c95a871729 100644 --- a/docs/README.md +++ b/docs/README.md @@ -7,7 +7,6 @@ Install these dependencies: - [Doxygen](http://www.doxygen.nl): All major platforms have [official binary distributions](http://www.doxygen.nl/download.html#srcbin), or you can build from [source](http://www.doxygen.nl/download.html#srcbin). - - MacOS: We recommend installing via Homebrew: `brew install doxygen`. The executable will be installed in `/usr/local/bin` which is already in the default `PATH`. @@ -21,18 +20,15 @@ Install these dependencies: $ ln -s /Applications/Doxygen.app/Contents/Resources/doxygen /usr/local/bin/doxygen ``` -- [PlantUML](http://plantuml.com): - +- [PlantUML](http://plantuml.com): 1. Install a functioning Java runtime, if you don't already have one. 2. Download [`plantuml.jar`](http://sourceforge.net/projects/plantuml/files/plantuml.jar/download). - [Graphviz](https://www.graphviz.org): - - Linux: Install from your package manager. - Windows: Use an [official installer](https://graphviz.gitlab.io/_pages/Download/Download_windows.html). 
- MacOS: Install via Homebrew: `brew install graphviz`. - ## Docker Instead of installing the above dependencies locally, you can use the official @@ -40,14 +36,16 @@ build environment Docker image, which has all of them installed already. 1. Install [Docker](https://docs.docker.com/engine/installation/) 2. Pull the image: - ``` - sudo docker pull rippleci/rippled-ci-builder:2944b78d22db - ``` -3. Run the image from the project folder: - ``` - sudo docker run -v $PWD:/opt/rippled --rm rippleci/rippled-ci-builder:2944b78d22db - ``` +``` +sudo docker pull rippleci/rippled-ci-builder:2944b78d22db +``` + +3. Run the image from the project folder: + +``` +sudo docker run -v $PWD:/opt/rippled --rm rippleci/rippled-ci-builder:2944b78d22db +``` ## Build diff --git a/docs/build/conan.md b/docs/build/conan.md index 5f1ff7ae98..9dcd2c8f1c 100644 --- a/docs/build/conan.md +++ b/docs/build/conan.md @@ -5,7 +5,6 @@ we should first understand _why_ we use Conan, and to understand that, we need to understand how we use CMake. - ### CMake Technically, you don't need CMake to build this project. @@ -33,9 +32,9 @@ Parameters include: - where to find the compiler and linker - where to find dependencies, e.g. libraries and headers - how to link dependencies, e.g. any special compiler or linker flags that - need to be used with them, including preprocessor definitions + need to be used with them, including preprocessor definitions - how to compile translation units, e.g. with optimizations, debug symbols, - position-independent code, etc. + position-independent code, etc. - on Windows, which runtime library to link with For some of these parameters, like the build system and compiler, @@ -54,7 +53,6 @@ Most humans prefer to put them into a configuration file, once, that CMake can read every time it is configured. For CMake, that file is a [toolchain file][toolchain]. - ### Conan These next few paragraphs on Conan are going to read much like the ones above @@ -79,10 +77,10 @@ Those files include: - A single toolchain file. - For every dependency, a CMake [package configuration file][pcf], - [package version file][pvf], and for every build type, a package - targets file. - Together, these files implement version checking and define `IMPORTED` - targets for the dependencies. + [package version file][pvf], and for every build type, a package + targets file. + Together, these files implement version checking and define `IMPORTED` + targets for the dependencies. The toolchain file itself amends the search path ([`CMAKE_PREFIX_PATH`][prefix_path]) so that [`find_package()`][find_package] diff --git a/docs/build/depend.md b/docs/build/depend.md index 42fd41a26e..2fa14378aa 100644 --- a/docs/build/depend.md +++ b/docs/build/depend.md @@ -2,8 +2,7 @@ We recommend two different methods to depend on libxrpl in your own [CMake][] project. Both methods add a CMake library target named `xrpl::libxrpl`. - -## Conan requirement +## Conan requirement The first method adds libxrpl as a [Conan][] requirement. With this method, there is no need for a Git [submodule][]. @@ -48,7 +47,6 @@ cmake \ cmake --build . --parallel ``` - ## CMake subdirectory The second method adds the [rippled][] project as a CMake @@ -90,7 +88,6 @@ cmake \ cmake --build . 
--parallel ``` - [add_subdirectory]: https://cmake.org/cmake/help/latest/command/add_subdirectory.html [submodule]: https://git-scm.com/book/en/v2/Git-Tools-Submodules [rippled]: https://github.com/ripple/rippled diff --git a/docs/build/environment.md b/docs/build/environment.md index 7301879d09..c6b735ba48 100644 --- a/docs/build/environment.md +++ b/docs/build/environment.md @@ -5,7 +5,6 @@ platforms: Linux, macOS, or Windows. [BUILD.md]: ../../BUILD.md - ## Linux Package ecosystems vary across Linux distributions, @@ -53,11 +52,10 @@ clang --version ### Install Xcode Specific Version (Optional) -If you develop other applications using XCode you might be consistently updating to the newest version of Apple Clang. +If you develop other applications using XCode you might be consistently updating to the newest version of Apple Clang. This will likely cause issues building rippled. You may want to install a specific version of Xcode: 1. **Download Xcode** - - Visit [Apple Developer Downloads](https://developer.apple.com/download/more/) - Sign in with your Apple Developer account - Search for an Xcode version that includes **Apple Clang (Expected Version)** diff --git a/docs/build/install.md b/docs/build/install.md index af0d6f335c..7be01ce726 100644 --- a/docs/build/install.md +++ b/docs/build/install.md @@ -6,7 +6,6 @@ like CentOS. Installing from source is an option for all platforms, and the only supported option for installing custom builds. - ## From source From a source build, you can install rippled and libxrpl using CMake's @@ -21,25 +20,23 @@ The default [prefix][1] is typically `/usr/local` on Linux and macOS and [1]: https://cmake.org/cmake/help/latest/variable/CMAKE_INSTALL_PREFIX.html - ## With the APT package manager -1. Update repositories: +1. Update repositories: sudo apt update -y -2. Install utilities: +2. Install utilities: sudo apt install -y apt-transport-https ca-certificates wget gnupg -3. Add Ripple's package-signing GPG key to your list of trusted keys: +3. Add Ripple's package-signing GPG key to your list of trusted keys: sudo mkdir /usr/local/share/keyrings/ wget -q -O - "https://repos.ripple.com/repos/api/gpg/key/public" | gpg --dearmor > ripple-key.gpg sudo mv ripple-key.gpg /usr/local/share/keyrings - -4. Check the fingerprint of the newly-added key: +4. Check the fingerprint of the newly-added key: gpg /usr/local/share/keyrings/ripple-key.gpg @@ -51,37 +48,34 @@ The default [prefix][1] is typically `/usr/local` on Linux and macOS and uid TechOps Team at Ripple sub rsa3072 2019-02-14 [E] [expires: 2026-02-17] - In particular, make sure that the fingerprint matches. (In the above example, the fingerprint is on the third line, starting with `C001`.) -4. Add the appropriate Ripple repository for your operating system version: +5. Add the appropriate Ripple repository for your operating system version: echo "deb [signed-by=/usr/local/share/keyrings/ripple-key.gpg] https://repos.ripple.com/repos/rippled-deb focal stable" | \ sudo tee -a /etc/apt/sources.list.d/ripple.list The above example is appropriate for **Ubuntu 20.04 Focal Fossa**. 
For other operating systems, replace the word `focal` with one of the following: - - `jammy` for **Ubuntu 22.04 Jammy Jellyfish** - `bionic` for **Ubuntu 18.04 Bionic Beaver** - `bullseye` for **Debian 11 Bullseye** - `buster` for **Debian 10 Buster** If you want access to development or pre-release versions of `rippled`, use one of the following instead of `stable`: - - `unstable` - Pre-release builds ([`release` branch](https://github.com/ripple/rippled/tree/release)) - `nightly` - Experimental/development builds ([`develop` branch](https://github.com/ripple/rippled/tree/develop)) **Warning:** Unstable and nightly builds may be broken at any time. Do not use these builds for production servers. -5. Fetch the Ripple repository. +6. Fetch the Ripple repository. sudo apt -y update -6. Install the `rippled` software package: +7. Install the `rippled` software package: sudo apt -y install rippled -7. Check the status of the `rippled` service: +8. Check the status of the `rippled` service: systemctl status rippled.service @@ -89,24 +83,22 @@ The default [prefix][1] is typically `/usr/local` on Linux and macOS and sudo systemctl start rippled.service -8. Optional: allow `rippled` to bind to privileged ports. +9. Optional: allow `rippled` to bind to privileged ports. This allows you to serve incoming API requests on port 80 or 443. (If you want to do so, you must also update the config file's port settings.) sudo setcap 'cap_net_bind_service=+ep' /opt/ripple/bin/rippled - ## With the YUM package manager -1. Install the Ripple RPM repository: +1. Install the Ripple RPM repository: Choose the appropriate RPM repository for the stability of releases you want: - - `stable` for the latest production release (`master` branch) - `unstable` for pre-release builds (`release` branch) - `nightly` for experimental/development builds (`develop` branch) - *Stable* + _Stable_ cat << REPOFILE | sudo tee /etc/yum.repos.d/ripple.repo [ripple-stable] @@ -118,7 +110,7 @@ The default [prefix][1] is typically `/usr/local` on Linux and macOS and gpgkey=https://repos.ripple.com/repos/rippled-rpm/stable/repodata/repomd.xml.key REPOFILE - *Unstable* + _Unstable_ cat << REPOFILE | sudo tee /etc/yum.repos.d/ripple.repo [ripple-unstable] @@ -130,7 +122,7 @@ The default [prefix][1] is typically `/usr/local` on Linux and macOS and gpgkey=https://repos.ripple.com/repos/rippled-rpm/unstable/repodata/repomd.xml.key REPOFILE - *Nightly* + _Nightly_ cat << REPOFILE | sudo tee /etc/yum.repos.d/ripple.repo [ripple-nightly] @@ -142,18 +134,18 @@ The default [prefix][1] is typically `/usr/local` on Linux and macOS and gpgkey=https://repos.ripple.com/repos/rippled-rpm/nightly/repodata/repomd.xml.key REPOFILE -2. Fetch the latest repo updates: +2. Fetch the latest repo updates: sudo yum -y update -3. Install the new `rippled` package: +3. Install the new `rippled` package: sudo yum install -y rippled -4. Configure the `rippled` service to start on boot: +4. Configure the `rippled` service to start on boot: sudo systemctl enable rippled.service -5. Start the `rippled` service: +5. Start the `rippled` service: sudo systemctl start rippled.service diff --git a/docs/consensus.md b/docs/consensus.md index 4ee5aa70dc..067e15d0c8 100644 --- a/docs/consensus.md +++ b/docs/consensus.md @@ -3,7 +3,7 @@ **This section is a work in progress!!** Consensus is the task of reaching agreement within a distributed system in the -presence of faulty or even malicious participants. 
This document outlines the +presence of faulty or even malicious participants. This document outlines the [XRP Ledger Consensus Algorithm](https://arxiv.org/abs/1802.07242) as implemented in [rippled](https://github.com/ripple/rippled), but focuses on its utility as a generic consensus algorithm independent of the @@ -15,38 +15,38 @@ collectively trusted subnetworks. ## Distributed Agreement A challenge for distributed systems is reaching agreement on changes in shared -state. For the Ripple network, the shared state is the current ledger--account -information, account balances, order books and other financial data. We will +state. For the Ripple network, the shared state is the current ledger--account +information, account balances, order books and other financial data. We will refer to shared distributed state as a /ledger/ throughout the remainder of this document. ![Ledger Chain](images/consensus/ledger_chain.png "Ledger Chain") As shown above, new ledgers are made by applying a set of transactions to the -prior ledger. For the Ripple network, transactions include payments, +prior ledger. For the Ripple network, transactions include payments, modification of account settings, updates to offers and more. In a centralized system, generating the next ledger is trivial since there is a single unique arbiter of which transactions to include and how to apply them to -a ledger. For decentralized systems, participants must resolve disagreements on +a ledger. For decentralized systems, participants must resolve disagreements on the set of transactions to include, the order to apply those transactions, and -even the resulting ledger after applying the transactions. This is even more +even the resulting ledger after applying the transactions. This is even more difficult when some participants are faulty or malicious. -The Ripple network is a decentralized and **trust-full** network. Anyone is free +The Ripple network is a decentralized and **trust-full** network. Anyone is free to join and participants are free to choose a subset of peers that are collectively trusted to not collude in an attempt to defraud the participant. Leveraging this network of trust, the Ripple algorithm has two main components. -* *Consensus* in which network participants agree on the transactions to apply +- _Consensus_ in which network participants agree on the transactions to apply to a prior ledger, based on the positions of their chosen peers. -* *Validation* in which network participants agree on what ledger was +- _Validation_ in which network participants agree on what ledger was generated, based on the ledgers generated by chosen peers. These phases are continually repeated to process transactions submitted to the network, generating successive ledgers and giving rise to the blockchain ledger -history depicted below. In this diagram, time is flowing to the right, but -links between ledgers point backward to the parent. Also note the alternate +history depicted below. In this diagram, time is flowing to the right, but +links between ledgers point backward to the parent. Also note the alternate Ledger 2 that was generated by some participants, but which failed validation and was abandoned. @@ -54,7 +54,7 @@ and was abandoned. The remainder of this section describes the Consensus and Validation algorithms in more detail and is meant as a companion guide to understanding the generic -implementation in `rippled`. The document **does not** discuss correctness, +implementation in `rippled`. 
The document **does not** discuss correctness, fault-tolerance or liveness properties of the algorithms or the full details of how they integrate within `rippled` to support the Ripple Consensus Ledger. @@ -62,76 +62,76 @@ how they integrate within `rippled` to support the Ripple Consensus Ledger. ### Definitions -* The *ledger* is the shared distributed state. Each ledger has a unique ID to - distinguish it from all other ledgers. During consensus, the *previous*, - *prior* or *last-closed* ledger is the most recent ledger seen by consensus +- The _ledger_ is the shared distributed state. Each ledger has a unique ID to + distinguish it from all other ledgers. During consensus, the _previous_, + _prior_ or _last-closed_ ledger is the most recent ledger seen by consensus and is the basis upon which it will build the next ledger. -* A *transaction* is an instruction for an atomic change in the ledger state. A +- A _transaction_ is an instruction for an atomic change in the ledger state. A unique ID distinguishes a transaction from other transactions. -* A *transaction set* is a set of transactions under consideration by consensus. - The goal of consensus is to reach agreement on this set. The generic +- A _transaction set_ is a set of transactions under consideration by consensus. + The goal of consensus is to reach agreement on this set. The generic consensus algorithm does not rely on an ordering of transactions within the set, nor does it specify how to apply a transaction set to a ledger to - generate a new ledger. A unique ID distinguishes a set of transactions from + generate a new ledger. A unique ID distinguishes a set of transactions from all other sets of transactions. -* A *node* is one of the distributed actors running the consensus algorithm. It +- A _node_ is one of the distributed actors running the consensus algorithm. It has a unique ID to distinguish it from all other nodes. -* A *peer* of a node is another node that it has chosen to follow and which it - believes will not collude with other chosen peers. The choice of peers is not +- A _peer_ of a node is another node that it has chosen to follow and which it + believes will not collude with other chosen peers. The choice of peers is not symmetric, since participants can decide on their chosen sets independently. -* A /position/ is the current belief of the next ledger's transaction set and +- A /position/ is the current belief of the next ledger's transaction set and close time. Position can refer to the node's own position or the position of a peer. -* A *proposal* is one of a sequence of positions a node shares during consensus. +- A _proposal_ is one of a sequence of positions a node shares during consensus. An initial proposal contains the starting position taken by a node before it - considers any peer positions. If a node subsequently updates its position in - response to its peers, it will issue an updated proposal. A proposal is + considers any peer positions. If a node subsequently updates its position in + response to its peers, it will issue an updated proposal. A proposal is uniquely identified by the ID of the proposing node, the ID of the position taken, the ID of the prior ledger the proposal is for, and the sequence number of the proposal. -* A *dispute* is a transaction that is either not part of a node's position or +- A _dispute_ is a transaction that is either not part of a node's position or not in a peer's position. 
During consensus, the node will add or remove disputed transactions from its position based on that transaction's support amongst its peers. Note that most types have an ID as a lightweight identifier of instances of that -type. Consensus often operates on the IDs directly since the underlying type is -potentially expensive to share over the network. For example, proposal's only -contain the ID of the position of a peer. Since many peers likely have the same +type. Consensus often operates on the IDs directly since the underlying type is +potentially expensive to share over the network. For example, proposal's only +contain the ID of the position of a peer. Since many peers likely have the same position, this reduces the need to send the full transaction set multiple times. Instead, a node can request the transaction set from the network if necessary. -### Overview +### Overview ![Consensus Overview](images/consensus/consensus_overview.png "Consensus Overview") The diagram above is an overview of the consensus process from the perspective -of a single participant. Recall that during a single consensus round, a node is +of a single participant. Recall that during a single consensus round, a node is trying to agree with its peers on which transactions to apply to its prior -ledger when generating the next ledger. It also attempts to agree on the -[network time when the ledger closed](#effective_close_time). There are +ledger when generating the next ledger. It also attempts to agree on the +[network time when the ledger closed](#effective_close_time). There are 3 main phases to a consensus round: -* A call to `startRound` places the node in the `Open` phase. In this phase, -the node is waiting for transactions to include in its open ledger. -* At some point, the node will `Close` the open ledger and transition to the -`Establish` phase. In this phase, the node shares/receives peer proposals on -which transactions should be accepted in the closed ledger. -* At some point, the node determines it has reached consensus with its peers on -which transactions to include. It transitions to the `Accept` phase. In this -phase, the node works on applying the transactions to the prior ledger to -generate a new closed ledger. Once the new ledger is completed, the node shares -the validated ledger hash with the network and makes a call to `startRound` to -start the cycle again for the next ledger. +- A call to `startRound` places the node in the `Open` phase. In this phase, + the node is waiting for transactions to include in its open ledger. +- At some point, the node will `Close` the open ledger and transition to the + `Establish` phase. In this phase, the node shares/receives peer proposals on + which transactions should be accepted in the closed ledger. +- At some point, the node determines it has reached consensus with its peers on + which transactions to include. It transitions to the `Accept` phase. In this + phase, the node works on applying the transactions to the prior ledger to + generate a new closed ledger. Once the new ledger is completed, the node shares + the validated ledger hash with the network and makes a call to `startRound` to + start the cycle again for the next ledger. Throughout, a heartbeat timer calls `timerEntry` at a regular frequency to drive the process forward. 
Although the `startRound` call occurs at arbitrary times based on when the initial round began and the time it takes to apply transactions, the transitions from `Open` to `Establish` and `Establish` to -`Accept` only occur during calls to `timerEntry`. Similarly, transactions can +`Accept` only occur during calls to `timerEntry`. Similarly, transactions can arrive at arbitrary times, independent of the heartbeat timer. Transactions received after the `Open` to `Close` transition and not part of peer proposals -won't be considered until the next consensus round. They are represented above +won't be considered until the next consensus round. They are represented above by the light green triangles. Peer proposals are issued by a node during a `timerEntry` call, but since peers @@ -139,16 +139,16 @@ do not synchronize `timerEntry` calls, they are received by other peers at arbitrary times. Peer proposals are only considered if received prior to the `Establish` to `Accept` transition, and only if the peer is working on the same prior ledger. Peer proposals received after consensus is reached will not be -meaningful and are represented above by the circle with the X in it. Only +meaningful and are represented above by the circle with the X in it. Only proposals from chosen peers are considered. -### Effective Close Time ### {#effective_close_time} - +### Effective Close Time ### {#effective_close_time} + In addition to agreeing on a transaction set, each consensus round tries to -agree on the time the ledger closed. Each node calculates its own close time -when it closes the open ledger. This exact close time is rounded to the nearest -multiple of the current *effective close time resolution*. It is this -*effective close time* that nodes seek to agree on. This allows servers to +agree on the time the ledger closed. Each node calculates its own close time +when it closes the open ledger. This exact close time is rounded to the nearest +multiple of the current _effective close time resolution_. It is this +_effective close time_ that nodes seek to agree on. This allows servers to derive a common time for a ledger without the need for perfectly synchronized clocks. As depicted below, the 3 pink arrows represent exact close times from 3 consensus nodes that round to the same effective close time given the current @@ -158,9 +158,9 @@ different effective close time given the current resolution. ![Effective Close Time](images/consensus/EffCloseTime.png "Effective Close Time") The effective close time is part of the node's position and is shared with peers -in its proposals. Just like the position on the consensus transaction set, a +in its proposals. Just like the position on the consensus transaction set, a node will update its close time position in response to its peers' effective -close time positions. Peers can agree to disagree on the close time, in which +close time positions. Peers can agree to disagree on the close time, in which case the effective close time is taken as 1 second past the prior close. The close time resolution is itself dynamic, decreasing (coarser) resolution in @@ -173,12 +173,12 @@ reach close time consensus. Internally, a node operates under one of the following consensus modes. Either of the first two modes may be chosen when a consensus round starts. -* *Proposing* indicates the node is a full-fledged consensus participant. It +- _Proposing_ indicates the node is a full-fledged consensus participant. It takes on positions and sends proposals to its peers. 
-* *Observing* indicates the node is a passive consensus participant. It +- _Observing_ indicates the node is a passive consensus participant. It maintains a position internally, but does not propose that position to its peers. Instead, it receives peer proposals and updates its position - to track the majority of its peers. This may be preferred if the node is only + to track the majority of its peers. This may be preferred if the node is only being used to track the state of the network or during a start-up phase while it is still synchronizing with the network. @@ -186,14 +186,14 @@ The other two modes are set internally during the consensus round when the node believes it is no longer working on the dominant ledger chain based on peer validations. It checks this on every call to `timerEntry`. -* *Wrong Ledger* indicates the node is not working on the correct prior ledger - and does not have it available. It requests that ledger from the network, but - continues to work towards consensus this round while waiting. If it had been - *proposing*, it will send a special "bowout" proposal to its peers to indicate +- _Wrong Ledger_ indicates the node is not working on the correct prior ledger + and does not have it available. It requests that ledger from the network, but + continues to work towards consensus this round while waiting. If it had been + _proposing_, it will send a special "bowout" proposal to its peers to indicate its change in mode for the rest of this round. For the duration of the round, it defers to peer positions for determining the consensus outcome as if it - were just *observing*. -* *Switch Ledger* indicates that the node has acquired the correct prior ledger + were just _observing_. +- _Switch Ledger_ indicates that the node has acquired the correct prior ledger from the network. Although it now has the correct prior ledger, the fact that it had the wrong one at some point during this round means it is likely behind and should defer to peer positions for determining the consensus outcome. @@ -201,7 +201,7 @@ validations. It checks this on every call to `timerEntry`. ![Consensus Modes](images/consensus/consensus_modes.png "Consensus Modes") Once either wrong ledger or switch ledger are reached, the node cannot -return to proposing or observing until the next consensus round. However, +return to proposing or observing until the next consensus round. However, the node could change its view of the correct prior ledger, so going from switch ledger to wrong ledger and back again is possible. @@ -215,16 +215,16 @@ decide how best to generate the next ledger once it declares consensus. ### Phases As depicted in the overview diagram, consensus is best viewed as a progression -through 3 phases. There are 4 public methods of the generic consensus algorithm +through 3 phases. There are 4 public methods of the generic consensus algorithm that determine this progression -* `startRound` begins a consensus round. -* `timerEntry` is called at a regular frequency (`LEDGER_MIN_CLOSE`) and is the - only call to consensus that can change the phase from `Open` to `Establish` +- `startRound` begins a consensus round. +- `timerEntry` is called at a regular frequency (`LEDGER_MIN_CLOSE`) and is the + only call to consensus that can change the phase from `Open` to `Establish` or `Accept`. 
-* `peerProposal` is called whenever a peer proposal is received and is what +- `peerProposal` is called whenever a peer proposal is received and is what allows a node to update its position in a subsequent `timerEntry` call. -* `gotTxSet` is called when a transaction set is received from the network. This +- `gotTxSet` is called when a transaction set is received from the network. This is typically in response to a prior request from the node to acquire the transaction set corresponding to a disagreeing peer's position. @@ -234,13 +234,13 @@ actions are taken in response to these calls. #### Open The `Open` phase is a quiescent period to allow transactions to build up in the -node's open ledger. The duration is a trade-off between latency and throughput. +node's open ledger. The duration is a trade-off between latency and throughput. A shorter window reduces the latency to generating the next ledger, but also reduces transaction throughput due to fewer transactions accepted into the ledger. A call to `startRound` would forcibly begin the next consensus round, skipping -completion of the current round. This is not expected during normal operation. +completion of the current round. This is not expected during normal operation. Calls to `peerProposal` or `gotTxSet` simply store the proposal or transaction set for use in the coming `Establish` phase. @@ -254,28 +254,27 @@ the ledger. Under normal circumstances, the open ledger period ends when one of the following is true -* if there are transactions in the open ledger and more than `LEDGER_MIN_CLOSE` - have elapsed. This is the typical behavior. -* if there are no open transactions and a suitably longer idle interval has - elapsed. This increases the opportunity to get some transaction into +- if there are transactions in the open ledger and more than `LEDGER_MIN_CLOSE` + have elapsed. This is the typical behavior. +- if there are no open transactions and a suitably longer idle interval has + elapsed. This increases the opportunity to get some transaction into the next ledger and avoids doing useless work closing an empty ledger. -* if more than half the number of prior round peers have already closed or finished +- if more than half the number of prior round peers have already closed or finished this round. This indicates the node is falling behind and needs to catch up. - When closing the ledger, the node takes its initial position based on the transactions in the open ledger and uses the current time as -its initial close time estimate. If in the proposing mode, the node shares its -initial position with peers. Now that the node has taken a position, it will -consider any peer positions for this round that arrived earlier. The node +its initial close time estimate. If in the proposing mode, the node shares its +initial position with peers. Now that the node has taken a position, it will +consider any peer positions for this round that arrived earlier. The node generates disputed transactions for each transaction not in common with a peer's -position. The node also records the vote of each peer for each disputed +position. The node also records the vote of each peer for each disputed transaction. -In the example below, we suppose our node has closed with transactions 1,2 and 3. It creates disputes +In the example below, we suppose our node has closed with transactions 1,2 and 3. It creates disputes for transactions 2,3 and 4, since at least one peer position differs on each. 
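As a rough illustration of this bookkeeping (independent of the exact positions in the diagram that follows), the sketch below treats a position as a plain set of integer transaction IDs and checks, for each peer, which transactions the two positions disagree on. The types and peer names here are hypothetical simplifications; the real implementation works with hash-identified transaction sets and dedicated dispute-tracking objects.

```
#include <cstdint>
#include <iostream>
#include <map>
#include <set>
#include <string>

// Hypothetical, simplified types: transactions are plain integer IDs and a
// position is just the set of IDs a node wants in the next ledger.
using TxId = std::uint32_t;
using Position = std::set<TxId>;

int main()
{
    // Hypothetical positions (not necessarily those shown in the diagram).
    Position const ours{1, 2, 3};
    std::map<std::string, Position> const peers{
        {"p1", {1, 2}}, {"p2", {1, 2, 3, 4}}, {"p3", {1, 2, 3}}};

    // A transaction is disputed if our position and at least one peer's
    // position disagree on whether it is included.
    std::set<TxId> disputed;
    for (auto const& [name, theirs] : peers)
    {
        Position all = ours;
        all.insert(theirs.begin(), theirs.end());
        for (TxId tx : all)
            if ((ours.count(tx) > 0) != (theirs.count(tx) > 0))
                disputed.insert(tx);
    }

    // For each disputed transaction, count how many voters (ourself plus
    // peers) include it; a later threshold check uses these votes to add or
    // remove transactions from our position.
    for (TxId tx : disputed)
    {
        std::size_t votes = ours.count(tx);
        for (auto const& [name, theirs] : peers)
            votes += theirs.count(tx);
        std::cout << "tx " << tx << ": " << votes << " of "
                  << (peers.size() + 1) << " voters include it\n";
    }
}
```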
-##### disputes ##### {#disputes_image} +##### disputes ##### {#disputes_image} ![Disputes](images/consensus/disputes.png "Disputes") @@ -286,22 +285,22 @@ exchanges proposals with peers in an attempt to reach agreement on the consensus transactions and effective close time. A call to `startRound` would forcibly begin the next consensus round, skipping -completion of the current round. This is not expected during normal operation. +completion of the current round. This is not expected during normal operation. Calls to `peerProposal` or `gotTxSet` that reflect new positions will generate disputed transactions for any new disagreements and will update the peer's vote for all disputed transactions. A call to `timerEntry` first checks that the node is working from the correct -prior ledger. If not, the node will update the mode and request the correct -ledger. Otherwise, the node updates the node's position and considers whether -to switch to the `Accepted` phase and declare consensus reached. However, at -least `LEDGER_MIN_CONSENSUS` time must have elapsed before doing either. This +prior ledger. If not, the node will update the mode and request the correct +ledger. Otherwise, the node updates the node's position and considers whether +to switch to the `Accepted` phase and declare consensus reached. However, at +least `LEDGER_MIN_CONSENSUS` time must have elapsed before doing either. This allows peers an opportunity to take an initial position and share it. ##### Update Position In order to achieve consensus, the node is looking for a transaction set that is -supported by a super-majority of peers. The node works towards this set by +supported by a super-majority of peers. The node works towards this set by adding or removing disputed transactions from its position based on an increasing threshold for inclusion. @@ -310,23 +309,23 @@ increasing threshold for inclusion. By starting with a lower threshold, a node initially allows a wide set of transactions into its position. If the establish round continues and the node is "stuck", a higher threshold can focus on accepting transactions with the most -support. The constants that define the thresholds and durations at which the +support. The constants that define the thresholds and durations at which the thresholds change are given by `AV_XXX_CONSENSUS_PCT` and `AV_XXX_CONSENSUS_TIME` respectively, where `XXX` is `INIT`,`MID`,`LATE` and -`STUCK`. The effective close time position is updated using the same +`STUCK`. The effective close time position is updated using the same thresholds. Given the [example disputes above](#disputes_image) and an initial threshold of 50%, our node would retain its position since transaction 1 was not in -dispute and transactions 2 and 3 have 75% support. Since its position did not -change, it would not need to send a new proposal to peers. Peer C would not +dispute and transactions 2 and 3 have 75% support. Since its position did not +change, it would not need to send a new proposal to peers. Peer C would not change either. Peer A would add transaction 3 to its position and Peer B would remove transaction 4 from its position; both would then send an updated position. Conversely, if the diagram reflected a later call to =timerEntry= that occurs in the stuck region with a threshold of say 95%, our node would remove transactions -2 and 3 from its candidate set and send an updated position. Likewise, all the +2 and 3 from its candidate set and send an updated position. 
Likewise, all the other peers would end up with only transaction 1 in their position. Lastly, if our node were not in the proposing mode, it would not include its own @@ -336,7 +335,7 @@ our node would maintain its position of transactions 1, 2 and 3. ##### Checking Consensus After updating its position, the node checks for supermajority agreement with -its peers on its current position. This agreement is of the exact transaction +its peers on its current position. This agreement is of the exact transaction set, not just the support of individual transactions. That is, if our position is a subset of a peer's position, that counts as a disagreement. Also recall that effective close time agreement allows a supermajority of participants @@ -344,10 +343,10 @@ agreeing to disagree. Consensus is declared when the following 3 clauses are true: -* `LEDGER_MIN_CONSENSUS` time has elapsed in the establish phase -* At least 75% of the prior round proposers have proposed OR this establish +- `LEDGER_MIN_CONSENSUS` time has elapsed in the establish phase +- At least 75% of the prior round proposers have proposed OR this establish phase is `LEDGER_MIN_CONSENSUS` longer than the last round's establish phase -* `minimumConsensusPercentage` of ourself and our peers share the same position +- `minimumConsensusPercentage` of ourself and our peers share the same position The middle condition ensures slower peers have a chance to share positions, but prevents waiting too long on peers that have disconnected. Additionally, a node @@ -364,22 +363,22 @@ logic. Once consensus is reached (or moved on), the node switches to the `Accept` phase and signals to the implementing code that the round is complete. That code is responsible for using the consensus transaction set to generate the next ledger -and calling `startRound` to begin the next round. The implementation has total +and calling `startRound` to begin the next round. The implementation has total freedom on ordering transactions, deciding what to do if consensus moved on, determining whether to retry or abandon local transactions that did not make the consensus set and updating any internal state based on the consensus progress. #### Accept -The `Accept` phase is the terminal phase of the consensus algorithm. Calls to +The `Accept` phase is the terminal phase of the consensus algorithm. Calls to `timerEntry`, `peerProposal` and `gotTxSet` will not change the internal -consensus state while in the accept phase. The expectation is that the +consensus state while in the accept phase. The expectation is that the application specific code is working to generate the new ledger based on the consensus outcome. Once complete, that code should make a call to `startRound` to kick off the next consensus round. The `startRound` call includes the new prior ledger, prior ledger ID and whether the round should begin in the -proposing or observing mode. After setting some initial state, the phase -transitions to `Open`. The node will also check if the provided prior ledger +proposing or observing mode. After setting some initial state, the phase +transitions to `Open`. The node will also check if the provided prior ledger and ID are correct, updating the mode and requesting the proper ledger from the network if necessary. @@ -448,9 +447,9 @@ struct TxSet ### Ledger The `Ledger` type represents the state shared amongst the -distributed participants. Notice that the details of how the next ledger is +distributed participants. 
Notice that the details of how the next ledger is generated from the prior ledger and the consensus accepted transaction set is -not part of the interface. Within the generic code, this type is primarily used +not part of the interface. Within the generic code, this type is primarily used to know that peers are working on the same tip of the ledger chain and to provide some basic timing data for consensus. @@ -626,7 +625,7 @@ struct Adaptor // Called when consensus operating mode changes void onModeChange(ConsensuMode before, ConsensusMode after); - + // Called when ledger closes. Implementation should generate an initial Result // with position based on the current open ledger's transactions. ConsensusResult onClose(Ledger const &, Ledger const & prev, ConsensusMode mode); @@ -657,27 +656,24 @@ struct Adaptor The implementing class hides many details of the peer communication model from the generic code. -* The `share` member functions are responsible for sharing the given type with a +- The `share` member functions are responsible for sharing the given type with a node's peers, but are agnostic to the mechanism. Ideally, messages are delivered - faster than `LEDGER_GRANULARITY`. -* The generic code does not specify how transactions are submitted by clients, + faster than `LEDGER_GRANULARITY`. +- The generic code does not specify how transactions are submitted by clients, propagated through the network or stored in the open ledger. Indeed, the open ledger is only conceptual from the perspective of the generic code---the initial position and transaction set are opaquely generated in a `Consensus::Result` instance returned from the `onClose` callback. -* The calls to `acquireLedger` and `acquireTxSet` only have non-trivial return - if the ledger or transaction set of interest is available. The implementing +- The calls to `acquireLedger` and `acquireTxSet` only have non-trivial return + if the ledger or transaction set of interest is available. The implementing class is free to block while acquiring, or return the empty option while - servicing the request asynchronously. Due to legacy reasons, the two calls + servicing the request asynchronously. Due to legacy reasons, the two calls are not symmetric. `acquireTxSet` requires the host application to call `gotTxSet` when an asynchronous `acquire` completes. Conversely, `acquireLedger` will be called again later by the consensus code if it still desires the ledger with the hope that the asynchronous acquisition is complete. - ## Validation Coming Soon! - - diff --git a/external/README.md b/external/README.md index a3d04da264..99ce2c337e 100644 --- a/external/README.md +++ b/external/README.md @@ -3,8 +3,8 @@ The subdirectories in this directory contain copies of external libraries used by rippled. 
-| Folder | Upstream | Description | -|:----------------|:---------------------------------------------|:------------| -| `antithesis-sdk`| [Project](https://github.com/antithesishq/antithesis-sdk-cpp/) | [Antithesis](https://antithesis.com/docs/using_antithesis/sdk/cpp/overview.html) SDK for C++ | -| `ed25519-donna` | [Project](https://github.com/floodyberry/ed25519-donna) | [Ed25519](http://ed25519.cr.yp.to/) digital signatures | -| `secp256k1` | [Project](https://github.com/bitcoin-core/secp256k1) | ECDSA digital signatures using the **secp256k1** curve | +| Folder | Upstream | Description | +| :--------------- | :------------------------------------------------------------- | :------------------------------------------------------------------------------------------- | +| `antithesis-sdk` | [Project](https://github.com/antithesishq/antithesis-sdk-cpp/) | [Antithesis](https://antithesis.com/docs/using_antithesis/sdk/cpp/overview.html) SDK for C++ | +| `ed25519-donna` | [Project](https://github.com/floodyberry/ed25519-donna) | [Ed25519](http://ed25519.cr.yp.to/) digital signatures | +| `secp256k1` | [Project](https://github.com/bitcoin-core/secp256k1) | ECDSA digital signatures using the **secp256k1** curve | diff --git a/external/antithesis-sdk/README.md b/external/antithesis-sdk/README.md index eb0237868d..46056ec512 100644 --- a/external/antithesis-sdk/README.md +++ b/external/antithesis-sdk/README.md @@ -1,8 +1,9 @@ # Antithesis C++ SDK This library provides methods for C++ programs to configure the [Antithesis](https://antithesis.com) platform. It contains three kinds of functionality: -* Assertion macros that allow you to define test properties about your software or workload. -* Randomness functions for requesting both structured and unstructured randomness from the Antithesis platform. -* Lifecycle functions that inform the Antithesis environment that particular test phases or milestones have been reached. + +- Assertion macros that allow you to define test properties about your software or workload. +- Randomness functions for requesting both structured and unstructured randomness from the Antithesis platform. +- Lifecycle functions that inform the Antithesis environment that particular test phases or milestones have been reached. For general usage guidance see the [Antithesis C++ SDK Documentation](https://antithesis.com/docs/using_antithesis/sdk/cpp/overview/) diff --git a/external/ed25519-donna/README.md b/external/ed25519-donna/README.md index e09fc27e31..31b2431632 100644 --- a/external/ed25519-donna/README.md +++ b/external/ed25519-donna/README.md @@ -1,12 +1,12 @@ -[ed25519](http://ed25519.cr.yp.to/) is an -[Elliptic Curve Digital Signature Algortithm](http://en.wikipedia.org/wiki/Elliptic_Curve_DSA), -developed by [Dan Bernstein](http://cr.yp.to/djb.html), -[Niels Duif](http://www.nielsduif.nl/), -[Tanja Lange](http://hyperelliptic.org/tanja), -[Peter Schwabe](http://www.cryptojedi.org/users/peter/), +[ed25519](http://ed25519.cr.yp.to/) is an +[Elliptic Curve Digital Signature Algortithm](http://en.wikipedia.org/wiki/Elliptic_Curve_DSA), +developed by [Dan Bernstein](http://cr.yp.to/djb.html), +[Niels Duif](http://www.nielsduif.nl/), +[Tanja Lange](http://hyperelliptic.org/tanja), +[Peter Schwabe](http://www.cryptojedi.org/users/peter/), and [Bo-Yin Yang](http://www.iis.sinica.edu.tw/pages/byyang/). -This project provides performant, portable 32-bit & 64-bit implementations. 
All implementations are +This project provides performant, portable 32-bit & 64-bit implementations. All implementations are of course constant time in regard to secret data. #### Performance @@ -52,35 +52,35 @@ are made. #### Compilation -No configuration is needed **if you are compiling against OpenSSL**. +No configuration is needed **if you are compiling against OpenSSL**. ##### Hash Options If you are not compiling aginst OpenSSL, you will need a hash function. -To use a simple/**slow** implementation of SHA-512, use `-DED25519_REFHASH` when compiling `ed25519.c`. +To use a simple/**slow** implementation of SHA-512, use `-DED25519_REFHASH` when compiling `ed25519.c`. This should never be used except to verify the code works when OpenSSL is not available. -To use a custom hash function, use `-DED25519_CUSTOMHASH` when compiling `ed25519.c` and put your +To use a custom hash function, use `-DED25519_CUSTOMHASH` when compiling `ed25519.c` and put your custom hash implementation in ed25519-hash-custom.h. The hash must have a 512bit digest and implement - struct ed25519_hash_context; + struct ed25519_hash_context; - void ed25519_hash_init(ed25519_hash_context *ctx); - void ed25519_hash_update(ed25519_hash_context *ctx, const uint8_t *in, size_t inlen); - void ed25519_hash_final(ed25519_hash_context *ctx, uint8_t *hash); - void ed25519_hash(uint8_t *hash, const uint8_t *in, size_t inlen); + void ed25519_hash_init(ed25519_hash_context *ctx); + void ed25519_hash_update(ed25519_hash_context *ctx, const uint8_t *in, size_t inlen); + void ed25519_hash_final(ed25519_hash_context *ctx, uint8_t *hash); + void ed25519_hash(uint8_t *hash, const uint8_t *in, size_t inlen); ##### Random Options If you are not compiling aginst OpenSSL, you will need a random function for batch verification. -To use a custom random function, use `-DED25519_CUSTOMRANDOM` when compiling `ed25519.c` and put your +To use a custom random function, use `-DED25519_CUSTOMRANDOM` when compiling `ed25519.c` and put your custom hash implementation in ed25519-randombytes-custom.h. The random function must implement: - void ED25519_FN(ed25519_randombytes_unsafe) (void *p, size_t len); + void ED25519_FN(ed25519_randombytes_unsafe) (void *p, size_t len); -Use `-DED25519_TEST` when compiling `ed25519.c` to use a deterministically seeded, non-thread safe CSPRNG +Use `-DED25519_TEST` when compiling `ed25519.c` to use a deterministically seeded, non-thread safe CSPRNG variant of Bob Jenkins [ISAAC](http://en.wikipedia.org/wiki/ISAAC_%28cipher%29) ##### Minor options @@ -91,80 +91,79 @@ Use `-DED25519_FORCE_32BIT` to force the use of 32 bit routines even when compil ##### 32-bit - gcc ed25519.c -m32 -O3 -c + gcc ed25519.c -m32 -O3 -c ##### 64-bit - gcc ed25519.c -m64 -O3 -c + gcc ed25519.c -m64 -O3 -c ##### SSE2 - gcc ed25519.c -m32 -O3 -c -DED25519_SSE2 -msse2 - gcc ed25519.c -m64 -O3 -c -DED25519_SSE2 + gcc ed25519.c -m32 -O3 -c -DED25519_SSE2 -msse2 + gcc ed25519.c -m64 -O3 -c -DED25519_SSE2 clang and icc are also supported - #### Usage To use the code, link against `ed25519.o -mbits` and: - #include "ed25519.h" + #include "ed25519.h" Add `-lssl -lcrypto` when using OpenSSL (Some systems don't need -lcrypto? It might be trial and error). 
To generate a private key, simply generate 32 bytes from a secure cryptographic source: - ed25519_secret_key sk; - randombytes(sk, sizeof(ed25519_secret_key)); + ed25519_secret_key sk; + randombytes(sk, sizeof(ed25519_secret_key)); To generate a public key: - ed25519_public_key pk; - ed25519_publickey(sk, pk); + ed25519_public_key pk; + ed25519_publickey(sk, pk); To sign a message: - ed25519_signature sig; - ed25519_sign(message, message_len, sk, pk, signature); + ed25519_signature sig; + ed25519_sign(message, message_len, sk, pk, signature); To verify a signature: - int valid = ed25519_sign_open(message, message_len, pk, signature) == 0; + int valid = ed25519_sign_open(message, message_len, pk, signature) == 0; To batch verify signatures: - const unsigned char *mp[num] = {message1, message2..} - size_t ml[num] = {message_len1, message_len2..} - const unsigned char *pkp[num] = {pk1, pk2..} - const unsigned char *sigp[num] = {signature1, signature2..} - int valid[num] + const unsigned char *mp[num] = {message1, message2..} + size_t ml[num] = {message_len1, message_len2..} + const unsigned char *pkp[num] = {pk1, pk2..} + const unsigned char *sigp[num] = {signature1, signature2..} + int valid[num] - /* valid[i] will be set to 1 if the individual signature was valid, 0 otherwise */ - int all_valid = ed25519_sign_open_batch(mp, ml, pkp, sigp, num, valid) == 0; + /* valid[i] will be set to 1 if the individual signature was valid, 0 otherwise */ + int all_valid = ed25519_sign_open_batch(mp, ml, pkp, sigp, num, valid) == 0; -**Note**: Batch verification uses `ed25519_randombytes_unsafe`, implemented in -`ed25519-randombytes.h`, to generate random scalars for the verification code. +**Note**: Batch verification uses `ed25519_randombytes_unsafe`, implemented in +`ed25519-randombytes.h`, to generate random scalars for the verification code. The default implementation now uses OpenSSLs `RAND_bytes`. Unlike the [SUPERCOP](http://bench.cr.yp.to/supercop.html) version, signatures are -not appended to messages, and there is no need for padding in front of messages. -Additionally, the secret key does not contain a copy of the public key, so it is +not appended to messages, and there is no need for padding in front of messages. +Additionally, the secret key does not contain a copy of the public key, so it is 32 bytes instead of 64 bytes, and the public key must be provided to the signing function. ##### Curve25519 -Curve25519 public keys can be generated thanks to -[Adam Langley](http://www.imperialviolet.org/2013/05/10/fastercurve25519.html) +Curve25519 public keys can be generated thanks to +[Adam Langley](http://www.imperialviolet.org/2013/05/10/fastercurve25519.html) leveraging Ed25519's precomputed basepoint scalar multiplication. - curved25519_key sk, pk; - randombytes(sk, sizeof(curved25519_key)); - curved25519_scalarmult_basepoint(pk, sk); + curved25519_key sk, pk; + randombytes(sk, sizeof(curved25519_key)); + curved25519_scalarmult_basepoint(pk, sk); -Note the name is curved25519, a combination of curve and ed25519, to prevent +Note the name is curved25519, a combination of curve and ed25519, to prevent name clashes. Performance is slightly faster than short message ed25519 signing due to both using the same code for the scalar multiply. @@ -180,4 +179,4 @@ with extreme values to ensure they function correctly. SSE2 is now supported. 
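Putting the snippets from the Usage section above together, a minimal self-contained program might look like the following. This is only a sketch: the fixed-pattern secret key stands in for bytes drawn from a secure random source, and the required link flags depend on how the library was compiled (see the Compilation section above).

```
// Sketch combining the calls documented above; compile together with
// ed25519.o (add -lssl -lcrypto when built against OpenSSL).
#include <cstring>
#include <iostream>

#include "ed25519.h"

int main()
{
    ed25519_secret_key sk;
    std::memset(sk, 0x42, sizeof(sk));  // placeholder only -- a real
                                        // application must fill this from a
                                        // cryptographically secure source

    ed25519_public_key pk;
    ed25519_publickey(sk, pk);

    static unsigned char const message[] = "a test message";
    size_t const message_len = sizeof(message) - 1;  // exclude trailing NUL

    ed25519_signature sig;
    ed25519_sign(message, message_len, sk, pk, sig);

    bool const valid = ed25519_sign_open(message, message_len, pk, sig) == 0;
    std::cout << (valid ? "signature verifies" : "signature REJECTED") << "\n";
    return valid ? 0 : 1;
}
```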
#### Papers -[Available on the Ed25519 website](http://ed25519.cr.yp.to/papers.html) \ No newline at end of file +[Available on the Ed25519 website](http://ed25519.cr.yp.to/papers.html) diff --git a/external/ed25519-donna/fuzz/README.md b/external/ed25519-donna/fuzz/README.md index 306ddfe08c..0a5cd49177 100644 --- a/external/ed25519-donna/fuzz/README.md +++ b/external/ed25519-donna/fuzz/README.md @@ -1,78 +1,78 @@ This code fuzzes ed25519-donna (and optionally ed25519-donna-sse2) against the ref10 implementations of -[curve25519](https://github.com/floodyberry/supercop/tree/master/crypto_scalarmult/curve25519/ref10) and +[curve25519](https://github.com/floodyberry/supercop/tree/master/crypto_scalarmult/curve25519/ref10) and [ed25519](https://github.com/floodyberry/supercop/tree/master/crypto_sign/ed25519/ref10). Curve25519 tests that generating a public key from a secret key # Building -## *nix + PHP +## \*nix + PHP `php build-nix.php (required parameters) (optional parameters)` Required parameters: -* `--function=[curve25519,ed25519]` -* `--bits=[32,64]` +- `--function=[curve25519,ed25519]` +- `--bits=[32,64]` Optional parameters: -* `--with-sse2` +- `--with-sse2` - Also fuzz against ed25519-donna-sse2 -* `--with-openssl` + Also fuzz against ed25519-donna-sse2 - Build with OpenSSL's SHA-512. +- `--with-openssl` - Default: Reference SHA-512 implementation (slow!) + Build with OpenSSL's SHA-512. -* `--compiler=[gcc,clang,icc]` + Default: Reference SHA-512 implementation (slow!) - Default: gcc +- `--compiler=[gcc,clang,icc]` -* `--no-asm` + Default: gcc - Do not use platform specific assembler +- `--no-asm` + Do not use platform specific assembler example: - - php build-nix.php --bits=64 --function=ed25519 --with-sse2 --compiler=icc + + php build-nix.php --bits=64 --function=ed25519 --with-sse2 --compiler=icc ## Windows Create a project with access to the ed25519 files. -If you are not using OpenSSL, add the `ED25519_REFHASH` define to the projects +If you are not using OpenSSL, add the `ED25519_REFHASH` define to the projects "Properties/Preprocessor/Preprocessor Definitions" option Add the following files to the project: -* `fuzz/curve25519-ref10.c` -* `fuzz/ed25519-ref10.c` -* `fuzz/ed25519-donna.c` -* `fuzz/ed25519-donna-sse2.c` (optional) -* `fuzz-[curve25519/ed25519].c` (depending on which you want to fuzz) +- `fuzz/curve25519-ref10.c` +- `fuzz/ed25519-ref10.c` +- `fuzz/ed25519-donna.c` +- `fuzz/ed25519-donna-sse2.c` (optional) +- `fuzz-[curve25519/ed25519].c` (depending on which you want to fuzz) -If you are also fuzzing against ed25519-donna-sse2, add the `ED25519_SSE2` define for `fuzz-[curve25519/ed25519].c` under +If you are also fuzzing against ed25519-donna-sse2, add the `ED25519_SSE2` define for `fuzz-[curve25519/ed25519].c` under its "Properties/Preprocessor/Preprocessor Definitions" option. # Running -If everything agrees, the program will only output occasional status dots (every 0x1000 passes) +If everything agrees, the program will only output occasional status dots (every 0x1000 passes) and a 64bit progress count (every 0x20000 passes): fuzzing: ref10 curved25519 curved25519-sse2 - + ................................ [0000000000020000] ................................ [0000000000040000] ................................ [0000000000060000] ................................ [0000000000080000] ................................ [00000000000a0000] ................................ 
[00000000000c0000] - + If any of the implementations do not agree with the ref10 implementation, the program will dump -the random data that was used, the data generated by the ref10 implementation, and diffs of the +the random data that was used, the data generated by the ref10 implementation, and diffs of the ed25519-donna data against the ref10 data. ## Example errors @@ -83,21 +83,21 @@ These are example error dumps (with intentionally introduced errors). Random data: -* sk, or Secret Key -* m, or Message +- sk, or Secret Key +- m, or Message Generated data: -* pk, or Public Key -* sig, or Signature -* valid, or if the signature of the message is valid with the public key +- pk, or Public Key +- sig, or Signature +- valid, or if the signature of the message is valid with the public key Dump: sk: 0x3b,0xb7,0x17,0x7a,0x66,0xdc,0xb7,0x9a,0x90,0x25,0x07,0x99,0x96,0xf3,0x92,0xef, 0x78,0xf8,0xad,0x6c,0x35,0x87,0x81,0x67,0x03,0xe6,0x95,0xba,0x06,0x18,0x7c,0x9c, - + m: 0x7c,0x8d,0x3d,0xe1,0x92,0xee,0x7a,0xb8,0x4d,0xc9,0xfb,0x02,0x34,0x1e,0x5a,0x91, 0xee,0x01,0xa6,0xb8,0xab,0x37,0x3f,0x3d,0x6d,0xa2,0x47,0xe3,0x27,0x93,0x7c,0xb7, @@ -107,67 +107,66 @@ Dump: 0x63,0x14,0xe0,0x81,0x52,0xec,0xcd,0xcf,0x70,0x54,0x7d,0xa3,0x49,0x8b,0xf0,0x89, 0x70,0x07,0x12,0x2a,0xd9,0xaa,0x16,0x01,0xb2,0x16,0x3a,0xbb,0xfc,0xfa,0x13,0x5b, 0x69,0x83,0x92,0x70,0x95,0x76,0xa0,0x8e,0x16,0x79,0xcc,0xaa,0xb5,0x7c,0xf8,0x7a, - + ref10: pk: 0x71,0xb0,0x5e,0x62,0x1b,0xe3,0xe7,0x36,0x91,0x8b,0xc0,0x13,0x36,0x0c,0xc9,0x04, 0x16,0xf5,0xff,0x48,0x0c,0x83,0x6b,0x88,0x53,0xa2,0xc6,0x0f,0xf7,0xac,0x42,0x04, - + sig: 0x3e,0x05,0xc5,0x37,0x16,0x0b,0x29,0x30,0x89,0xa3,0xe7,0x83,0x08,0x16,0xdd,0x96, 0x02,0xfa,0x0d,0x44,0x2c,0x43,0xaa,0x80,0x93,0x04,0x58,0x22,0x09,0xbf,0x11,0xa5, 0xcc,0xa5,0x3c,0x9f,0xa0,0xa4,0x64,0x5a,0x4a,0xdb,0x20,0xfb,0xc7,0x9b,0xfd,0x3f, 0x08,0xae,0xc4,0x3c,0x1e,0xd8,0xb6,0xb4,0xd2,0x6d,0x80,0x92,0xcb,0x71,0xf3,0x02, - + valid: yes - + ed25519-donna: pk diff: ____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____, ____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____, - + sig diff: 0x2c,0xb9,0x25,0x14,0xd0,0x94,0xeb,0xfe,0x46,0x02,0xc2,0xe8,0xa3,0xeb,0xbf,0xb5, 0x72,0x84,0xbf,0xc1,0x8a,0x32,0x30,0x99,0xf7,0x58,0xfe,0x06,0xa8,0xdc,0xdc,0xab, 0xb5,0x57,0x03,0x33,0x87,0xce,0x54,0x55,0x6a,0x69,0x8a,0xc4,0xb7,0x2a,0xed,0x97, 0xb4,0x68,0xe7,0x52,0x7a,0x07,0x55,0x3b,0xa2,0x94,0xd6,0x5e,0xa1,0x61,0x80,0x08, - + valid: no -In this case, the generated public key matches, but the generated signature is completely +In this case, the generated public key matches, but the generated signature is completely different and does not validate. 
### Curve25519 Random data: -* sk, or Secret Key +- sk, or Secret Key Generated data: -* pk, or Public Key +- pk, or Public Key Dump: sk: 0x44,0xec,0x0b,0x0e,0xa2,0x0e,0x9c,0x5b,0x8c,0xce,0x7b,0x1d,0x68,0xae,0x0f,0x9e, 0x81,0xe2,0x04,0x76,0xda,0x87,0xa4,0x9e,0xc9,0x4f,0x3b,0xf9,0xc3,0x89,0x63,0x70, - - + + ref10: 0x24,0x55,0x55,0xc0,0xf9,0x80,0xaf,0x02,0x43,0xee,0x8c,0x7f,0xc1,0xad,0x90,0x95, 0x57,0x91,0x14,0x2e,0xf2,0x14,0x22,0x80,0xdd,0x4e,0x3c,0x85,0x71,0x84,0x8c,0x62, - - + + curved25519 diff: 0x12,0xd1,0x61,0x2b,0x16,0xb3,0xd8,0x29,0xf8,0xa3,0xba,0x70,0x4e,0x49,0x4f,0x43, 0xa1,0x3c,0x6b,0x42,0x11,0x61,0xcc,0x30,0x87,0x73,0x46,0xfb,0x85,0xc7,0x9a,0x35, - - + + curved25519-sse2 diff: ____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____, ____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____, - -In this case, curved25519 is totally wrong, while curved25519-sse2 matches the reference -implementation. \ No newline at end of file +In this case, curved25519 is totally wrong, while curved25519-sse2 matches the reference +implementation. diff --git a/external/secp256k1/CHANGELOG.md b/external/secp256k1/CHANGELOG.md index ee447c0c1c..a000672887 100644 --- a/external/secp256k1/CHANGELOG.md +++ b/external/secp256k1/CHANGELOG.md @@ -8,153 +8,189 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [0.6.0] - 2024-11-04 #### Added - - New module `musig` implements the MuSig2 multisignature scheme according to the [BIP 327 specification](https://github.com/bitcoin/bips/blob/master/bip-0327.mediawiki). See: - - Header file `include/secp256k1_musig.h` which defines the new API. - - Document `doc/musig.md` for further notes on API usage. - - Usage example `examples/musig.c`. - - New CMake variable `SECP256K1_APPEND_LDFLAGS` for appending linker flags to the build command. + +- New module `musig` implements the MuSig2 multisignature scheme according to the [BIP 327 specification](https://github.com/bitcoin/bips/blob/master/bip-0327.mediawiki). See: + - Header file `include/secp256k1_musig.h` which defines the new API. + - Document `doc/musig.md` for further notes on API usage. + - Usage example `examples/musig.c`. +- New CMake variable `SECP256K1_APPEND_LDFLAGS` for appending linker flags to the build command. #### Changed - - API functions now use a significantly more robust method to clear secrets from the stack before returning. However, secret clearing remains a best-effort security measure and cannot guarantee complete removal. - - Any type `secp256k1_foo` can now be forward-declared using `typedef struct secp256k1_foo secp256k1_foo;` (or also `struct secp256k1_foo;` in C++). - - Organized CMake build artifacts into dedicated directories (`bin/` for executables, `lib/` for libraries) to improve build output structure and Windows shared library compatibility. + +- API functions now use a significantly more robust method to clear secrets from the stack before returning. However, secret clearing remains a best-effort security measure and cannot guarantee complete removal. +- Any type `secp256k1_foo` can now be forward-declared using `typedef struct secp256k1_foo secp256k1_foo;` (or also `struct secp256k1_foo;` in C++). +- Organized CMake build artifacts into dedicated directories (`bin/` for executables, `lib/` for libraries) to improve build output structure and Windows shared library compatibility. 
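The forward-declaration change noted above means a consumer's own header can refer to library types without including `secp256k1.h`. A minimal illustration (the `my_app_*` names are hypothetical):

```C
/* my_app.h -- sketch of the forward-declaration pattern described above.
 * secp256k1.h is not included; the context type stays incomplete and is
 * only used through pointers. */
typedef struct secp256k1_context secp256k1_context;

int my_app_sign(secp256k1_context *ctx,
                unsigned char *sig64,
                const unsigned char *msg32,
                const unsigned char *seckey32);
```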
#### Removed - - Removed the `secp256k1_scratch_space` struct and its associated functions `secp256k1_scratch_space_create` and `secp256k1_scratch_space_destroy` because the scratch space was unused in the API. + +- Removed the `secp256k1_scratch_space` struct and its associated functions `secp256k1_scratch_space_create` and `secp256k1_scratch_space_destroy` because the scratch space was unused in the API. #### ABI Compatibility + The symbols `secp256k1_scratch_space_create` and `secp256k1_scratch_space_destroy` were removed. Otherwise, the library maintains backward compatibility with versions 0.3.x through 0.5.x. ## [0.5.1] - 2024-08-01 #### Added - - Added usage example for an ElligatorSwift key exchange. + +- Added usage example for an ElligatorSwift key exchange. #### Changed - - The default size of the precomputed table for signing was changed from 22 KiB to 86 KiB. The size can be changed with the configure option `--ecmult-gen-kb` (`SECP256K1_ECMULT_GEN_KB` for CMake). - - "auto" is no longer an accepted value for the `--with-ecmult-window` and `--with-ecmult-gen-kb` configure options (this also applies to `SECP256K1_ECMULT_WINDOW_SIZE` and `SECP256K1_ECMULT_GEN_KB` in CMake). To achieve the same configuration as previously provided by the "auto" value, omit setting the configure option explicitly. + +- The default size of the precomputed table for signing was changed from 22 KiB to 86 KiB. The size can be changed with the configure option `--ecmult-gen-kb` (`SECP256K1_ECMULT_GEN_KB` for CMake). +- "auto" is no longer an accepted value for the `--with-ecmult-window` and `--with-ecmult-gen-kb` configure options (this also applies to `SECP256K1_ECMULT_WINDOW_SIZE` and `SECP256K1_ECMULT_GEN_KB` in CMake). To achieve the same configuration as previously provided by the "auto" value, omit setting the configure option explicitly. #### Fixed - - Fixed compilation when the extrakeys module is disabled. + +- Fixed compilation when the extrakeys module is disabled. #### ABI Compatibility + The ABI is backward compatible with versions 0.5.0, 0.4.x and 0.3.x. ## [0.5.0] - 2024-05-06 #### Added - - New function `secp256k1_ec_pubkey_sort` that sorts public keys using lexicographic (of compressed serialization) order. + +- New function `secp256k1_ec_pubkey_sort` that sorts public keys using lexicographic (of compressed serialization) order. #### Changed - - The implementation of the point multiplication algorithm used for signing and public key generation was changed, resulting in improved performance for those operations. - - The related configure option `--ecmult-gen-precision` was replaced with `--ecmult-gen-kb` (`SECP256K1_ECMULT_GEN_KB` for CMake). - - This changes the supported precomputed table sizes for these operations. The new supported sizes are 2 KiB, 22 KiB, or 86 KiB (while the old supported sizes were 32 KiB, 64 KiB, or 512 KiB). + +- The implementation of the point multiplication algorithm used for signing and public key generation was changed, resulting in improved performance for those operations. + - The related configure option `--ecmult-gen-precision` was replaced with `--ecmult-gen-kb` (`SECP256K1_ECMULT_GEN_KB` for CMake). + - This changes the supported precomputed table sizes for these operations. The new supported sizes are 2 KiB, 22 KiB, or 86 KiB (while the old supported sizes were 32 KiB, 64 KiB, or 512 KiB). #### ABI Compatibility + The ABI is backward compatible with versions 0.4.x and 0.3.x. 
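As a rough usage sketch for the sorting helper added in 0.5.0 above (the prototype shown in the comment is an assumption based on this entry, not copied from the header), sorting by compressed serialization gives callers a canonical key order, for example before aggregating participant keys:

```C
/* Assumed prototype (see include/secp256k1.h for the authoritative one):
 *   int secp256k1_ec_pubkey_sort(const secp256k1_context *ctx,
 *                                const secp256k1_pubkey **pubkeys,
 *                                size_t n_pubkeys);
 * The array of pointers is reordered in place. */
#include <secp256k1.h>

static void sort_participants(const secp256k1_context *ctx,
                              const secp256k1_pubkey **keys, size_t n) {
    secp256k1_ec_pubkey_sort(ctx, keys, n);
}
```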
## [0.4.1] - 2023-12-21 #### Changed - - The point multiplication algorithm used for ECDH operations (module `ecdh`) was replaced with a slightly faster one. - - Optional handwritten x86_64 assembly for field operations was removed because modern C compilers are able to output more efficient assembly. This change results in a significant speedup of some library functions when handwritten x86_64 assembly is enabled (`--with-asm=x86_64` in GNU Autotools, `-DSECP256K1_ASM=x86_64` in CMake), which is the default on x86_64. Benchmarks with GCC 10.5.0 show a 10% speedup for `secp256k1_ecdsa_verify` and `secp256k1_schnorrsig_verify`. + +- The point multiplication algorithm used for ECDH operations (module `ecdh`) was replaced with a slightly faster one. +- Optional handwritten x86_64 assembly for field operations was removed because modern C compilers are able to output more efficient assembly. This change results in a significant speedup of some library functions when handwritten x86_64 assembly is enabled (`--with-asm=x86_64` in GNU Autotools, `-DSECP256K1_ASM=x86_64` in CMake), which is the default on x86_64. Benchmarks with GCC 10.5.0 show a 10% speedup for `secp256k1_ecdsa_verify` and `secp256k1_schnorrsig_verify`. #### ABI Compatibility + The ABI is backward compatible with versions 0.4.0 and 0.3.x. ## [0.4.0] - 2023-09-04 #### Added - - New module `ellswift` implements ElligatorSwift encoding for public keys and x-only Diffie-Hellman key exchange for them. - ElligatorSwift permits representing secp256k1 public keys as 64-byte arrays which cannot be distinguished from uniformly random. See: - - Header file `include/secp256k1_ellswift.h` which defines the new API. - - Document `doc/ellswift.md` which explains the mathematical background of the scheme. - - The [paper](https://eprint.iacr.org/2022/759) on which the scheme is based. - - We now test the library with unreleased development snapshots of GCC and Clang. This gives us an early chance to catch miscompilations and constant-time issues introduced by the compiler (such as those that led to the previous two releases). + +- New module `ellswift` implements ElligatorSwift encoding for public keys and x-only Diffie-Hellman key exchange for them. + ElligatorSwift permits representing secp256k1 public keys as 64-byte arrays which cannot be distinguished from uniformly random. See: + - Header file `include/secp256k1_ellswift.h` which defines the new API. + - Document `doc/ellswift.md` which explains the mathematical background of the scheme. + - The [paper](https://eprint.iacr.org/2022/759) on which the scheme is based. +- We now test the library with unreleased development snapshots of GCC and Clang. This gives us an early chance to catch miscompilations and constant-time issues introduced by the compiler (such as those that led to the previous two releases). #### Fixed - - Fixed symbol visibility in Windows DLL builds, where three internal library symbols were wrongly exported. + +- Fixed symbol visibility in Windows DLL builds, where three internal library symbols were wrongly exported. #### Changed - - When consuming libsecp256k1 as a static library on Windows, the user must now define the `SECP256K1_STATIC` macro before including `secp256k1.h`. + +- When consuming libsecp256k1 as a static library on Windows, the user must now define the `SECP256K1_STATIC` macro before including `secp256k1.h`. #### ABI Compatibility + This release is backward compatible with the ABI of 0.3.0, 0.3.1, and 0.3.2. 
Symbol visibility is now believed to be handled properly on supported platforms and is now considered to be part of the ABI. Please report any improperly exported symbols as a bug. ## [0.3.2] - 2023-05-13 + We strongly recommend updating to 0.3.2 if you use or plan to use GCC >=13 to compile libsecp256k1. When in doubt, check the GCC version using `gcc -v`. #### Security - - Module `ecdh`: Fix "constant-timeness" issue with GCC 13.1 (and potentially future versions of GCC) that could leave applications using libsecp256k1's ECDH module vulnerable to a timing side-channel attack. The fix avoids secret-dependent control flow during ECDH computations when libsecp256k1 is compiled with GCC 13.1. + +- Module `ecdh`: Fix "constant-timeness" issue with GCC 13.1 (and potentially future versions of GCC) that could leave applications using libsecp256k1's ECDH module vulnerable to a timing side-channel attack. The fix avoids secret-dependent control flow during ECDH computations when libsecp256k1 is compiled with GCC 13.1. #### Fixed - - Fixed an old bug that permitted compilers to potentially output bad assembly code on x86_64. In theory, it could lead to a crash or a read of unrelated memory, but this has never been observed on any compilers so far. + +- Fixed an old bug that permitted compilers to potentially output bad assembly code on x86_64. In theory, it could lead to a crash or a read of unrelated memory, but this has never been observed on any compilers so far. #### Changed - - Various improvements and changes to CMake builds. CMake builds remain experimental. - - Made API versioning consistent with GNU Autotools builds. - - Switched to `BUILD_SHARED_LIBS` variable for controlling whether to build a static or a shared library. - - Added `SECP256K1_INSTALL` variable for the controlling whether to install the build artefacts. - - Renamed asm build option `arm` to `arm32`. Use `--with-asm=arm32` instead of `--with-asm=arm` (GNU Autotools), and `-DSECP256K1_ASM=arm32` instead of `-DSECP256K1_ASM=arm` (CMake). + +- Various improvements and changes to CMake builds. CMake builds remain experimental. + - Made API versioning consistent with GNU Autotools builds. + - Switched to `BUILD_SHARED_LIBS` variable for controlling whether to build a static or a shared library. + - Added `SECP256K1_INSTALL` variable for the controlling whether to install the build artefacts. +- Renamed asm build option `arm` to `arm32`. Use `--with-asm=arm32` instead of `--with-asm=arm` (GNU Autotools), and `-DSECP256K1_ASM=arm32` instead of `-DSECP256K1_ASM=arm` (CMake). #### ABI Compatibility + The ABI is compatible with versions 0.3.0 and 0.3.1. ## [0.3.1] - 2023-04-10 + We strongly recommend updating to 0.3.1 if you use or plan to use Clang >=14 to compile libsecp256k1, e.g., Xcode >=14 on macOS has Clang >=14. When in doubt, check the Clang version using `clang -v`. #### Security - - Fix "constant-timeness" issue with Clang >=14 that could leave applications using libsecp256k1 vulnerable to a timing side-channel attack. The fix avoids secret-dependent control flow and secret-dependent memory accesses in conditional moves of memory objects when libsecp256k1 is compiled with Clang >=14. + +- Fix "constant-timeness" issue with Clang >=14 that could leave applications using libsecp256k1 vulnerable to a timing side-channel attack. The fix avoids secret-dependent control flow and secret-dependent memory accesses in conditional moves of memory objects when libsecp256k1 is compiled with Clang >=14. 
#### Added - - Added tests against [Project Wycheproof's](https://github.com/google/wycheproof/) set of ECDSA test vectors (Bitcoin "low-S" variant), a fixed set of test cases designed to trigger various edge cases. + +- Added tests against [Project Wycheproof's](https://github.com/google/wycheproof/) set of ECDSA test vectors (Bitcoin "low-S" variant), a fixed set of test cases designed to trigger various edge cases. #### Changed - - Increased minimum required CMake version to 3.13. CMake builds remain experimental. + +- Increased minimum required CMake version to 3.13. CMake builds remain experimental. #### ABI Compatibility + The ABI is compatible with version 0.3.0. ## [0.3.0] - 2023-03-08 #### Added - - Added experimental support for CMake builds. Traditional GNU Autotools builds (`./configure` and `make`) remain fully supported. - - Usage examples: Added a recommended method for securely clearing sensitive data, e.g., secret keys, from memory. - - Tests: Added a new test binary `noverify_tests`. This binary runs the tests without some additional checks present in the ordinary `tests` binary and is thereby closer to production binaries. The `noverify_tests` binary is automatically run as part of the `make check` target. + +- Added experimental support for CMake builds. Traditional GNU Autotools builds (`./configure` and `make`) remain fully supported. +- Usage examples: Added a recommended method for securely clearing sensitive data, e.g., secret keys, from memory. +- Tests: Added a new test binary `noverify_tests`. This binary runs the tests without some additional checks present in the ordinary `tests` binary and is thereby closer to production binaries. The `noverify_tests` binary is automatically run as part of the `make check` target. #### Fixed - - Fixed declarations of API variables for MSVC (`__declspec(dllimport)`). This fixes MSVC builds of programs which link against a libsecp256k1 DLL dynamically and use API variables (and not only API functions). Unfortunately, the MSVC linker now will emit warning `LNK4217` when trying to link against libsecp256k1 statically. Pass `/ignore:4217` to the linker to suppress this warning. + +- Fixed declarations of API variables for MSVC (`__declspec(dllimport)`). This fixes MSVC builds of programs which link against a libsecp256k1 DLL dynamically and use API variables (and not only API functions). Unfortunately, the MSVC linker now will emit warning `LNK4217` when trying to link against libsecp256k1 statically. Pass `/ignore:4217` to the linker to suppress this warning. #### Changed - - Forbade cloning or destroying `secp256k1_context_static`. Create a new context instead of cloning the static context. (If this change breaks your code, your code is probably wrong.) - - Forbade randomizing (copies of) `secp256k1_context_static`. Randomizing a copy of `secp256k1_context_static` did not have any effect and did not provide defense-in-depth protection against side-channel attacks. Create a new context if you want to benefit from randomization. + +- Forbade cloning or destroying `secp256k1_context_static`. Create a new context instead of cloning the static context. (If this change breaks your code, your code is probably wrong.) +- Forbade randomizing (copies of) `secp256k1_context_static`. Randomizing a copy of `secp256k1_context_static` did not have any effect and did not provide defense-in-depth protection against side-channel attacks. Create a new context if you want to benefit from randomization. 
#### Removed - - Removed the configuration header `src/libsecp256k1-config.h`. We recommend passing flags to `./configure` or `cmake` to set configuration options (see `./configure --help` or `cmake -LH`). If you cannot or do not want to use one of the supported build systems, pass configuration flags such as `-DSECP256K1_ENABLE_MODULE_SCHNORRSIG` manually to the compiler (see the file `configure.ac` for supported flags). + +- Removed the configuration header `src/libsecp256k1-config.h`. We recommend passing flags to `./configure` or `cmake` to set configuration options (see `./configure --help` or `cmake -LH`). If you cannot or do not want to use one of the supported build systems, pass configuration flags such as `-DSECP256K1_ENABLE_MODULE_SCHNORRSIG` manually to the compiler (see the file `configure.ac` for supported flags). #### ABI Compatibility -Due to changes in the API regarding `secp256k1_context_static` described above, the ABI is *not* compatible with previous versions. + +Due to changes in the API regarding `secp256k1_context_static` described above, the ABI is _not_ compatible with previous versions. ## [0.2.0] - 2022-12-12 #### Added - - Added usage examples for common use cases in a new `examples/` directory. - - Added `secp256k1_selftest`, to be used in conjunction with `secp256k1_context_static`. - - Added support for 128-bit wide multiplication on MSVC for x86_64 and arm64, giving roughly a 20% speedup on those platforms. + +- Added usage examples for common use cases in a new `examples/` directory. +- Added `secp256k1_selftest`, to be used in conjunction with `secp256k1_context_static`. +- Added support for 128-bit wide multiplication on MSVC for x86_64 and arm64, giving roughly a 20% speedup on those platforms. #### Changed - - Enabled modules `schnorrsig`, `extrakeys` and `ecdh` by default in `./configure`. - - The `secp256k1_nonce_function_rfc6979` nonce function, used by default by `secp256k1_ecdsa_sign`, now reduces the message hash modulo the group order to match the specification. This only affects improper use of ECDSA signing API. + +- Enabled modules `schnorrsig`, `extrakeys` and `ecdh` by default in `./configure`. +- The `secp256k1_nonce_function_rfc6979` nonce function, used by default by `secp256k1_ecdsa_sign`, now reduces the message hash modulo the group order to match the specification. This only affects improper use of ECDSA signing API. #### Deprecated - - Deprecated context flags `SECP256K1_CONTEXT_VERIFY` and `SECP256K1_CONTEXT_SIGN`. Use `SECP256K1_CONTEXT_NONE` instead. - - Renamed `secp256k1_context_no_precomp` to `secp256k1_context_static`. - - Module `schnorrsig`: renamed `secp256k1_schnorrsig_sign` to `secp256k1_schnorrsig_sign32`. + +- Deprecated context flags `SECP256K1_CONTEXT_VERIFY` and `SECP256K1_CONTEXT_SIGN`. Use `SECP256K1_CONTEXT_NONE` instead. +- Renamed `secp256k1_context_no_precomp` to `secp256k1_context_static`. +- Module `schnorrsig`: renamed `secp256k1_schnorrsig_sign` to `secp256k1_schnorrsig_sign32`. #### ABI Compatibility + Since this is the first release, we do not compare application binary interfaces. -However, there are earlier unreleased versions of libsecp256k1 that are *not* ABI compatible with this version. +However, there are earlier unreleased versions of libsecp256k1 that are _not_ ABI compatible with this version. 
## [0.1.0] - 2013-03-05 to 2021-12-25 diff --git a/external/secp256k1/CMakePresets.json b/external/secp256k1/CMakePresets.json index b35cd80579..60138c16bf 100644 --- a/external/secp256k1/CMakePresets.json +++ b/external/secp256k1/CMakePresets.json @@ -1,5 +1,9 @@ { - "cmakeMinimumRequired": {"major": 3, "minor": 21, "patch": 0}, + "cmakeMinimumRequired": { + "major": 3, + "minor": 21, + "patch": 0 + }, "version": 3, "configurePresets": [ { diff --git a/external/secp256k1/CONTRIBUTING.md b/external/secp256k1/CONTRIBUTING.md index a366d38b0e..88c22af02b 100644 --- a/external/secp256k1/CONTRIBUTING.md +++ b/external/secp256k1/CONTRIBUTING.md @@ -12,15 +12,15 @@ The libsecp256k1 project welcomes contributions in the form of new functionality It is the responsibility of the contributors to convince the maintainers that the proposed functionality is within the project's scope, high-quality and maintainable. Contributors are recommended to provide the following in addition to the new code: -* **Specification:** - A specification can help significantly in reviewing the new code as it provides documentation and context. - It may justify various design decisions, give a motivation and outline security goals. - If the specification contains pseudocode, a reference implementation or test vectors, these can be used to compare with the proposed libsecp256k1 code. -* **Security Arguments:** - In addition to a defining the security goals, it should be argued that the new functionality meets these goals. - Depending on the nature of the new functionality, a wide range of security arguments are acceptable, ranging from being "obviously secure" to rigorous proofs of security. -* **Relevance Arguments:** - The relevance of the new functionality for the Bitcoin ecosystem should be argued by outlining clear use cases. +- **Specification:** + A specification can help significantly in reviewing the new code as it provides documentation and context. + It may justify various design decisions, give a motivation and outline security goals. + If the specification contains pseudocode, a reference implementation or test vectors, these can be used to compare with the proposed libsecp256k1 code. +- **Security Arguments:** + In addition to a defining the security goals, it should be argued that the new functionality meets these goals. + Depending on the nature of the new functionality, a wide range of security arguments are acceptable, ranging from being "obviously secure" to rigorous proofs of security. +- **Relevance Arguments:** + The relevance of the new functionality for the Bitcoin ecosystem should be argued by outlining clear use cases. These are not the only factors taken into account when considering to add new functionality. The proposed new libsecp256k1 code must be of high quality, including API documentation and tests, as well as featuring a misuse-resistant API design. @@ -44,36 +44,36 @@ The Contributor Workflow & Peer Review in libsecp256k1 are similar to Bitcoin Co In addition, libsecp256k1 tries to maintain the following coding conventions: -* No runtime heap allocation (e.g., no `malloc`) unless explicitly requested by the caller (via `secp256k1_context_create` or `secp256k1_scratch_space_create`, for example). Moreover, it should be possible to use the library without any heap allocations. -* The tests should cover all lines and branches of the library (see [Test coverage](#coverage)). 
-* Operations involving secret data should be tested for being constant time with respect to the secrets (see [src/ctime_tests.c](src/ctime_tests.c)). -* Local variables containing secret data should be cleared explicitly to try to delete secrets from memory. -* Use `secp256k1_memcmp_var` instead of `memcmp` (see [#823](https://github.com/bitcoin-core/secp256k1/issues/823)). -* As a rule of thumb, the default values for configuration options should target standard desktop machines and align with Bitcoin Core's defaults, and the tests should mostly exercise the default configuration (see [#1549](https://github.com/bitcoin-core/secp256k1/issues/1549#issuecomment-2200559257)). +- No runtime heap allocation (e.g., no `malloc`) unless explicitly requested by the caller (via `secp256k1_context_create` or `secp256k1_scratch_space_create`, for example). Moreover, it should be possible to use the library without any heap allocations. +- The tests should cover all lines and branches of the library (see [Test coverage](#coverage)). +- Operations involving secret data should be tested for being constant time with respect to the secrets (see [src/ctime_tests.c](src/ctime_tests.c)). +- Local variables containing secret data should be cleared explicitly to try to delete secrets from memory. +- Use `secp256k1_memcmp_var` instead of `memcmp` (see [#823](https://github.com/bitcoin-core/secp256k1/issues/823)). +- As a rule of thumb, the default values for configuration options should target standard desktop machines and align with Bitcoin Core's defaults, and the tests should mostly exercise the default configuration (see [#1549](https://github.com/bitcoin-core/secp256k1/issues/1549#issuecomment-2200559257)). #### Style conventions -* Commits should be atomic and diffs should be easy to read. For this reason, do not mix any formatting fixes or code moves with actual code changes. Make sure each individual commit is hygienic: that it builds successfully on its own without warnings, errors, regressions, or test failures. -* New code should adhere to the style of existing, in particular surrounding, code. Other than that, we do not enforce strict rules for code formatting. -* The code conforms to C89. Most notably, that means that only `/* ... */` comments are allowed (no `//` line comments). Moreover, any declarations in a `{ ... }` block (e.g., a function) must appear at the beginning of the block before any statements. When you would like to declare a variable in the middle of a block, you can open a new block: - ```C - void secp256k_foo(void) { - unsigned int x; /* declaration */ - int y = 2*x; /* declaration */ - x = 17; /* statement */ - { - int a, b; /* declaration */ - a = x + y; /* statement */ - secp256k_bar(x, &b); /* statement */ - } - } - ``` -* Use `unsigned int` instead of just `unsigned`. -* Use `void *ptr` instead of `void* ptr`. -* Arguments of the publicly-facing API must have a specific order defined in [include/secp256k1.h](include/secp256k1.h). -* User-facing comment lines in headers should be limited to 80 chars if possible. -* All identifiers in file scope should start with `secp256k1_`. -* Avoid trailing whitespace. +- Commits should be atomic and diffs should be easy to read. For this reason, do not mix any formatting fixes or code moves with actual code changes. Make sure each individual commit is hygienic: that it builds successfully on its own without warnings, errors, regressions, or test failures. +- New code should adhere to the style of existing, in particular surrounding, code. 
Other than that, we do not enforce strict rules for code formatting. +- The code conforms to C89. Most notably, that means that only `/* ... */` comments are allowed (no `//` line comments). Moreover, any declarations in a `{ ... }` block (e.g., a function) must appear at the beginning of the block before any statements. When you would like to declare a variable in the middle of a block, you can open a new block: + ```C + void secp256k_foo(void) { + unsigned int x; /* declaration */ + int y = 2*x; /* declaration */ + x = 17; /* statement */ + { + int a, b; /* declaration */ + a = x + y; /* statement */ + secp256k_bar(x, &b); /* statement */ + } + } + ``` +- Use `unsigned int` instead of just `unsigned`. +- Use `void *ptr` instead of `void* ptr`. +- Arguments of the publicly-facing API must have a specific order defined in [include/secp256k1.h](include/secp256k1.h). +- User-facing comment lines in headers should be limited to 80 chars if possible. +- All identifiers in file scope should start with `secp256k1_`. +- Avoid trailing whitespace. ### Tests @@ -101,7 +101,7 @@ To create a HTML report with coloured and annotated source code: #### Exhaustive tests There are tests of several functions in which a small group replaces secp256k1. -These tests are *exhaustive* since they provide all elements and scalars of the small group as input arguments (see [src/tests_exhaustive.c](src/tests_exhaustive.c)). +These tests are _exhaustive_ since they provide all elements and scalars of the small group as input arguments (see [src/tests_exhaustive.c](src/tests_exhaustive.c)). ### Benchmarks diff --git a/external/secp256k1/README.md b/external/secp256k1/README.md index 222e5fb768..4cd64c7fee 100644 --- a/external/secp256k1/README.md +++ b/external/secp256k1/README.md @@ -1,5 +1,4 @@ -libsecp256k1 -============ +# libsecp256k1 ![Dependencies: None](https://img.shields.io/badge/dependencies-none-success) [![irc.libera.chat #secp256k1](https://img.shields.io/badge/irc.libera.chat-%23secp256k1-success)](https://web.libera.chat/#secp256k1) @@ -9,60 +8,59 @@ High-performance high-assurance C library for digital signatures and other crypt This library is intended to be the highest quality publicly available library for cryptography on the secp256k1 curve. However, the primary focus of its development has been for usage in the Bitcoin system and usage unlike Bitcoin's may be less well tested, verified, or suffer from a less well thought out interface. Correct usage requires some care and consideration that the library is fit for your application's purpose. Features: -* secp256k1 ECDSA signing/verification and key generation. -* Additive and multiplicative tweaking of secret/public keys. -* Serialization/parsing of secret keys, public keys, signatures. -* Constant time, constant memory access signing and public key generation. -* Derandomized ECDSA (via RFC6979 or with a caller provided function.) -* Very efficient implementation. -* Suitable for embedded systems. -* No runtime dependencies. -* Optional module for public key recovery. -* Optional module for ECDH key exchange. -* Optional module for Schnorr signatures according to [BIP-340](https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki). -* Optional module for ElligatorSwift key exchange according to [BIP-324](https://github.com/bitcoin/bips/blob/master/bip-0324.mediawiki). -* Optional module for MuSig2 Schnorr multi-signatures according to [BIP-327](https://github.com/bitcoin/bips/blob/master/bip-0327.mediawiki). 
-Implementation details ----------------------- +- secp256k1 ECDSA signing/verification and key generation. +- Additive and multiplicative tweaking of secret/public keys. +- Serialization/parsing of secret keys, public keys, signatures. +- Constant time, constant memory access signing and public key generation. +- Derandomized ECDSA (via RFC6979 or with a caller provided function.) +- Very efficient implementation. +- Suitable for embedded systems. +- No runtime dependencies. +- Optional module for public key recovery. +- Optional module for ECDH key exchange. +- Optional module for Schnorr signatures according to [BIP-340](https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki). +- Optional module for ElligatorSwift key exchange according to [BIP-324](https://github.com/bitcoin/bips/blob/master/bip-0324.mediawiki). +- Optional module for MuSig2 Schnorr multi-signatures according to [BIP-327](https://github.com/bitcoin/bips/blob/master/bip-0327.mediawiki). -* General - * No runtime heap allocation. - * Extensive testing infrastructure. - * Structured to facilitate review and analysis. - * Intended to be portable to any system with a C89 compiler and uint64_t support. - * No use of floating types. - * Expose only higher level interfaces to minimize the API surface and improve application security. ("Be difficult to use insecurely.") -* Field operations - * Optimized implementation of arithmetic modulo the curve's field size (2^256 - 0x1000003D1). - * Using 5 52-bit limbs - * Using 10 26-bit limbs (including hand-optimized assembly for 32-bit ARM, by Wladimir J. van der Laan). - * This is an experimental feature that has not received enough scrutiny to satisfy the standard of quality of this library but is made available for testing and review by the community. -* Scalar operations - * Optimized implementation without data-dependent branches of arithmetic modulo the curve's order. - * Using 4 64-bit limbs (relying on __int128 support in the compiler). - * Using 8 32-bit limbs. -* Modular inverses (both field elements and scalars) based on [safegcd](https://gcd.cr.yp.to/index.html) with some modifications, and a variable-time variant (by Peter Dettman). -* Group operations - * Point addition formula specifically simplified for the curve equation (y^2 = x^3 + 7). - * Use addition between points in Jacobian and affine coordinates where possible. - * Use a unified addition/doubling formula where necessary to avoid data-dependent branches. - * Point/x comparison without a field inversion by comparison in the Jacobian coordinate space. -* Point multiplication for verification (a*P + b*G). - * Use wNAF notation for point multiplicands. - * Use a much larger window for multiples of G, using precomputed multiples. - * Use Shamir's trick to do the multiplication with the public key and the generator simultaneously. - * Use secp256k1's efficiently-computable endomorphism to split the P multiplicand into 2 half-sized ones. -* Point multiplication for signing - * Use a precomputed table of multiples of powers of 16 multiplied with the generator, so general multiplication becomes a series of additions. - * Intended to be completely free of timing sidechannels for secret-key operations (on reasonable hardware/toolchains) - * Access the table with branch-free conditional moves so memory access is uniform. - * No data-dependent branches - * Optional runtime blinding which attempts to frustrate differential power analysis. 
- * The precomputed tables add and eventually subtract points for which no known scalar (secret key) is known, preventing even an attacker with control over the secret key used to control the data internally. +## Implementation details -Building with Autotools ------------------------ +- General + - No runtime heap allocation. + - Extensive testing infrastructure. + - Structured to facilitate review and analysis. + - Intended to be portable to any system with a C89 compiler and uint64_t support. + - No use of floating types. + - Expose only higher level interfaces to minimize the API surface and improve application security. ("Be difficult to use insecurely.") +- Field operations + - Optimized implementation of arithmetic modulo the curve's field size (2^256 - 0x1000003D1). + - Using 5 52-bit limbs + - Using 10 26-bit limbs (including hand-optimized assembly for 32-bit ARM, by Wladimir J. van der Laan). + - This is an experimental feature that has not received enough scrutiny to satisfy the standard of quality of this library but is made available for testing and review by the community. +- Scalar operations + - Optimized implementation without data-dependent branches of arithmetic modulo the curve's order. + - Using 4 64-bit limbs (relying on \_\_int128 support in the compiler). + - Using 8 32-bit limbs. +- Modular inverses (both field elements and scalars) based on [safegcd](https://gcd.cr.yp.to/index.html) with some modifications, and a variable-time variant (by Peter Dettman). +- Group operations + - Point addition formula specifically simplified for the curve equation (y^2 = x^3 + 7). + - Use addition between points in Jacobian and affine coordinates where possible. + - Use a unified addition/doubling formula where necessary to avoid data-dependent branches. + - Point/x comparison without a field inversion by comparison in the Jacobian coordinate space. +- Point multiplication for verification (a*P + b*G). + - Use wNAF notation for point multiplicands. + - Use a much larger window for multiples of G, using precomputed multiples. + - Use Shamir's trick to do the multiplication with the public key and the generator simultaneously. + - Use secp256k1's efficiently-computable endomorphism to split the P multiplicand into 2 half-sized ones. +- Point multiplication for signing + - Use a precomputed table of multiples of powers of 16 multiplied with the generator, so general multiplication becomes a series of additions. + - Intended to be completely free of timing sidechannels for secret-key operations (on reasonable hardware/toolchains) + - Access the table with branch-free conditional moves so memory access is uniform. + - No data-dependent branches + - Optional runtime blinding which attempts to frustrate differential power analysis. + - The precomputed tables add and eventually subtract points for which no known scalar (secret key) is known, preventing even an attacker with control over the secret key used to control the data internally. + +## Building with Autotools $ ./autogen.sh $ ./configure @@ -72,8 +70,7 @@ Building with Autotools To compile optional modules (such as Schnorr signatures), you need to run `./configure` with additional flags (such as `--enable-module-schnorrsig`). Run `./configure --help` to see the full list of available flags. -Building with CMake (experimental) ----------------------------------- +## Building with CMake (experimental) To maintain a pristine source tree, CMake encourages to perform an out-of-source build by using a separate dedicated build tree. 
@@ -109,18 +106,19 @@ In "Developer Command Prompt for VS 2022": >cmake -G "Visual Studio 17 2022" -A x64 -S . -B build >cmake --build build --config RelWithDebInfo -Usage examples ------------ +## Usage examples + Usage examples can be found in the [examples](examples) directory. To compile them you need to configure with `--enable-examples`. - * [ECDSA example](examples/ecdsa.c) - * [Schnorr signatures example](examples/schnorr.c) - * [Deriving a shared secret (ECDH) example](examples/ecdh.c) - * [ElligatorSwift key exchange example](examples/ellswift.c) + +- [ECDSA example](examples/ecdsa.c) +- [Schnorr signatures example](examples/schnorr.c) +- [Deriving a shared secret (ECDH) example](examples/ecdh.c) +- [ElligatorSwift key exchange example](examples/ellswift.c) To compile the Schnorr signature and ECDH examples, you also need to configure with `--enable-module-schnorrsig` and `--enable-module-ecdh`. -Benchmark ------------- +## Benchmark + If configured with `--enable-benchmark` (which is the default), binaries for benchmarking the libsecp256k1 functions will be present in the root directory after the build. To print the benchmark result to the command line: @@ -131,12 +129,10 @@ To create a CSV file for the benchmark result : $ ./bench_name | sed '2d;s/ \{1,\}//g' > bench_name.csv -Reporting a vulnerability ------------- +## Reporting a vulnerability See [SECURITY.md](SECURITY.md) -Contributing to libsecp256k1 ------------- +## Contributing to libsecp256k1 See [CONTRIBUTING.md](CONTRIBUTING.md) diff --git a/external/secp256k1/SECURITY.md b/external/secp256k1/SECURITY.md index b515cc1c8e..cb438707ce 100644 --- a/external/secp256k1/SECURITY.md +++ b/external/secp256k1/SECURITY.md @@ -6,10 +6,10 @@ To report security issues send an email to secp256k1-security@bitcoincore.org (n The following keys may be used to communicate sensitive information to developers: -| Name | Fingerprint | -|------|-------------| -| Pieter Wuille | 133E AC17 9436 F14A 5CF1 B794 860F EB80 4E66 9320 | -| Jonas Nick | 36C7 1A37 C9D9 88BD E825 08D9 B1A7 0E4F 8DCD 0366 | -| Tim Ruffing | 09E0 3F87 1092 E40E 106E 902B 33BC 86AB 80FF 5516 | +| Name | Fingerprint | +| ------------- | ------------------------------------------------- | +| Pieter Wuille | 133E AC17 9436 F14A 5CF1 B794 860F EB80 4E66 9320 | +| Jonas Nick | 36C7 1A37 C9D9 88BD E825 08D9 B1A7 0E4F 8DCD 0366 | +| Tim Ruffing | 09E0 3F87 1092 E40E 106E 902B 33BC 86AB 80FF 5516 | You can import a key by running the following command with that individual’s fingerprint: `gpg --keyserver hkps://keys.openpgp.org --recv-keys ""` Ensure that you put quotes around fingerprints containing spaces. diff --git a/external/secp256k1/doc/ellswift.md b/external/secp256k1/doc/ellswift.md index 9d60e6be0b..ffbe9d02ac 100644 --- a/external/secp256k1/doc/ellswift.md +++ b/external/secp256k1/doc/ellswift.md @@ -5,17 +5,17 @@ construction in the ["SwiftEC: Shallue–van de Woestijne Indifferentiable Function To Elliptic Curves"](https://eprint.iacr.org/2022/759) paper by Jorge Chávez-Saab, Francisco Rodríguez-Henríquez, and Mehdi Tibouchi. -* [1. Introduction](#1-introduction) -* [2. The decoding function](#2-the-decoding-function) - + [2.1 Decoding for `secp256k1`](#21-decoding-for-secp256k1) -* [3. 
The encoding function](#3-the-encoding-function) - + [3.1 Switching to *v, w* coordinates](#31-switching-to-v-w-coordinates) - + [3.2 Avoiding computing all inverses](#32-avoiding-computing-all-inverses) - + [3.3 Finding the inverse](#33-finding-the-inverse) - + [3.4 Dealing with special cases](#34-dealing-with-special-cases) - + [3.5 Encoding for `secp256k1`](#35-encoding-for-secp256k1) -* [4. Encoding and decoding full *(x, y)* coordinates](#4-encoding-and-decoding-full-x-y-coordinates) - + [4.1 Full *(x, y)* coordinates for `secp256k1`](#41-full-x-y-coordinates-for-secp256k1) +- [1. Introduction](#1-introduction) +- [2. The decoding function](#2-the-decoding-function) + - [2.1 Decoding for `secp256k1`](#21-decoding-for-secp256k1) +- [3. The encoding function](#3-the-encoding-function) + - [3.1 Switching to _v, w_ coordinates](#31-switching-to-v-w-coordinates) + - [3.2 Avoiding computing all inverses](#32-avoiding-computing-all-inverses) + - [3.3 Finding the inverse](#33-finding-the-inverse) + - [3.4 Dealing with special cases](#34-dealing-with-special-cases) + - [3.5 Encoding for `secp256k1`](#35-encoding-for-secp256k1) +- [4. Encoding and decoding full _(x, y)_ coordinates](#4-encoding-and-decoding-full-x-y-coordinates) + - [4.1 Full _(x, y)_ coordinates for `secp256k1`](#41-full-x-y-coordinates-for-secp256k1) ## 1. Introduction @@ -34,13 +34,14 @@ are taken modulo $p$), and then evaluating $F_u(t)$, which for every $u$ and $t$ x-coordinate on the curve. The functions $F_u$ will be defined in [Section 2](#2-the-decoding-function). **Encoding** a given $x$ coordinate is conceptually done as follows: -* Loop: - * Pick a uniformly random field element $u.$ - * Compute the set $L = F_u^{-1}(x)$ of $t$ values for which $F_u(t) = x$, which may have up to *8* elements. - * With probability $1 - \dfrac{\\#L}{8}$, restart the loop. - * Select a uniformly random $t \in L$ and return $(u, t).$ -This is the *ElligatorSwift* algorithm, here given for just x-coordinates. An extension to full +- Loop: + - Pick a uniformly random field element $u.$ + - Compute the set $L = F_u^{-1}(x)$ of $t$ values for which $F_u(t) = x$, which may have up to _8_ elements. + - With probability $1 - \dfrac{\\#L}{8}$, restart the loop. + - Select a uniformly random $t \in L$ and return $(u, t).$ + +This is the _ElligatorSwift_ algorithm, here given for just x-coordinates. An extension to full $(x, y)$ points will be given in [Section 4](#4-encoding-and-decoding-full-x-y-coordinates). The algorithm finds a uniformly random $(u, t)$ among (almost all) those for which $F_u(t) = x.$ Section 3.2 in the paper proves that the number of such encodings for @@ -50,37 +51,40 @@ almost all x-coordinates on the curve (all but at most 39) is close to two times ## 2. The decoding function First some definitions: -* $\mathbb{F}$ is the finite field of size $q$, of characteristic 5 or more, and $q \equiv 1 \mod 3.$ - * For `secp256k1`, $q = 2^{256} - 2^{32} - 977$, which satisfies that requirement. -* Let $E$ be the elliptic curve of points $(x, y) \in \mathbb{F}^2$ for which $y^2 = x^3 + ax + b$, with $a$ and $b$ + +- $\mathbb{F}$ is the finite field of size $q$, of characteristic 5 or more, and $q \equiv 1 \mod 3.$ + - For `secp256k1`, $q = 2^{256} - 2^{32} - 977$, which satisfies that requirement. 
+- Let $E$ be the elliptic curve of points $(x, y) \in \mathbb{F}^2$ for which $y^2 = x^3 + ax + b$, with $a$ and $b$ public constants, for which $\Delta_E = -16(4a^3 + 27b^2)$ is a square, and at least one of $(-b \pm \sqrt{-3 \Delta_E} / 36)/2$ is a square. - This implies that the order of $E$ is either odd, or a multiple of *4*. + This implies that the order of $E$ is either odd, or a multiple of _4_. If $a=0$, this condition is always fulfilled. - * For `secp256k1`, $a=0$ and $b=7.$ -* Let the function $g(x) = x^3 + ax + b$, so the $E$ curve equation is also $y^2 = g(x).$ -* Let the function $h(x) = 3x^3 + 4a.$ -* Define $V$ as the set of solutions $(x_1, x_2, x_3, z)$ to $z^2 = g(x_1)g(x_2)g(x_3).$ -* Define $S_u$ as the set of solutions $(X, Y)$ to $X^2 + h(u)Y^2 = -g(u)$ and $Y \neq 0.$ -* $P_u$ is a function from $\mathbb{F}$ to $S_u$ that will be defined below. -* $\psi_u$ is a function from $S_u$ to $V$ that will be defined below. + - For `secp256k1`, $a=0$ and $b=7.$ +- Let the function $g(x) = x^3 + ax + b$, so the $E$ curve equation is also $y^2 = g(x).$ +- Let the function $h(x) = 3x^3 + 4a.$ +- Define $V$ as the set of solutions $(x_1, x_2, x_3, z)$ to $z^2 = g(x_1)g(x_2)g(x_3).$ +- Define $S_u$ as the set of solutions $(X, Y)$ to $X^2 + h(u)Y^2 = -g(u)$ and $Y \neq 0.$ +- $P_u$ is a function from $\mathbb{F}$ to $S_u$ that will be defined below. +- $\psi_u$ is a function from $S_u$ to $V$ that will be defined below. **Note**: In the paper: -* $F_u$ corresponds to $F_{0,u}$ there. -* $P_u(t)$ is called $P$ there. -* All $S_u$ sets together correspond to $S$ there. -* All $\psi_u$ functions together (operating on elements of $S$) correspond to $\psi$ there. + +- $F_u$ corresponds to $F_{0,u}$ there. +- $P_u(t)$ is called $P$ there. +- All $S_u$ sets together correspond to $S$ there. +- All $\psi_u$ functions together (operating on elements of $S$) correspond to $\psi$ there. Note that for $V$, the left hand side of the equation $z^2$ is square, and thus the right hand must also be square. As multiplying non-squares results in a square in $\mathbb{F}$, out of the three right-hand side factors an even number must be non-squares. -This implies that exactly *1* or exactly *3* out of +This implies that exactly _1_ or exactly _3_ out of $\\{g(x_1), g(x_2), g(x_3)\\}$ must be square, and thus that for any $(x_1,x_2,x_3,z) \in V$, at least one of $\\{x_1, x_2, x_3\\}$ must be a valid x-coordinate on $E.$ There is one exception to this, namely when $z=0$, but even then one of the three values is a valid x-coordinate. **Define** the decoding function $F_u(t)$ as: -* Let $(x_1, x_2, x_3, z) = \psi_u(P_u(t)).$ -* Return the first element $x$ of $(x_3, x_2, x_1)$ which is a valid x-coordinate on $E$ (i.e., $g(x)$ is square). + +- Let $(x_1, x_2, x_3, z) = \psi_u(P_u(t)).$ +- Return the first element $x$ of $(x_3, x_2, x_1)$ which is a valid x-coordinate on $E$ (i.e., $g(x)$ is square). $P_u(t) = (X(u, t), Y(u, t))$, where: @@ -98,12 +102,13 @@ Y(u, t) & = & \left\\{\begin{array}{ll} $$ $P_u(t)$ is defined: -* For $a=0$, unless: - * $u = 0$ or $t = 0$ (division by zero) - * $g(u) = -t^2$ (would give $Y=0$). -* For $a \neq 0$, unless: - * $X_0(u) = 0$ or $h(u)t^2 = -1$ (division by zero) - * $Y_0(u) (1 - h(u)t^2) = 2X_0(u)t$ (would give $Y=0$). + +- For $a=0$, unless: + - $u = 0$ or $t = 0$ (division by zero) + - $g(u) = -t^2$ (would give $Y=0$). +- For $a \neq 0$, unless: + - $X_0(u) = 0$ or $h(u)t^2 = -1$ (division by zero) + - $Y_0(u) (1 - h(u)t^2) = 2X_0(u)t$ (would give $Y=0$). 
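For concreteness, specializing the definitions above to the `secp256k1` parameters $a = 0$ and $b = 7$ gives:

$$
\begin{array}{lcl}
g(u) & = & u^3 + 7 \\
h(u) & = & 3u^3 \\
S_u & : & X^2 + 3u^3Y^2 = -(u^3 + 7), \quad Y \neq 0
\end{array}
$$

so the exceptional inputs for $P_u(t)$ reduce to $u = 0$, $t = 0$ and $u^3 + 7 = -t^2$, matching the $a = 0$ case listed above.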
The functions $X_0(u)$ and $Y_0(u)$ are defined in Appendix A of the paper, and depend on various properties of $E.$ @@ -123,20 +128,22 @@ $$ Put together and specialized for $a=0$ curves, decoding $(u, t)$ to an x-coordinate is: **Define** $F_u(t)$ as: -* Let $X = \dfrac{u^3 + b - t^2}{2t}.$ -* Let $Y = \dfrac{X + t}{u\sqrt{-3}}.$ -* Return the first $x$ in $(u + 4Y^2, \dfrac{-X}{2Y} - \dfrac{u}{2}, \dfrac{X}{2Y} - \dfrac{u}{2})$ for which $g(x)$ is square. + +- Let $X = \dfrac{u^3 + b - t^2}{2t}.$ +- Let $Y = \dfrac{X + t}{u\sqrt{-3}}.$ +- Return the first $x$ in $(u + 4Y^2, \dfrac{-X}{2Y} - \dfrac{u}{2}, \dfrac{X}{2Y} - \dfrac{u}{2})$ for which $g(x)$ is square. To make sure that every input decodes to a valid x-coordinate, we remap the inputs in case $P_u$ is not defined (when $u=0$, $t=0$, or $g(u) = -t^2$): **Define** $F_u(t)$ as: -* Let $u'=u$ if $u \neq 0$; $1$ otherwise (guaranteeing $u' \neq 0$). -* Let $t'=t$ if $t \neq 0$; $1$ otherwise (guaranteeing $t' \neq 0$). -* Let $t''=t'$ if $g(u') \neq -t'^2$; $2t'$ otherwise (guaranteeing $t'' \neq 0$ and $g(u') \neq -t''^2$). -* Let $X = \dfrac{u'^3 + b - t''^2}{2t''}.$ -* Let $Y = \dfrac{X + t''}{u'\sqrt{-3}}.$ -* Return the first $x$ in $(u' + 4Y^2, \dfrac{-X}{2Y} - \dfrac{u'}{2}, \dfrac{X}{2Y} - \dfrac{u'}{2})$ for which $x^3 + b$ is square. + +- Let $u'=u$ if $u \neq 0$; $1$ otherwise (guaranteeing $u' \neq 0$). +- Let $t'=t$ if $t \neq 0$; $1$ otherwise (guaranteeing $t' \neq 0$). +- Let $t''=t'$ if $g(u') \neq -t'^2$; $2t'$ otherwise (guaranteeing $t'' \neq 0$ and $g(u') \neq -t''^2$). +- Let $X = \dfrac{u'^3 + b - t''^2}{2t''}.$ +- Let $Y = \dfrac{X + t''}{u'\sqrt{-3}}.$ +- Return the first $x$ in $(u' + 4Y^2, \dfrac{-X}{2Y} - \dfrac{u'}{2}, \dfrac{X}{2Y} - \dfrac{u'}{2})$ for which $x^3 + b$ is square. The choices here are not strictly necessary. Just returning a fixed constant in any of the undefined cases would suffice, but the approach here is simple enough and gives fairly uniform output even in these cases. @@ -150,10 +157,11 @@ in `secp256k1_ellswift_xswiftec_var` (which outputs the actual x-coordinate). ## 3. The encoding function To implement $F_u^{-1}(x)$, the function to find the set of inverses $t$ for which $F_u(t) = x$, we have to reverse the process: -* Find all the $(X, Y) \in S_u$ that could have given rise to $x$, through the $x_1$, $x_2$, or $x_3$ formulas in $\psi_u.$ -* Map those $(X, Y)$ solutions to $t$ values using $P_u^{-1}(X, Y).$ -* For each of the found $t$ values, verify that $F_u(t) = x.$ -* Return the remaining $t$ values. + +- Find all the $(X, Y) \in S_u$ that could have given rise to $x$, through the $x_1$, $x_2$, or $x_3$ formulas in $\psi_u.$ +- Map those $(X, Y)$ solutions to $t$ values using $P_u^{-1}(X, Y).$ +- For each of the found $t$ values, verify that $F_u(t) = x.$ +- Return the remaining $t$ values. The function $P_u^{-1}$, which finds $t$ given $(X, Y) \in S_u$, is significantly simpler than $P_u:$ @@ -185,13 +193,14 @@ precedence over both. Because of this, the $g(-u-x)$ being square test for $x_1$ values round-trip back to the input $x$ correctly. This is the reason for choosing the $(x_3, x_2, x_1)$ precedence order in the decoder; any order which does not place $x_3$ first requires more complicated round-trip checks in the encoder. 
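Applications normally do not call the decoder and encoder described here directly; the decoder above underlies `secp256k1_ellswift_xswiftec_var`, and both are reached through the public ellswift module. The sketch below shows such a round trip. The prototypes of `secp256k1_ellswift_create` and `secp256k1_ellswift_decode` are assumptions based on `include/secp256k1_ellswift.h` rather than quotations of it, and the `/dev/urandom` read merely stands in for a real CSPRNG.

```C
/* ellswift_roundtrip.c -- encode a public key as 64 uniform-looking bytes
 * (u, t) and decode it back. Assumed prototypes:
 *   int secp256k1_ellswift_create(const secp256k1_context *ctx,
 *                                 unsigned char *ell64,
 *                                 const unsigned char *seckey32,
 *                                 const unsigned char *auxrnd32);
 *   int secp256k1_ellswift_decode(const secp256k1_context *ctx,
 *                                 secp256k1_pubkey *pubkey,
 *                                 const unsigned char *ell64); */
#include <stdio.h>
#include <secp256k1.h>
#include <secp256k1_ellswift.h>

/* Stand-in for a platform CSPRNG. */
static int fill_random(unsigned char *buf, size_t len) {
    FILE *f = fopen("/dev/urandom", "rb");
    size_t got = f ? fread(buf, 1, len, f) : 0;
    if (f) fclose(f);
    return got == len;
}

int main(void) {
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
    unsigned char seckey[32], auxrnd[32], ell64[64];
    secp256k1_pubkey pk;

    /* Secret key plus auxiliary randomness for the encoder's random
     * (u, t) choices described in Section 1. */
    if (!fill_random(seckey, 32) || !fill_random(auxrnd, 32)) return 1;

    /* Encode: 64-byte ElligatorSwift encoding of seckey's public key. */
    if (!secp256k1_ellswift_create(ctx, ell64, seckey, auxrnd)) return 1;

    /* Decode: recover the public key from (u, t). */
    if (!secp256k1_ellswift_decode(ctx, &pk, ell64)) return 1;

    printf("ellswift round trip ok\n");
    secp256k1_context_destroy(ctx);
    return 0;
}
```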
-### 3.1 Switching to *v, w* coordinates +### 3.1 Switching to _v, w_ coordinates Before working out the formulas for all this, we switch to different variables for $S_u.$ Let $v = (X/Y - u)/2$, and $w = 2Y.$ Or in the other direction, $X = w(u/2 + v)$ and $Y = w/2:$ -* $S_u'$ becomes the set of $(v, w)$ for which $w^2 (u^2 + uv + v^2 + a) = -g(u)$ and $w \neq 0.$ -* For $a=0$ curves, $P_u^{-1}$ can be stated for $(v,w)$ as $P_u^{'-1}(v, w) = w\left(\frac{\sqrt{-3}-1}{2}u - v\right).$ -* $\psi_u$ can be stated for $(v, w)$ as $\psi_u'(v, w) = (x_1, x_2, x_3, z)$, where + +- $S_u'$ becomes the set of $(v, w)$ for which $w^2 (u^2 + uv + v^2 + a) = -g(u)$ and $w \neq 0.$ +- For $a=0$ curves, $P_u^{-1}$ can be stated for $(v,w)$ as $P_u^{'-1}(v, w) = w\left(\frac{\sqrt{-3}-1}{2}u - v\right).$ +- $\psi_u$ can be stated for $(v, w)$ as $\psi_u'(v, w) = (x_1, x_2, x_3, z)$, where $$ \begin{array}{lcl} @@ -204,34 +213,37 @@ $$ We can now write the expressions for finding $(v, w)$ given $x$ explicitly, by solving each of the $\\{x_1, x_2, x_3\\}$ expressions for $v$ or $w$, and using the $S_u'$ equation to find the other variable: -* Assuming $x = x_1$, we find $v = x$ and $w = \pm\sqrt{-g(u)/(u^2 + uv + v^2 + a)}$ (two solutions). -* Assuming $x = x_2$, we find $v = -u-x$ and $w = \pm\sqrt{-g(u)/(u^2 + uv + v^2 + a)}$ (two solutions). -* Assuming $x = x_3$, we find $w = \pm\sqrt{x-u}$ and $v = -u/2 \pm \sqrt{-w^2(4g(u) + w^2h(u))}/(2w^2)$ (four solutions). + +- Assuming $x = x_1$, we find $v = x$ and $w = \pm\sqrt{-g(u)/(u^2 + uv + v^2 + a)}$ (two solutions). +- Assuming $x = x_2$, we find $v = -u-x$ and $w = \pm\sqrt{-g(u)/(u^2 + uv + v^2 + a)}$ (two solutions). +- Assuming $x = x_3$, we find $w = \pm\sqrt{x-u}$ and $v = -u/2 \pm \sqrt{-w^2(4g(u) + w^2h(u))}/(2w^2)$ (four solutions). ### 3.2 Avoiding computing all inverses -The *ElligatorSwift* algorithm as stated in Section 1 requires the computation of $L = F_u^{-1}(x)$ (the +The _ElligatorSwift_ algorithm as stated in Section 1 requires the computation of $L = F_u^{-1}(x)$ (the set of all $t$ such that $(u, t)$ decode to $x$) in full. This is unnecessary. Observe that the procedure of restarting with probability $(1 - \frac{\\#L}{8})$ and otherwise returning a uniformly random element from $L$ is actually equivalent to always padding $L$ with $\bot$ values up to length 8, picking a uniformly random element from that, restarting whenever $\bot$ is picked: -**Define** *ElligatorSwift(x)* as: -* Loop: - * Pick a uniformly random field element $u.$ - * Compute the set $L = F_u^{-1}(x).$ - * Let $T$ be the 8-element vector consisting of the elements of $L$, plus $8 - \\#L$ times $\\{\bot\\}.$ - * Select a uniformly random $t \in T.$ - * If $t \neq \bot$, return $(u, t)$; restart loop otherwise. +**Define** _ElligatorSwift(x)_ as: + +- Loop: + - Pick a uniformly random field element $u.$ + - Compute the set $L = F_u^{-1}(x).$ + - Let $T$ be the 8-element vector consisting of the elements of $L$, plus $8 - \\#L$ times $\\{\bot\\}.$ + - Select a uniformly random $t \in T.$ + - If $t \neq \bot$, return $(u, t)$; restart loop otherwise. Now notice that the order of elements in $T$ does not matter, as all we do is pick a uniformly random element in it, so we do not need to have all $\bot$ values at the end. 
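The padded-sampling loop just described is easy to prototype. A minimal Python sketch, with the full inverse map left abstract as a callback (a stand-in for the per-index $G_{c,u}$ functions constructed next; the callback name is hypothetical, not a library symbol):

```python
# Sketch of the padded-sampling formulation of ElligatorSwift(x) above.
# `xswiftec_inv_all(x, u)` is an assumed callback returning the list of all t
# with F_u(t) = x (at most 8 entries).
import secrets

p = 2**256 - 2**32 - 977   # secp256k1 field size, for concreteness

def elligatorswift_x(x, xswiftec_inv_all):
    while True:
        u = secrets.randbelow(p)                 # uniformly random field element
        L = xswiftec_inv_all(x, u)               # all preimages of x under F_u
        T = list(L) + [None] * (8 - len(L))      # pad with "bottom" up to length 8
        t = T[secrets.randbelow(8)]              # uniform pick from the padded list
        if t is not None:
            return u, t
        # t was the bottom value: restart the loop with a fresh u.
```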
As we have 8 distinct formulas for finding $(v, w)$ (taking the variants due to $\pm$ into account), we can associate every index in $T$ with exactly one of those formulas, making sure that: -* Formulas that yield no solutions (due to division by zero or non-existing square roots) or invalid solutions are made to return $\bot.$ -* For the $x_1$ and $x_2$ cases, if $g(-u-x)$ is a square, $\bot$ is returned instead (the round-trip check). -* In case multiple formulas would return the same non- $\bot$ result, all but one of those must be turned into $\bot$ to avoid biasing those. + +- Formulas that yield no solutions (due to division by zero or non-existing square roots) or invalid solutions are made to return $\bot.$ +- For the $x_1$ and $x_2$ cases, if $g(-u-x)$ is a square, $\bot$ is returned instead (the round-trip check). +- In case multiple formulas would return the same non- $\bot$ result, all but one of those must be turned into $\bot$ to avoid biasing those. The last condition above only occurs with negligible probability for cryptographically-sized curves, but is interesting to take into account as it allows exhaustive testing in small groups. See [Section 3.4](#34-dealing-with-special-cases) @@ -240,12 +252,13 @@ for an analysis of all the negligible cases. If we define $T = (G_{0,u}(x), G_{1,u}(x), \ldots, G_{7,u}(x))$, with each $G_{i,u}$ matching one of the formulas, the loop can be simplified to only compute one of the inverses instead of all of them: -**Define** *ElligatorSwift(x)* as: -* Loop: - * Pick a uniformly random field element $u.$ - * Pick a uniformly random integer $c$ in $[0,8).$ - * Let $t = G_{c,u}(x).$ - * If $t \neq \bot$, return $(u, t)$; restart loop otherwise. +**Define** _ElligatorSwift(x)_ as: + +- Loop: + - Pick a uniformly random field element $u.$ + - Pick a uniformly random integer $c$ in $[0,8).$ + - Let $t = G_{c,u}(x).$ + - If $t \neq \bot$, return $(u, t)$; restart loop otherwise. This is implemented in `secp256k1_ellswift_xelligatorswift_var`. @@ -256,18 +269,19 @@ Those are then repeated as $c=4$ through $c=7$ for the other sign of $w$ (noting Ignoring the negligible cases, we get: **Define** $G_{c,u}(x)$ as: -* If $c \in \\{0, 1, 4, 5\\}$ (for $x_1$ and $x_2$ formulas): - * If $g(-u-x)$ is square, return $\bot$ (as $x_3$ would be valid and take precedence). - * If $c \in \\{0, 4\\}$ (the $x_1$ formula) let $v = x$, otherwise let $v = -u-x$ (the $x_2$ formula) - * Let $s = -g(u)/(u^2 + uv + v^2 + a)$ (using $s = w^2$ in what follows). -* Otherwise, when $c \in \\{2, 3, 6, 7\\}$ (for $x_3$ formulas): - * Let $s = x-u.$ - * Let $r = \sqrt{-s(4g(u) + sh(u))}.$ - * Let $v = (r/s - u)/2$ if $c \in \\{3, 7\\}$; $(-r/s - u)/2$ otherwise. -* Let $w = \sqrt{s}.$ -* Depending on $c:$ - * If $c \in \\{0, 1, 2, 3\\}:$ return $P_u^{'-1}(v, w).$ - * If $c \in \\{4, 5, 6, 7\\}:$ return $P_u^{'-1}(v, -w).$ + +- If $c \in \\{0, 1, 4, 5\\}$ (for $x_1$ and $x_2$ formulas): + - If $g(-u-x)$ is square, return $\bot$ (as $x_3$ would be valid and take precedence). + - If $c \in \\{0, 4\\}$ (the $x_1$ formula) let $v = x$, otherwise let $v = -u-x$ (the $x_2$ formula) + - Let $s = -g(u)/(u^2 + uv + v^2 + a)$ (using $s = w^2$ in what follows). +- Otherwise, when $c \in \\{2, 3, 6, 7\\}$ (for $x_3$ formulas): + - Let $s = x-u.$ + - Let $r = \sqrt{-s(4g(u) + sh(u))}.$ + - Let $v = (r/s - u)/2$ if $c \in \\{3, 7\\}$; $(-r/s - u)/2$ otherwise. 
+- Let $w = \sqrt{s}.$ +- Depending on $c:$ + - If $c \in \\{0, 1, 2, 3\\}:$ return $P_u^{'-1}(v, w).$ + - If $c \in \\{4, 5, 6, 7\\}:$ return $P_u^{'-1}(v, -w).$ Whenever a square root of a non-square is taken, $\bot$ is returned; for both square roots this happens with roughly 50% on random inputs. Similarly, when a division by 0 would occur, $\bot$ is returned as well; this will only happen @@ -284,20 +298,21 @@ transformation. Furthermore, that transformation has no effect on $s$ in the fir as $u^2 + ux + x^2 + a = u^2 + u(-u-x) + (-u-x)^2 + a.$ Thus we can extract it out and move it down: **Define** $G_{c,u}(x)$ as: -* If $c \in \\{0, 1, 4, 5\\}:$ - * If $g(-u-x)$ is square, return $\bot.$ - * Let $s = -g(u)/(u^2 + ux + x^2 + a).$ - * Let $v = x.$ -* Otherwise, when $c \in \\{2, 3, 6, 7\\}:$ - * Let $s = x-u.$ - * Let $r = \sqrt{-s(4g(u) + sh(u))}.$ - * Let $v = (r/s - u)/2.$ -* Let $w = \sqrt{s}.$ -* Depending on $c:$ - * If $c \in \\{0, 2\\}:$ return $P_u^{'-1}(v, w).$ - * If $c \in \\{1, 3\\}:$ return $P_u^{'-1}(-u-v, w).$ - * If $c \in \\{4, 6\\}:$ return $P_u^{'-1}(v, -w).$ - * If $c \in \\{5, 7\\}:$ return $P_u^{'-1}(-u-v, -w).$ + +- If $c \in \\{0, 1, 4, 5\\}:$ + - If $g(-u-x)$ is square, return $\bot.$ + - Let $s = -g(u)/(u^2 + ux + x^2 + a).$ + - Let $v = x.$ +- Otherwise, when $c \in \\{2, 3, 6, 7\\}:$ + - Let $s = x-u.$ + - Let $r = \sqrt{-s(4g(u) + sh(u))}.$ + - Let $v = (r/s - u)/2.$ +- Let $w = \sqrt{s}.$ +- Depending on $c:$ + - If $c \in \\{0, 2\\}:$ return $P_u^{'-1}(v, w).$ + - If $c \in \\{1, 3\\}:$ return $P_u^{'-1}(-u-v, w).$ + - If $c \in \\{4, 6\\}:$ return $P_u^{'-1}(v, -w).$ + - If $c \in \\{5, 7\\}:$ return $P_u^{'-1}(-u-v, -w).$ This shows there will always be exactly 0, 4, or 8 $t$ values for a given $(u, x)$ input. There can be 0, 1, or 2 $(v, w)$ pairs before invoking $P_u^{'-1}$, and each results in 4 distinct $t$ values. @@ -310,58 +325,60 @@ we analyse them here. They generally fall into two categories: cases in which th do not decode back to $x$ (or at least cannot guarantee that they do), and cases in which the encoder might produce the same $t$ value for multiple $c$ inputs (thereby biasing that encoding): -* In the branch for $x_1$ and $x_2$ (where $c \in \\{0, 1, 4, 5\\}$): - * When $g(u) = 0$, we would have $s=w=Y=0$, which is not on $S_u.$ This is only possible on even-ordered curves. +- In the branch for $x_1$ and $x_2$ (where $c \in \\{0, 1, 4, 5\\}$): + - When $g(u) = 0$, we would have $s=w=Y=0$, which is not on $S_u.$ This is only possible on even-ordered curves. Excluding this also removes the one condition under which the simplified check for $x_3$ on the curve fails (namely when $g(x_1)=g(x_2)=0$ but $g(x_3)$ is not square). This does exclude some valid encodings: when both $g(u)=0$ and $u^2+ux+x^2+a=0$ (also implying $g(x)=0$), the $S_u'$ equation degenerates to $0 = 0$, and many valid $t$ values may exist. Yet, these cannot be targeted uniformly by the encoder anyway as there will generally be more than 8. - * When $g(x) = 0$, the same $t$ would be produced as in the $x_3$ branch (where $c \in \\{2, 3, 6, 7\\}$) which we give precedence + - When $g(x) = 0$, the same $t$ would be produced as in the $x_3$ branch (where $c \in \\{2, 3, 6, 7\\}$) which we give precedence as it can deal with $g(u)=0$. This is again only possible on even-ordered curves. -* In the branch for $x_3$ (where $c \in \\{2, 3, 6, 7\\}$): - * When $s=0$, a division by zero would occur. 
- * When $v = -u-v$ and $c \in \\{3, 7\\}$, the same $t$ would be returned as in the $c \in \\{2, 6\\}$ cases. +- In the branch for $x_3$ (where $c \in \\{2, 3, 6, 7\\}$): + - When $s=0$, a division by zero would occur. + - When $v = -u-v$ and $c \in \\{3, 7\\}$, the same $t$ would be returned as in the $c \in \\{2, 6\\}$ cases. It is equivalent to checking whether $r=0$. This cannot occur in the $x_1$ or $x_2$ branches, as it would trigger the $g(-u-x)$ is square condition. A similar concern for $w = -w$ does not exist, as $w=0$ is already impossible in both branches: in the first it requires $g(u)=0$ which is already outlawed on even-ordered curves and impossible on others; in the second it would trigger division by zero. -* Curve-specific special cases also exist that need to be rejected, because they result in $(u,t)$ which is invalid to the decoder, or because of division by zero in the encoder: - * For $a=0$ curves, when $u=0$ or when $t=0$. The latter can only be reached by the encoder when $g(u)=0$, which requires an even-ordered curve. - * For $a \neq 0$ curves, when $X_0(u)=0$, when $h(u)t^2 = -1$, or when $w(u + 2v) = 2X_0(u)$ while also either $w \neq 2Y_0(u)$ or $h(u)=0$. +- Curve-specific special cases also exist that need to be rejected, because they result in $(u,t)$ which is invalid to the decoder, or because of division by zero in the encoder: + - For $a=0$ curves, when $u=0$ or when $t=0$. The latter can only be reached by the encoder when $g(u)=0$, which requires an even-ordered curve. + - For $a \neq 0$ curves, when $X_0(u)=0$, when $h(u)t^2 = -1$, or when $w(u + 2v) = 2X_0(u)$ while also either $w \neq 2Y_0(u)$ or $h(u)=0$. **Define** a version of $G_{c,u}(x)$ which deals with all these cases: -* If $a=0$ and $u=0$, return $\bot.$ -* If $a \neq 0$ and $X_0(u)=0$, return $\bot.$ -* If $c \in \\{0, 1, 4, 5\\}:$ - * If $g(u) = 0$ or $g(x) = 0$, return $\bot$ (even curves only). - * If $g(-u-x)$ is square, return $\bot.$ - * Let $s = -g(u)/(u^2 + ux + x^2 + a)$ (cannot cause division by zero). - * Let $v = x.$ -* Otherwise, when $c \in \\{2, 3, 6, 7\\}:$ - * Let $s = x-u.$ - * Let $r = \sqrt{-s(4g(u) + sh(u))}$; return $\bot$ if not square. - * If $c \in \\{3, 7\\}$ and $r=0$, return $\bot.$ - * If $s = 0$, return $\bot.$ - * Let $v = (r/s - u)/2.$ -* Let $w = \sqrt{s}$; return $\bot$ if not square. -* If $a \neq 0$ and $w(u+2v) = 2X_0(u)$ and either $w \neq 2Y_0(u)$ or $h(u) = 0$, return $\bot.$ -* Depending on $c:$ - * If $c \in \\{0, 2\\}$, let $t = P_u^{'-1}(v, w).$ - * If $c \in \\{1, 3\\}$, let $t = P_u^{'-1}(-u-v, w).$ - * If $c \in \\{4, 6\\}$, let $t = P_u^{'-1}(v, -w).$ - * If $c \in \\{5, 7\\}$, let $t = P_u^{'-1}(-u-v, -w).$ -* If $a=0$ and $t=0$, return $\bot$ (even curves only). -* If $a \neq 0$ and $h(u)t^2 = -1$, return $\bot.$ -* Return $t.$ + +- If $a=0$ and $u=0$, return $\bot.$ +- If $a \neq 0$ and $X_0(u)=0$, return $\bot.$ +- If $c \in \\{0, 1, 4, 5\\}:$ + - If $g(u) = 0$ or $g(x) = 0$, return $\bot$ (even curves only). + - If $g(-u-x)$ is square, return $\bot.$ + - Let $s = -g(u)/(u^2 + ux + x^2 + a)$ (cannot cause division by zero). + - Let $v = x.$ +- Otherwise, when $c \in \\{2, 3, 6, 7\\}:$ + - Let $s = x-u.$ + - Let $r = \sqrt{-s(4g(u) + sh(u))}$; return $\bot$ if not square. + - If $c \in \\{3, 7\\}$ and $r=0$, return $\bot.$ + - If $s = 0$, return $\bot.$ + - Let $v = (r/s - u)/2.$ +- Let $w = \sqrt{s}$; return $\bot$ if not square. 
+- If $a \neq 0$ and $w(u+2v) = 2X_0(u)$ and either $w \neq 2Y_0(u)$ or $h(u) = 0$, return $\bot.$ +- Depending on $c:$ + - If $c \in \\{0, 2\\}$, let $t = P_u^{'-1}(v, w).$ + - If $c \in \\{1, 3\\}$, let $t = P_u^{'-1}(-u-v, w).$ + - If $c \in \\{4, 6\\}$, let $t = P_u^{'-1}(v, -w).$ + - If $c \in \\{5, 7\\}$, let $t = P_u^{'-1}(-u-v, -w).$ +- If $a=0$ and $t=0$, return $\bot$ (even curves only). +- If $a \neq 0$ and $h(u)t^2 = -1$, return $\bot.$ +- Return $t.$ Given any $u$, using this algorithm over all $x$ and $c$ values, every $t$ value will be reached exactly once, for an $x$ for which $F_u(t) = x$ holds, except for these cases that will not be reached: -* All cases where $P_u(t)$ is not defined: - * For $a=0$ curves, when $u=0$, $t=0$, or $g(u) = -t^2.$ - * For $a \neq 0$ curves, when $h(u)t^2 = -1$, $X_0(u) = 0$, or $Y_0(u) (1 - h(u) t^2) = 2X_0(u)t.$ -* When $g(u)=0$, the potentially many $t$ values that decode to an $x$ satisfying $g(x)=0$ using the $x_2$ formula. These were excluded by the $g(u)=0$ condition in the $c \in \\{0, 1, 4, 5\\}$ branch. + +- All cases where $P_u(t)$ is not defined: + - For $a=0$ curves, when $u=0$, $t=0$, or $g(u) = -t^2.$ + - For $a \neq 0$ curves, when $h(u)t^2 = -1$, $X_0(u) = 0$, or $Y_0(u) (1 - h(u) t^2) = 2X_0(u)t.$ +- When $g(u)=0$, the potentially many $t$ values that decode to an $x$ satisfying $g(x)=0$ using the $x_2$ formula. These were excluded by the $g(u)=0$ condition in the $c \in \\{0, 1, 4, 5\\}$ branch. These cases form a negligible subset of all $(u, t)$ for cryptographically sized curves. @@ -370,40 +387,42 @@ These cases form a negligible subset of all $(u, t)$ for cryptographically sized Specialized for odd-ordered $a=0$ curves: **Define** $G_{c,u}(x)$ as: -* If $u=0$, return $\bot.$ -* If $c \in \\{0, 1, 4, 5\\}:$ - * If $(-u-x)^3 + b$ is square, return $\bot$ - * Let $s = -(u^3 + b)/(u^2 + ux + x^2)$ (cannot cause division by 0). - * Let $v = x.$ -* Otherwise, when $c \in \\{2, 3, 6, 7\\}:$ - * Let $s = x-u.$ - * Let $r = \sqrt{-s(4(u^3 + b) + 3su^2)}$; return $\bot$ if not square. - * If $c \in \\{3, 7\\}$ and $r=0$, return $\bot.$ - * If $s = 0$, return $\bot.$ - * Let $v = (r/s - u)/2.$ -* Let $w = \sqrt{s}$; return $\bot$ if not square. -* Depending on $c:$ - * If $c \in \\{0, 2\\}:$ return $w(\frac{\sqrt{-3}-1}{2}u - v).$ - * If $c \in \\{1, 3\\}:$ return $w(\frac{\sqrt{-3}+1}{2}u + v).$ - * If $c \in \\{4, 6\\}:$ return $w(\frac{-\sqrt{-3}+1}{2}u + v).$ - * If $c \in \\{5, 7\\}:$ return $w(\frac{-\sqrt{-3}-1}{2}u - v).$ + +- If $u=0$, return $\bot.$ +- If $c \in \\{0, 1, 4, 5\\}:$ + - If $(-u-x)^3 + b$ is square, return $\bot$ + - Let $s = -(u^3 + b)/(u^2 + ux + x^2)$ (cannot cause division by 0). + - Let $v = x.$ +- Otherwise, when $c \in \\{2, 3, 6, 7\\}:$ + - Let $s = x-u.$ + - Let $r = \sqrt{-s(4(u^3 + b) + 3su^2)}$; return $\bot$ if not square. + - If $c \in \\{3, 7\\}$ and $r=0$, return $\bot.$ + - If $s = 0$, return $\bot.$ + - Let $v = (r/s - u)/2.$ +- Let $w = \sqrt{s}$; return $\bot$ if not square. +- Depending on $c:$ + - If $c \in \\{0, 2\\}:$ return $w(\frac{\sqrt{-3}-1}{2}u - v).$ + - If $c \in \\{1, 3\\}:$ return $w(\frac{\sqrt{-3}+1}{2}u + v).$ + - If $c \in \\{4, 6\\}:$ return $w(\frac{-\sqrt{-3}+1}{2}u + v).$ + - If $c \in \\{5, 7\\}:$ return $w(\frac{-\sqrt{-3}-1}{2}u - v).$ This is implemented in `secp256k1_ellswift_xswiftec_inv_var`. 
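For concreteness, the specialized $G_{c,u}(x)$ above can be sketched in Python as follows. The helper names are illustrative, the small field helpers are repeated so the block stands alone, and the four final return formulas are expressed through the equivalent $P_u^{'-1}$ call on a sign-adjusted $(v, w)$ as in Section 3.3; this is not the field-element implementation of `secp256k1_ellswift_xswiftec_inv_var`. Combined with the loop from Section 3.2 it gives a complete x-only encoder.

```python
# Sketch of G_{c,u}(x) for secp256k1 (a = 0, b = 7), mirroring the case list above.
p = 2**256 - 2**32 - 977
b = 7
SQRT_M3 = pow(p - 3, (p + 1) // 4, p)   # sqrt(-3) mod p
assert SQRT_M3 * SQRT_M3 % p == p - 3

def inv(x: int) -> int:
    return pow(x, p - 2, p)

def is_square(n: int) -> bool:
    return pow(n % p, (p - 1) // 2, p) in (0, 1)

def sqrt_mod(n: int) -> int:
    """Square root mod p (p = 3 mod 4); caller must ensure n is square."""
    return pow(n % p, (p + 1) // 4, p)

def xswiftec_inv(x: int, u: int, c: int):
    """Partial inverse G_{c,u}(x): a t with F_u(t) = x for case c in [0,8), or None.

    Assumes x is a valid x-coordinate on secp256k1.
    """
    x, u = x % p, u % p
    if u == 0:
        return None
    if c & 2 == 0:                                      # c in {0,1,4,5}: x1/x2 branch
        if is_square((pow(-u - x, 3, p) + b) % p):
            return None                                 # x3 would be valid and take precedence
        s = -(pow(u, 3, p) + b) * inv(u * u + u * x + x * x) % p
        v = x
    else:                                               # c in {2,3,6,7}: x3 branch
        s = (x - u) % p
        q = -s * (4 * (pow(u, 3, p) + b) + 3 * s * u * u) % p
        if not is_square(q):
            return None
        r = sqrt_mod(q)
        if c & 1 and r == 0:                            # c in {3,7}: would duplicate {2,6}
            return None
        if s == 0:
            return None
        v = (r * inv(s) - u) * inv(2) % p
    if not is_square(s):
        return None
    w = sqrt_mod(s)
    if c & 1:
        v = (-u - v) % p                                # the "-u-v" variants
    if c & 4:
        w = (-w) % p                                    # the "-w" variants
    return w * ((SQRT_M3 - 1) * u * inv(2) - v) % p     # P_u'^{-1}(v, w)
```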
And the x-only ElligatorSwift encoding algorithm is still: -**Define** *ElligatorSwift(x)* as: -* Loop: - * Pick a uniformly random field element $u.$ - * Pick a uniformly random integer $c$ in $[0,8).$ - * Let $t = G_{c,u}(x).$ - * If $t \neq \bot$, return $(u, t)$; restart loop otherwise. +**Define** _ElligatorSwift(x)_ as: + +- Loop: + - Pick a uniformly random field element $u.$ + - Pick a uniformly random integer $c$ in $[0,8).$ + - Let $t = G_{c,u}(x).$ + - If $t \neq \bot$, return $(u, t)$; restart loop otherwise. Note that this logic does not take the remapped $u=0$, $t=0$, and $g(u) = -t^2$ cases into account; it just avoids them. While it is not impossible to make the encoder target them, this would increase the maximum number of $t$ values for a given $(u, x)$ combination beyond 8, and thereby slow down the ElligatorSwift loop proportionally, for a negligible gain in uniformity. -## 4. Encoding and decoding full *(x, y)* coordinates +## 4. Encoding and decoding full _(x, y)_ coordinates So far we have only addressed encoding and decoding x-coordinates, but in some cases an encoding for full points with $(x, y)$ coordinates is desirable. It is possible to encode this information @@ -422,30 +441,32 @@ four distinct $P_u^{'-1}$ calls in the definition of $G_{u,c}.$ To encode the sign of $y$ in the sign of $Y:$ -**Define** *Decode(u, t)* for full $(x, y)$ as: -* Let $(X, Y) = P_u(t).$ -* Let $x$ be the first value in $(u + 4Y^2, \frac{-X}{2Y} - \frac{u}{2}, \frac{X}{2Y} - \frac{u}{2})$ for which $g(x)$ is square. -* Let $y = \sqrt{g(x)}.$ -* If $sign(y) = sign(Y)$, return $(x, y)$; otherwise return $(x, -y).$ +**Define** _Decode(u, t)_ for full $(x, y)$ as: + +- Let $(X, Y) = P_u(t).$ +- Let $x$ be the first value in $(u + 4Y^2, \frac{-X}{2Y} - \frac{u}{2}, \frac{X}{2Y} - \frac{u}{2})$ for which $g(x)$ is square. +- Let $y = \sqrt{g(x)}.$ +- If $sign(y) = sign(Y)$, return $(x, y)$; otherwise return $(x, -y).$ And encoding would be done using a $G_{c,u}(x, y)$ function defined as: **Define** $G_{c,u}(x, y)$ as: -* If $c \in \\{0, 1\\}:$ - * If $g(u) = 0$ or $g(x) = 0$, return $\bot$ (even curves only). - * If $g(-u-x)$ is square, return $\bot.$ - * Let $s = -g(u)/(u^2 + ux + x^2 + a)$ (cannot cause division by zero). - * Let $v = x.$ -* Otherwise, when $c \in \\{2, 3\\}:$ - * Let $s = x-u.$ - * Let $r = \sqrt{-s(4g(u) + sh(u))}$; return $\bot$ if not square. - * If $c = 3$ and $r = 0$, return $\bot.$ - * Let $v = (r/s - u)/2.$ -* Let $w = \sqrt{s}$; return $\bot$ if not square. -* Let $w' = w$ if $sign(w/2) = sign(y)$; $-w$ otherwise. -* Depending on $c:$ - * If $c \in \\{0, 2\\}:$ return $P_u^{'-1}(v, w').$ - * If $c \in \\{1, 3\\}:$ return $P_u^{'-1}(-u-v, w').$ + +- If $c \in \\{0, 1\\}:$ + - If $g(u) = 0$ or $g(x) = 0$, return $\bot$ (even curves only). + - If $g(-u-x)$ is square, return $\bot.$ + - Let $s = -g(u)/(u^2 + ux + x^2 + a)$ (cannot cause division by zero). + - Let $v = x.$ +- Otherwise, when $c \in \\{2, 3\\}:$ + - Let $s = x-u.$ + - Let $r = \sqrt{-s(4g(u) + sh(u))}$; return $\bot$ if not square. + - If $c = 3$ and $r = 0$, return $\bot.$ + - Let $v = (r/s - u)/2.$ +- Let $w = \sqrt{s}$; return $\bot$ if not square. +- Let $w' = w$ if $sign(w/2) = sign(y)$; $-w$ otherwise. 
+- Depending on $c:$ + - If $c \in \\{0, 2\\}:$ return $P_u^{'-1}(v, w').$ + - If $c \in \\{1, 3\\}:$ return $P_u^{'-1}(-u-v, w').$ Note that $c$ now only ranges $[0,4)$, as the sign of $w'$ is decided based on that of $y$, rather than on $c.$ This change makes some valid encodings unreachable: when $y = 0$ and $sign(Y) \neq sign(0)$. @@ -454,22 +475,23 @@ In the above logic, $sign$ can be implemented in several ways, such as parity of of the input field element (for prime-sized fields) or the quadratic residuosity (for fields where $-1$ is not square). The choice does not matter, as long as it only takes on two possible values, and for $x \neq 0$ it holds that $sign(x) \neq sign(-x)$. -### 4.1 Full *(x, y)* coordinates for `secp256k1` +### 4.1 Full _(x, y)_ coordinates for `secp256k1` For $a=0$ curves, there is another option. Note that for those, the $P_u(t)$ function translates negations of $t$ to negations of (both) $X$ and $Y.$ Thus, we can use $sign(t)$ to encode the y-coordinate directly. Combined with the earlier remapping to guarantee all inputs land on the curve, we get as decoder: -**Define** *Decode(u, t)* as: -* Let $u'=u$ if $u \neq 0$; $1$ otherwise. -* Let $t'=t$ if $t \neq 0$; $1$ otherwise. -* Let $t''=t'$ if $u'^3 + b + t'^2 \neq 0$; $2t'$ otherwise. -* Let $X = \dfrac{u'^3 + b - t''^2}{2t''}.$ -* Let $Y = \dfrac{X + t''}{u'\sqrt{-3}}.$ -* Let $x$ be the first element of $(u' + 4Y^2, \frac{-X}{2Y} - \frac{u'}{2}, \frac{X}{2Y} - \frac{u'}{2})$ for which $g(x)$ is square. -* Let $y = \sqrt{g(x)}.$ -* Return $(x, y)$ if $sign(y) = sign(t)$; $(x, -y)$ otherwise. +**Define** _Decode(u, t)_ as: + +- Let $u'=u$ if $u \neq 0$; $1$ otherwise. +- Let $t'=t$ if $t \neq 0$; $1$ otherwise. +- Let $t''=t'$ if $u'^3 + b + t'^2 \neq 0$; $2t'$ otherwise. +- Let $X = \dfrac{u'^3 + b - t''^2}{2t''}.$ +- Let $Y = \dfrac{X + t''}{u'\sqrt{-3}}.$ +- Let $x$ be the first element of $(u' + 4Y^2, \frac{-X}{2Y} - \frac{u'}{2}, \frac{X}{2Y} - \frac{u'}{2})$ for which $g(x)$ is square. +- Let $y = \sqrt{g(x)}.$ +- Return $(x, y)$ if $sign(y) = sign(t)$; $(x, -y)$ otherwise. This is implemented in `secp256k1_ellswift_swiftec_var`. The used $sign(x)$ function is the parity of $x$ when represented as in integer in $[0,q).$ diff --git a/external/secp256k1/doc/musig.md b/external/secp256k1/doc/musig.md index ae21f9b131..176b131da6 100644 --- a/external/secp256k1/doc/musig.md +++ b/external/secp256k1/doc/musig.md @@ -1,5 +1,4 @@ -Notes on the musig module API -=========================== +# Notes on the musig module API The following sections contain additional notes on the API of the musig module (`include/secp256k1_musig.h`). A usage example can be found in `examples/musig.c`. diff --git a/external/secp256k1/doc/release-process.md b/external/secp256k1/doc/release-process.md index a64bae0f0d..4ac9ca0d23 100644 --- a/external/secp256k1/doc/release-process.md +++ b/external/secp256k1/doc/release-process.md @@ -2,7 +2,7 @@ This document outlines the process for releasing versions of the form `$MAJOR.$MINOR.$PATCH`. -We distinguish between two types of releases: *regular* and *maintenance* releases. +We distinguish between two types of releases: _regular_ and _maintenance_ releases. Regular releases are releases of a new major or minor version as well as patches of the most recent release. Maintenance releases, on the other hand, are required for patches of older releases. 
@@ -15,6 +15,7 @@ This process also assumes that there will be no minor releases for old major rel We aim to cut a regular release every 3-4 months, approximately twice as frequent as major Bitcoin Core releases. Every second release should be published one month before the feature freeze of the next major Bitcoin Core release, allowing sufficient time to update the library in Core. ## Sanity checks + Perform these checks when reviewing the release PR (see below): 1. Ensure `make distcheck` doesn't fail. @@ -42,15 +43,15 @@ Perform these checks when reviewing the release PR (see below): ## Regular release 1. Open a PR to the master branch with a commit (using message `"release: prepare for $MAJOR.$MINOR.$PATCH"`, for example) that - * finalizes the release notes in [CHANGELOG.md](../CHANGELOG.md) by - * adding a section for the release (make sure that the version number is a link to a diff between the previous and new version), - * removing the `[Unreleased]` section header, - * ensuring that the release notes are not missing entries (check the `needs-changelog` label on github), and - * including an entry for `### ABI Compatibility` if it doesn't exist, - * sets `_PKG_VERSION_IS_RELEASE` to `true` in `configure.ac`, and, - * if this is not a patch release, - * updates `_PKG_VERSION_*` and `_LIB_VERSION_*` in `configure.ac`, and - * updates `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_*` in `CMakeLists.txt`. + - finalizes the release notes in [CHANGELOG.md](../CHANGELOG.md) by + - adding a section for the release (make sure that the version number is a link to a diff between the previous and new version), + - removing the `[Unreleased]` section header, + - ensuring that the release notes are not missing entries (check the `needs-changelog` label on github), and + - including an entry for `### ABI Compatibility` if it doesn't exist, + - sets `_PKG_VERSION_IS_RELEASE` to `true` in `configure.ac`, and, + - if this is not a patch release, + - updates `_PKG_VERSION_*` and `_LIB_VERSION_*` in `configure.ac`, and + - updates `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_*` in `CMakeLists.txt`. 2. Perform the [sanity checks](#sanity-checks) on the PR branch. 3. After the PR is merged, tag the commit, and push the tag: ``` @@ -59,11 +60,12 @@ Perform these checks when reviewing the release PR (see below): git push git@github.com:bitcoin-core/secp256k1.git v$MAJOR.$MINOR.$PATCH ``` 4. Open a PR to the master branch with a commit (using message `"release cleanup: bump version after $MAJOR.$MINOR.$PATCH"`, for example) that - * sets `_PKG_VERSION_IS_RELEASE` to `false` and increments `_PKG_VERSION_PATCH` and `_LIB_VERSION_REVISION` in `configure.ac`, - * increments the `$PATCH` component of `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_REVISION` in `CMakeLists.txt`, and - * adds an `[Unreleased]` section header to the [CHANGELOG.md](../CHANGELOG.md). + - sets `_PKG_VERSION_IS_RELEASE` to `false` and increments `_PKG_VERSION_PATCH` and `_LIB_VERSION_REVISION` in `configure.ac`, + - increments the `$PATCH` component of `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_REVISION` in `CMakeLists.txt`, and + - adds an `[Unreleased]` section header to the [CHANGELOG.md](../CHANGELOG.md). If other maintainers are not present to approve the PR, it can be merged without ACKs. + 5. Create a new GitHub release with a link to the corresponding entry in [CHANGELOG.md](../CHANGELOG.md). 6. 
Send an announcement email to the bitcoin-dev mailing list. @@ -77,9 +79,9 @@ Note that bug fixes need to be backported only to releases for which no compatib git push git@github.com:bitcoin-core/secp256k1.git $MAJOR.$MINOR ``` 2. Open a pull request to the `$MAJOR.$MINOR` branch that - * includes the bug fixes, - * finalizes the release notes similar to a regular release, - * increments `_PKG_VERSION_PATCH` and `_LIB_VERSION_REVISION` in `configure.ac` + - includes the bug fixes, + - finalizes the release notes similar to a regular release, + - increments `_PKG_VERSION_PATCH` and `_LIB_VERSION_REVISION` in `configure.ac` and the `$PATCH` component of `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_REVISION` in `CMakeLists.txt` (with commit message `"release: bump versions for $MAJOR.$MINOR.$PATCH"`, for example). 3. Perform the [sanity checks](#sanity-checks) on the PR branch. @@ -89,6 +91,6 @@ Note that bug fixes need to be backported only to releases for which no compatib git tag -s v$MAJOR.$MINOR.$PATCH -m "libsecp256k1 $MAJOR.$MINOR.$PATCH" git push git@github.com:bitcoin-core/secp256k1.git v$MAJOR.$MINOR.$PATCH ``` -6. Create a new GitHub release with a link to the corresponding entry in [CHANGELOG.md](../CHANGELOG.md). -7. Send an announcement email to the bitcoin-dev mailing list. -8. Open PR to the master branch that includes a commit (with commit message `"release notes: add $MAJOR.$MINOR.$PATCH"`, for example) that adds release notes to [CHANGELOG.md](../CHANGELOG.md). +5. Create a new GitHub release with a link to the corresponding entry in [CHANGELOG.md](../CHANGELOG.md). +6. Send an announcement email to the bitcoin-dev mailing list. +7. Open PR to the master branch that includes a commit (with commit message `"release notes: add $MAJOR.$MINOR.$PATCH"`, for example) that adds release notes to [CHANGELOG.md](../CHANGELOG.md). diff --git a/external/secp256k1/doc/safegcd_implementation.md b/external/secp256k1/doc/safegcd_implementation.md index 5dbbb7bbd2..72d99daad3 100644 --- a/external/secp256k1/doc/safegcd_implementation.md +++ b/external/secp256k1/doc/safegcd_implementation.md @@ -29,65 +29,67 @@ def gcd(f, g): return abs(f) ``` -It computes the greatest common divisor of an odd integer *f* and any integer *g*. Its inner loop -keeps rewriting the variables *f* and *g* alongside a state variable *δ* that starts at *1*, until -*g=0* is reached. At that point, *|f|* gives the GCD. Each of the transitions in the loop is called a +It computes the greatest common divisor of an odd integer _f_ and any integer _g_. Its inner loop +keeps rewriting the variables _f_ and _g_ alongside a state variable _δ_ that starts at _1_, until +_g=0_ is reached. At that point, _|f|_ gives the GCD. Each of the transitions in the loop is called a "division step" (referred to as divstep in what follows). -For example, *gcd(21, 14)* would be computed as: -- Start with *δ=1 f=21 g=14* -- Take the third branch: *δ=2 f=21 g=7* -- Take the first branch: *δ=-1 f=7 g=-7* -- Take the second branch: *δ=0 f=7 g=0* -- The answer *|f| = 7*. +For example, _gcd(21, 14)_ would be computed as: + +- Start with _δ=1 f=21 g=14_ +- Take the third branch: _δ=2 f=21 g=7_ +- Take the first branch: _δ=-1 f=7 g=-7_ +- Take the second branch: _δ=0 f=7 g=0_ +- The answer _|f| = 7_. Why it works: + - Divsteps can be decomposed into two steps (see paragraph 8.2 in the paper): - - (a) If *g* is odd, replace *(f,g)* with *(g,g-f)* or (f,g+f), resulting in an even *g*. 
- - (b) Replace *(f,g)* with *(f,g/2)* (where *g* is guaranteed to be even). + - (a) If _g_ is odd, replace _(f,g)_ with _(g,g-f)_ or (f,g+f), resulting in an even _g_. + - (b) Replace _(f,g)_ with _(f,g/2)_ (where _g_ is guaranteed to be even). - Neither of those two operations change the GCD: - - For (a), assume *gcd(f,g)=c*, then it must be the case that *f=a c* and *g=b c* for some integers *a* - and *b*. As *(g,g-f)=(b c,(b-a)c)* and *(f,f+g)=(a c,(a+b)c)*, the result clearly still has - common factor *c*. Reasoning in the other direction shows that no common factor can be added by + - For (a), assume _gcd(f,g)=c_, then it must be the case that _f=a c_ and _g=b c_ for some integers _a_ + and _b_. As _(g,g-f)=(b c,(b-a)c)_ and _(f,f+g)=(a c,(a+b)c)_, the result clearly still has + common factor _c_. Reasoning in the other direction shows that no common factor can be added by doing so either. - - For (b), we know that *f* is odd, so *gcd(f,g)* clearly has no factor *2*, and we can remove - it from *g*. -- The algorithm will eventually converge to *g=0*. This is proven in the paper (see theorem G.3). -- It follows that eventually we find a final value *f'* for which *gcd(f,g) = gcd(f',0)*. As the - gcd of *f'* and *0* is *|f'|* by definition, that is our answer. + - For (b), we know that _f_ is odd, so _gcd(f,g)_ clearly has no factor _2_, and we can remove + it from _g_. +- The algorithm will eventually converge to _g=0_. This is proven in the paper (see theorem G.3). +- It follows that eventually we find a final value _f'_ for which _gcd(f,g) = gcd(f',0)_. As the + gcd of _f'_ and _0_ is _|f'|_ by definition, that is our answer. Compared to more [traditional GCD algorithms](https://en.wikipedia.org/wiki/Euclidean_algorithm), this one has the property of only ever looking at the low-order bits of the variables to decide the next steps, and being easy to make -constant-time (in more low-level languages than Python). The *δ* parameter is necessary to +constant-time (in more low-level languages than Python). The _δ_ parameter is necessary to guide the algorithm towards shrinking the numbers' magnitudes without explicitly needing to look at high order bits. Properties that will become important later: -- Performing more divsteps than needed is not a problem, as *f* does not change anymore after *g=0*. -- Only even numbers are divided by *2*. This means that when reasoning about it algebraically we - do not need to worry about rounding. -- At every point during the algorithm's execution the next *N* steps only depend on the bottom *N* - bits of *f* and *g*, and on *δ*. +- Performing more divsteps than needed is not a problem, as _f_ does not change anymore after _g=0_. +- Only even numbers are divided by _2_. This means that when reasoning about it algebraically we + do not need to worry about rounding. +- At every point during the algorithm's execution the next _N_ steps only depend on the bottom _N_ + bits of _f_ and _g_, and on _δ_. ## 2. From GCDs to modular inverses -We want an algorithm to compute the inverse *a* of *x* modulo *M*, i.e. the number a such that *a x=1 -mod M*. This inverse only exists if the GCD of *x* and *M* is *1*, but that is always the case if *M* is -prime and *0 < x < M*. In what follows, assume that the modular inverse exists. +We want an algorithm to compute the inverse _a_ of _x_ modulo _M_, i.e. the number a such that _a x=1 +mod M_. This inverse only exists if the GCD of _x_ and _M_ is _1_, but that is always the case if _M_ is +prime and _0 < x < M_. 
In what follows, assume that the modular inverse exists. It turns out this inverse can be computed as a side effect of computing the GCD by keeping track of how the internal variables can be written as linear combinations of the inputs at every step (see the [extended Euclidean algorithm](https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm)). -Since the GCD is *1*, such an algorithm will compute numbers *a* and *b* such that a x + b M = 1*. +Since the GCD is _1_, such an algorithm will compute numbers _a_ and _b_ such that a x + b M = 1*. Taking that expression *mod M* gives *a x mod M = 1*, and we see that *a* is the modular inverse of *x -mod M*. +mod M\*. A similar approach can be used to calculate modular inverses using the divsteps-based GCD -algorithm shown above, if the modulus *M* is odd. To do so, compute *gcd(f=M,g=x)*, while keeping -track of extra variables *d* and *e*, for which at every step *d = f/x (mod M)* and *e = g/x (mod M)*. -*f/x* here means the number which multiplied with *x* gives *f mod M*. As *f* and *g* are initialized to *M* -and *x* respectively, *d* and *e* just start off being *0* (*M/x mod M = 0/x mod M = 0*) and *1* (*x/x mod M -= 1*). +algorithm shown above, if the modulus _M_ is odd. To do so, compute _gcd(f=M,g=x)_, while keeping +track of extra variables _d_ and _e_, for which at every step _d = f/x (mod M)_ and _e = g/x (mod M)_. +_f/x_ here means the number which multiplied with _x_ gives _f mod M_. As _f_ and _g_ are initialized to _M_ +and _x_ respectively, _d_ and _e_ just start off being _0_ (_M/x mod M = 0/x mod M = 0_) and _1_ (_x/x mod M += 1_). ```python def div2(M, x): @@ -119,17 +121,16 @@ def modinv(M, x): return (d * f) % M ``` -Also note that this approach to track *d* and *e* throughout the computation to determine the inverse +Also note that this approach to track _d_ and _e_ throughout the computation to determine the inverse is different from the paper. There (see paragraph 12.1 in the paper) a transition matrix for the entire computation is determined (see section 3 below) and the inverse is computed from that. The approach here avoids the need for 2x2 matrix multiplications of various sizes, and appears to be faster at the level of optimization we're able to do in C. - ## 3. Batching multiple divsteps -Every divstep can be expressed as a matrix multiplication, applying a transition matrix *(1/2 t)* -to both vectors *[f, g]* and *[d, e]* (see paragraph 8.1 in the paper): +Every divstep can be expressed as a matrix multiplication, applying a transition matrix _(1/2 t)_ +to both vectors _[f, g]_ and _[d, e]_ (see paragraph 8.1 in the paper): ``` t = [ u, v ] @@ -142,15 +143,15 @@ to both vectors *[f, g]* and *[d, e]* (see paragraph 8.1 in the paper): [ out_e ] [ in_e ] ``` -where *(u, v, q, r)* is *(0, 2, -1, 1)*, *(2, 0, 1, 1)*, or *(2, 0, 0, 1)*, depending on which branch is -taken. As above, the resulting *f* and *g* are always integers. +where _(u, v, q, r)_ is _(0, 2, -1, 1)_, _(2, 0, 1, 1)_, or _(2, 0, 0, 1)_, depending on which branch is +taken. As above, the resulting _f_ and _g_ are always integers. Performing multiple divsteps corresponds to a multiplication with the product of all the individual divsteps' transition matrices. As each transition matrix consists of integers -divided by *2*, the product of these matrices will consist of integers divided by *2N* (see also -theorem 9.2 in the paper). 
These divisions are expensive when updating *d* and *e*, so we delay -them: we compute the integer coefficients of the combined transition matrix scaled by *2N*, and -do one division by *2N* as a final step: +divided by _2_, the product of these matrices will consist of integers divided by _2N_ (see also +theorem 9.2 in the paper). These divisions are expensive when updating _d_ and _e_, so we delay +them: we compute the integer coefficients of the combined transition matrix scaled by _2N_, and +do one division by _2N_ as a final step: ```python def divsteps_n_matrix(delta, f, g): @@ -166,13 +167,13 @@ def divsteps_n_matrix(delta, f, g): return delta, (u, v, q, r) ``` -As the branches in the divsteps are completely determined by the bottom *N* bits of *f* and *g*, this +As the branches in the divsteps are completely determined by the bottom _N_ bits of _f_ and _g_, this function to compute the transition matrix only needs to see those bottom bits. Furthermore all -intermediate results and outputs fit in *(N+1)*-bit numbers (unsigned for *f* and *g*; signed for *u*, *v*, -*q*, and *r*) (see also paragraph 8.3 in the paper). This means that an implementation using 64-bit -integers could set *N=62* and compute the full transition matrix for 62 steps at once without any +intermediate results and outputs fit in _(N+1)_-bit numbers (unsigned for _f_ and _g_; signed for _u_, _v_, +_q_, and _r_) (see also paragraph 8.3 in the paper). This means that an implementation using 64-bit +integers could set _N=62_ and compute the full transition matrix for 62 steps at once without any big integer arithmetic at all. This is the reason why this algorithm is efficient: it only needs -to update the full-size *f*, *g*, *d*, and *e* numbers once every *N* steps. +to update the full-size _f_, _g_, _d_, and _e_ numbers once every _N_ steps. We still need functions to compute: @@ -184,8 +185,8 @@ We still need functions to compute: [ out_e ] ( [ q, r ]) [ in_e ] ``` -Because the divsteps transformation only ever divides even numbers by two, the result of *t [f,g]* is always even. When *t* is a composition of *N* divsteps, it follows that the resulting *f* -and *g* will be multiple of *2N*, and division by *2N* is simply shifting them down: +Because the divsteps transformation only ever divides even numbers by two, the result of _t [f,g]_ is always even. When _t_ is a composition of _N_ divsteps, it follows that the resulting _f_ +and _g_ will be multiple of _2N_, and division by _2N_ is simply shifting them down: ```python def update_fg(f, g, t): @@ -199,8 +200,8 @@ def update_fg(f, g, t): return cf >> N, cg >> N ``` -The same is not true for *d* and *e*, and we need an equivalent of the `div2` function for division by *2N mod M*. -This is easy if we have precomputed *1/M mod 2N* (which always exists for odd *M*): +The same is not true for _d_ and _e_, and we need an equivalent of the `div2` function for division by _2N mod M_. +This is easy if we have precomputed _1/M mod 2N_ (which always exists for odd _M_): ```python def div2n(M, Mi, x): @@ -224,7 +225,7 @@ def update_de(d, e, t, M, Mi): return div2n(M, Mi, cd), div2n(M, Mi, ce) ``` -With all of those, we can write a version of `modinv` that performs *N* divsteps at once: +With all of those, we can write a version of `modinv` that performs _N_ divsteps at once: ```python3 def modinv(M, Mi, x): @@ -242,20 +243,19 @@ def modinv(M, Mi, x): return (d * f) % M ``` -This means that in practice we'll always perform a multiple of *N* divsteps. 
This is not a problem -because once *g=0*, further divsteps do not affect *f*, *g*, *d*, or *e* anymore (only *δ* keeps +This means that in practice we'll always perform a multiple of _N_ divsteps. This is not a problem +because once _g=0_, further divsteps do not affect _f_, _g_, _d_, or _e_ anymore (only _δ_ keeps increasing). For variable time code such excess iterations will be mostly optimized away in later sections. - ## 4. Avoiding modulus operations -So far, there are two places where we compute a remainder of big numbers modulo *M*: at the end of -`div2n` in every `update_de`, and at the very end of `modinv` after potentially negating *d* due to the -sign of *f*. These are relatively expensive operations when done generically. +So far, there are two places where we compute a remainder of big numbers modulo _M_: at the end of +`div2n` in every `update_de`, and at the very end of `modinv` after potentially negating _d_ due to the +sign of _f_. These are relatively expensive operations when done generically. -To deal with the modulus operation in `div2n`, we simply stop requiring *d* and *e* to be in range -*[0,M)* all the time. Let's start by inlining `div2n` into `update_de`, and dropping the modulus +To deal with the modulus operation in `div2n`, we simply stop requiring _d_ and _e_ to be in range +_[0,M)_ all the time. Let's start by inlining `div2n` into `update_de`, and dropping the modulus operation at the end: ```python @@ -272,15 +272,15 @@ def update_de(d, e, t, M, Mi): return cd >> N, ce >> N ``` -Let's look at bounds on the ranges of these numbers. It can be shown that *|u|+|v|* and *|q|+|r|* -never exceed *2N* (see paragraph 8.3 in the paper), and thus a multiplication with *t* will have -outputs whose absolute values are at most *2N* times the maximum absolute input value. In case the -inputs *d* and *e* are in *(-M,M)*, which is certainly true for the initial values *d=0* and *e=1* assuming -*M > 1*, the multiplication results in numbers in range *(-2NM,2NM)*. Subtracting less than *2N* -times *M* to cancel out *N* bits brings that up to *(-2N+1M,2NM)*, and -dividing by *2N* at the end takes it to *(-2M,M)*. Another application of `update_de` would take that -to *(-3M,2M)*, and so forth. This progressive expansion of the variables' ranges can be -counteracted by incrementing *d* and *e* by *M* whenever they're negative: +Let's look at bounds on the ranges of these numbers. It can be shown that _|u|+|v|_ and _|q|+|r|_ +never exceed _2N_ (see paragraph 8.3 in the paper), and thus a multiplication with _t_ will have +outputs whose absolute values are at most _2N_ times the maximum absolute input value. In case the +inputs _d_ and _e_ are in _(-M,M)_, which is certainly true for the initial values _d=0_ and _e=1_ assuming +_M > 1_, the multiplication results in numbers in range _(-2NM,2NM)_. Subtracting less than _2N_ +times _M_ to cancel out _N_ bits brings that up to _(-2N+1M,2NM)_, and +dividing by _2N_ at the end takes it to _(-2M,M)_. Another application of `update_de` would take that +to _(-3M,2M)_, and so forth. This progressive expansion of the variables' ranges can be +counteracted by incrementing _d_ and _e_ by _M_ whenever they're negative: ```python ... @@ -293,12 +293,12 @@ counteracted by incrementing *d* and *e* by *M* whenever they're negative: ... 
``` -With inputs in *(-2M,M)*, they will first be shifted into range *(-M,M)*, which means that the -output will again be in *(-2M,M)*, and this remains the case regardless of how many `update_de` +With inputs in _(-2M,M)_, they will first be shifted into range _(-M,M)_, which means that the +output will again be in _(-2M,M)_, and this remains the case regardless of how many `update_de` invocations there are. In what follows, we will try to make this more efficient. -Note that increasing *d* by *M* is equal to incrementing *cd* by *u M* and *ce* by *q M*. Similarly, -increasing *e* by *M* is equal to incrementing *cd* by *v M* and *ce* by *r M*. So we could instead write: +Note that increasing _d_ by _M_ is equal to incrementing _cd_ by _u M_ and _ce_ by _q M_. Similarly, +increasing _e_ by _M_ is equal to incrementing _cd_ by _v M_ and _ce_ by _r M_. So we could instead write: ```python ... @@ -318,10 +318,10 @@ increasing *e* by *M* is equal to incrementing *cd* by *v M* and *ce* by ... ``` -Now note that we have two steps of corrections to *cd* and *ce* that add multiples of *M*: this +Now note that we have two steps of corrections to _cd_ and _ce_ that add multiples of _M_: this increment, and the decrement that cancels out bottom bits. The second one depends on the first -one, but they can still be efficiently combined by only computing the bottom bits of *cd* and *ce* -at first, and using that to compute the final *md*, *me* values: +one, but they can still be efficiently combined by only computing the bottom bits of _cd_ and _ce_ +at first, and using that to compute the final _md_, _me_ values: ```python def update_de(d, e, t, M, Mi): @@ -346,8 +346,8 @@ def update_de(d, e, t, M, Mi): return cd >> N, ce >> N ``` -One last optimization: we can avoid the *md M* and *me M* multiplications in the bottom bits of *cd* -and *ce* by moving them to the *md* and *me* correction: +One last optimization: we can avoid the _md M_ and _me M_ multiplications in the bottom bits of _cd_ +and _ce_ by moving them to the _md_ and _me_ correction: ```python ... @@ -362,10 +362,10 @@ and *ce* by moving them to the *md* and *me* correction: ... ``` -The resulting function takes *d* and *e* in range *(-2M,M)* as inputs, and outputs values in the same -range. That also means that the *d* value at the end of `modinv` will be in that range, while we want -a result in *[0,M)*. To do that, we need a normalization function. It's easy to integrate the -conditional negation of *d* (based on the sign of *f*) into it as well: +The resulting function takes _d_ and _e_ in range _(-2M,M)_ as inputs, and outputs values in the same +range. That also means that the _d_ value at the end of `modinv` will be in that range, while we want +a result in _[0,M)_. To do that, we need a normalization function. It's easy to integrate the +conditional negation of _d_ (based on the sign of _f_) into it as well: ```python def normalize(sign, v, M): @@ -391,22 +391,21 @@ And calling it in `modinv` is simply: return normalize(f, d, M) ``` - ## 5. Constant-time operation The primary selling point of the algorithm is fast constant-time operation. What code flow still depends on the input data so far? -- the number of iterations of the while *g ≠ 0* loop in `modinv` +- the number of iterations of the while _g ≠ 0_ loop in `modinv` - the branches inside `divsteps_n_matrix` - the sign checks in `update_de` - the sign checks in `normalize` To make the while loop in `modinv` constant time it can be replaced with a constant number of -iterations. 
The paper proves (Theorem 11.2) that *741* divsteps are sufficient for any *256*-bit -inputs, and [safegcd-bounds](https://github.com/sipa/safegcd-bounds) shows that the slightly better bound *724* is -sufficient even. Given that every loop iteration performs *N* divsteps, it will run a total of -*⌈724/N⌉* times. +iterations. The paper proves (Theorem 11.2) that _741_ divsteps are sufficient for any _256_-bit +inputs, and [safegcd-bounds](https://github.com/sipa/safegcd-bounds) shows that the slightly better bound _724_ is +sufficient even. Given that every loop iteration performs _N_ divsteps, it will run a total of +_⌈724/N⌉_ times. To deal with the branches in `divsteps_n_matrix` we will replace them with constant-time bitwise operations (and hope the C compiler isn't smart enough to turn them back into branches; see @@ -425,10 +424,10 @@ divstep can be written instead as (compare to the inner loop of `gcd` in section ``` To convert the above to bitwise operations, we rely on a trick to negate conditionally: per the -definition of negative numbers in two's complement, (*-v == ~v + 1*) holds for every number *v*. As -*-1* in two's complement is all *1* bits, bitflipping can be expressed as xor with *-1*. It follows -that *-v == (v ^ -1) - (-1)*. Thus, if we have a variable *c* that takes on values *0* or *-1*, then -*(v ^ c) - c* is *v* if *c=0* and *-v* if *c=-1*. +definition of negative numbers in two's complement, (_-v == ~v + 1_) holds for every number _v_. As +_-1_ in two's complement is all _1_ bits, bitflipping can be expressed as xor with _-1_. It follows +that _-v == (v ^ -1) - (-1)_. Thus, if we have a variable _c_ that takes on values _0_ or _-1_, then +_(v ^ c) - c_ is _v_ if _c=0_ and _-v_ if _c=-1_. Using this we can write: @@ -444,13 +443,13 @@ in constant-time form as: x = (f ^ c1) - c1 ``` -To use that trick, we need a helper mask variable *c1* that resolves the condition *δ>0* to *-1* -(if true) or *0* (if false). We compute *c1* using right shifting, which is equivalent to dividing by -the specified power of *2* and rounding down (in Python, and also in C under the assumption of a typical two's complement system; see -`assumptions.h` for tests that this is the case). Right shifting by *63* thus maps all -numbers in range *[-263,0)* to *-1*, and numbers in range *[0,263)* to *0*. +To use that trick, we need a helper mask variable _c1_ that resolves the condition _δ>0_ to _-1_ +(if true) or _0_ (if false). We compute _c1_ using right shifting, which is equivalent to dividing by +the specified power of _2_ and rounding down (in Python, and also in C under the assumption of a typical two's complement system; see +`assumptions.h` for tests that this is the case). Right shifting by _63_ thus maps all +numbers in range _[-263,0)_ to _-1_, and numbers in range _[0,263)_ to _0_. -Using the facts that *x&0=0* and *x&(-1)=x* (on two's complement systems again), we can write: +Using the facts that _x&0=0_ and _x&(-1)=x_ (on two's complement systems again), we can write: ```python if g & 1: @@ -498,8 +497,8 @@ becomes: ``` It turns out that this can be implemented more efficiently by applying the substitution -*η=-δ*. In this representation, negating *δ* corresponds to negating *η*, and incrementing -*δ* corresponds to decrementing *η*. This allows us to remove the negation in the *c1* +_η=-δ_. In this representation, negating _δ_ corresponds to negating _η_, and incrementing +_δ_ corresponds to decrementing _η_. 
This allows us to remove the negation in the _c1_ computation: ```python @@ -519,12 +518,12 @@ computation: g >>= 1 ``` -A variant of divsteps with better worst-case performance can be used instead: starting *δ* at -*1/2* instead of *1*. This reduces the worst case number of iterations to *590* for *256*-bit inputs -(which can be shown using convex hull analysis). In this case, the substitution *ζ=-(δ+1/2)* -is used instead to keep the variable integral. Incrementing *δ* by *1* still translates to -decrementing *ζ* by *1*, but negating *δ* now corresponds to going from *ζ* to *-(ζ+1)*, or -*~ζ*. Doing that conditionally based on *c3* is simply: +A variant of divsteps with better worst-case performance can be used instead: starting _δ_ at +_1/2_ instead of _1_. This reduces the worst case number of iterations to _590_ for _256_-bit inputs +(which can be shown using convex hull analysis). In this case, the substitution _ζ=-(δ+1/2)_ +is used instead to keep the variable integral. Incrementing _δ_ by _1_ still translates to +decrementing _ζ_ by _1_, but negating _δ_ now corresponds to going from _ζ_ to _-(ζ+1)_, or +_~ζ_. Doing that conditionally based on _c3_ is simply: ```python ... @@ -534,13 +533,12 @@ decrementing *ζ* by *1*, but negating *δ* now corresponds to going fr ``` By replacing the loop in `divsteps_n_matrix` with a variant of the divstep code above (extended to -also apply all *f* operations to *u*, *v* and all *g* operations to *q*, *r*), a constant-time version of +also apply all _f_ operations to _u_, _v_ and all _g_ operations to _q_, _r_), a constant-time version of `divsteps_n_matrix` is obtained. The full code will be in section 7. These bit fiddling tricks can also be used to make the conditional negations and additions in `update_de` and `normalize` constant-time. - ## 6. Variable-time optimizations In section 5, we modified the `divsteps_n_matrix` function (and a few others) to be constant time. @@ -550,7 +548,7 @@ faster non-constant time `divsteps_n_matrix` function. To do so, first consider yet another way of writing the inner loop of divstep operations in `gcd` from section 1. This decomposition is also explained in the paper in section 8.2. We use -the original version with initial *δ=1* and *η=-δ* here. +the original version with initial _δ=1_ and _η=-δ_ here. ```python for _ in range(N): @@ -562,7 +560,7 @@ for _ in range(N): g >>= 1 ``` -Whenever *g* is even, the loop only shifts *g* down and decreases *η*. When *g* ends in multiple zero +Whenever _g_ is even, the loop only shifts _g_ down and decreases _η_. When _g_ ends in multiple zero bits, these iterations can be consolidated into one step. This requires counting the bottom zero bits efficiently, which is possible on most platforms; it is abstracted here as the function `count_trailing_zeros`. @@ -595,20 +593,20 @@ while True: # g is even now, and the eta decrement and g shift will happen in the next loop. ``` -We can now remove multiple bottom *0* bits from *g* at once, but still need a full iteration whenever -there is a bottom *1* bit. In what follows, we will get rid of multiple *1* bits simultaneously as +We can now remove multiple bottom _0_ bits from _g_ at once, but still need a full iteration whenever +there is a bottom _1_ bit. In what follows, we will get rid of multiple _1_ bits simultaneously as well. -Observe that as long as *η ≥ 0*, the loop does not modify *f*. 
Instead, it cancels out bottom -bits of *g* and shifts them out, and decreases *η* and *i* accordingly - interrupting only when *η* -becomes negative, or when *i* reaches *0*. Combined, this is equivalent to adding a multiple of *f* to -*g* to cancel out multiple bottom bits, and then shifting them out. +Observe that as long as _η ≥ 0_, the loop does not modify _f_. Instead, it cancels out bottom +bits of _g_ and shifts them out, and decreases _η_ and _i_ accordingly - interrupting only when _η_ +becomes negative, or when _i_ reaches _0_. Combined, this is equivalent to adding a multiple of _f_ to +_g_ to cancel out multiple bottom bits, and then shifting them out. -It is easy to find what that multiple is: we want a number *w* such that *g+w f* has a few bottom -zero bits. If that number of bits is *L*, we want *g+w f mod 2L = 0*, or *w = -g/f mod 2L*. Since *f* -is odd, such a *w* exists for any *L*. *L* cannot be more than *i* steps (as we'd finish the loop before -doing more) or more than *η+1* steps (as we'd run `eta, f, g = -eta, g, -f` at that point), but -apart from that, we're only limited by the complexity of computing *w*. +It is easy to find what that multiple is: we want a number _w_ such that _g+w f_ has a few bottom +zero bits. If that number of bits is _L_, we want _g+w f mod 2L = 0_, or _w = -g/f mod 2L_. Since _f_ +is odd, such a _w_ exists for any _L_. _L_ cannot be more than _i_ steps (as we'd finish the loop before +doing more) or more than _η+1_ steps (as we'd run `eta, f, g = -eta, g, -f` at that point), but +apart from that, we're only limited by the complexity of computing _w_. This code demonstrates how to cancel up to 4 bits per step: @@ -642,26 +640,25 @@ some can be found in Hacker's Delight second edition by Henry S. Warren, Jr. pag Here we need the negated modular inverse, which is a simple transformation of those: - Instead of a 3-bit table: - - *-f* or *f ^ 6* + - _-f_ or _f ^ 6_ - Instead of a 4-bit table: - - *1 - f(f + 1)* - - *-(f + (((f + 1) & 4) << 1))* -- For larger tables the following technique can be used: if *w=-1/f mod 2L*, then *w(w f+2)* is - *-1/f mod 22L*. This allows extending the previous formulas (or tables). In particular we + - _1 - f(f + 1)_ + - _-(f + (((f + 1) & 4) << 1))_ +- For larger tables the following technique can be used: if _w=-1/f mod 2L_, then _w(w f+2)_ is + _-1/f mod 22L_. This allows extending the previous formulas (or tables). In particular we have this 6-bit function (based on the 3-bit function above): - - *f(f2 - 2)* + - _f(f2 - 2)_ -This loop, again extended to also handle *u*, *v*, *q*, and *r* alongside *f* and *g*, placed in +This loop, again extended to also handle _u_, _v_, _q_, and _r_ alongside _f_ and _g_, placed in `divsteps_n_matrix`, gives a significantly faster, but non-constant time version. - ## 7. 
Final Python version All together we need the following functions: - A way to compute the transition matrix in constant time, using the `divsteps_n_matrix` function from section 2, but with its loop replaced by a variant of the constant-time divstep from - section 5, extended to handle *u*, *v*, *q*, *r*: + section 5, extended to handle _u_, _v_, _q_, _r_: ```python def divsteps_n_matrix(zeta, f, g): @@ -684,7 +681,7 @@ def divsteps_n_matrix(zeta, f, g): return zeta, (u, v, q, r) ``` -- The functions to update *f* and *g*, and *d* and *e*, from section 2 and section 4, with the constant-time +- The functions to update _f_ and _g_, and _d_ and _e_, from section 2 and section 4, with the constant-time changes to `update_de` from section 5: ```python @@ -723,7 +720,7 @@ def normalize(sign, v, M): return v ``` -- And finally the `modinv` function too, adapted to use *ζ* instead of *δ*, and using the fixed +- And finally the `modinv` function too, adapted to use _ζ_ instead of _δ_, and using the fixed iteration count from section 5: ```python @@ -772,20 +769,21 @@ def modinv_var(M, Mi, x): ## 8. From GCDs to Jacobi symbol -We can also use a similar approach to calculate Jacobi symbol *(x | M)* by keeping track of an -extra variable *j*, for which at every step *(x | M) = j (g | f)*. As we update *f* and *g*, we -make corresponding updates to *j* using +We can also use a similar approach to calculate Jacobi symbol _(x | M)_ by keeping track of an +extra variable _j_, for which at every step _(x | M) = j (g | f)_. As we update _f_ and _g_, we +make corresponding updates to _j_ using [properties of the Jacobi symbol](https://en.wikipedia.org/wiki/Jacobi_symbol#Properties): -* *((g/2) | f)* is either *(g | f)* or *-(g | f)*, depending on the value of *f mod 8* (negating if it's *3* or *5*). -* *(f | g)* is either *(g | f)* or *-(g | f)*, depending on *f mod 4* and *g mod 4* (negating if both are *3*). -These updates depend only on the values of *f* and *g* modulo *4* or *8*, and can thus be applied -very quickly, as long as we keep track of a few additional bits of *f* and *g*. Overall, this +- _((g/2) | f)_ is either _(g | f)_ or _-(g | f)_, depending on the value of _f mod 8_ (negating if it's _3_ or _5_). +- _(f | g)_ is either _(g | f)_ or _-(g | f)_, depending on _f mod 4_ and _g mod 4_ (negating if both are _3_). + +These updates depend only on the values of _f_ and _g_ modulo _4_ or _8_, and can thus be applied +very quickly, as long as we keep track of a few additional bits of _f_ and _g_. Overall, this calculation is slightly simpler than the one for the modular inverse because we no longer need to -keep track of *d* and *e*. +keep track of _d_ and _e_. -However, one difficulty of this approach is that the Jacobi symbol *(a | n)* is only defined for -positive odd integers *n*, whereas in the original safegcd algorithm, *f, g* can take negative +However, one difficulty of this approach is that the Jacobi symbol _(a | n)_ is only defined for +positive odd integers _n_, whereas in the original safegcd algorithm, _f, g_ can take negative values. We resolve this by using the following modified steps: ```python @@ -799,15 +797,16 @@ values. We resolve this by using the following modified steps: ``` The algorithm is still correct, since the changed divstep, called a "posdivstep" (see section 8.4 -and E.5 in the paper) preserves *gcd(f, g)*. However, there's no proof that the modified algorithm +and E.5 in the paper) preserves _gcd(f, g)_. 
However, there's no proof that the modified algorithm will converge. The justification for posdivsteps is completely empirical: in practice, it appears -that the vast majority of nonzero inputs converge to *f=g=gcd(f0, g0)* in a +that the vast majority of nonzero inputs converge to _f=g=gcd(f0, g0)_ in a number of steps proportional to their logarithm. Note that: -- We require inputs to satisfy *gcd(x, M) = 1*, as otherwise *f=1* is not reached. -- We require inputs *x &neq; 0*, because applying posdivstep with *g=0* has no effect. -- We need to update the termination condition from *g=0* to *f=1*. + +- We require inputs to satisfy _gcd(x, M) = 1_, as otherwise _f=1_ is not reached. +- We require inputs _x &neq; 0_, because applying posdivstep with _g=0_ has no effect. +- We need to update the termination condition from _g=0_ to _f=1_. We account for the possibility of nonconvergence by only performing a bounded number of posdivsteps, and then falling back to square-root based Jacobi calculation if a solution has not @@ -815,5 +814,5 @@ yet been found. The optimizations in sections 3-7 above are described in the context of the original divsteps, but in the C implementation we also adapt most of them (not including "avoiding modulus operations", -since it's not necessary to track *d, e*, and "constant-time operation", since we never calculate +since it's not necessary to track _d, e_, and "constant-time operation", since we never calculate Jacobi symbols for secret data) to the posdivsteps version. diff --git a/external/secp256k1/src/wycheproof/ecdsa_secp256k1_sha256_bitcoin_test.json b/external/secp256k1/src/wycheproof/ecdsa_secp256k1_sha256_bitcoin_test.json index 9c90747993..04e34f5a17 100644 --- a/external/secp256k1/src/wycheproof/ecdsa_secp256k1_sha256_bitcoin_test.json +++ b/external/secp256k1/src/wycheproof/ecdsa_secp256k1_sha256_bitcoin_test.json @@ -1,6358 +1,6358 @@ { - "algorithm" : "ECDSA", - "schema" : "ecdsa_bitcoin_verify_schema.json", - "generatorVersion" : "0.9rc5", - "numberOfTests" : 463, - "header" : [ + "algorithm": "ECDSA", + "schema": "ecdsa_bitcoin_verify_schema.json", + "generatorVersion": "0.9rc5", + "numberOfTests": 463, + "header": [ "Test vectors of type EcdsaBitcoinVerify are meant for the verification", "of a ECDSA variant used for bitcoin, that add signature non-malleability." ], - "notes" : { - "ArithmeticError" : { - "bugType" : "EDGE_CASE", - "description" : "Some implementations of ECDSA have arithmetic errors that occur when intermediate results have extreme values. This test vector has been constructed to test such occurences.", - "cves" : [ + "notes": { + "ArithmeticError": { + "bugType": "EDGE_CASE", + "description": "Some implementations of ECDSA have arithmetic errors that occur when intermediate results have extreme values. This test vector has been constructed to test such occurences.", + "cves": [ "CVE-2017-18146" ] }, - "BerEncodedSignature" : { - "bugType" : "BER_ENCODING", - "description" : "ECDSA signatures are usually DER encoded. This signature contains valid values for r and s, but it uses alternative BER encoding.", - "effect" : "Accepting alternative BER encodings may be benign in some cases, or be an issue if protocol requires signature malleability.", - "cves" : [ + "BerEncodedSignature": { + "bugType": "BER_ENCODING", + "description": "ECDSA signatures are usually DER encoded. 
This signature contains valid values for r and s, but it uses alternative BER encoding.", + "effect": "Accepting alternative BER encodings may be benign in some cases, or be an issue if protocol requires signature malleability.", + "cves": [ "CVE-2020-14966", "CVE-2020-13822", "CVE-2019-14859", "CVE-2016-1000342" ] }, - "EdgeCasePublicKey" : { - "bugType" : "EDGE_CASE", - "description" : "The test vector uses a special case public key. " + "EdgeCasePublicKey": { + "bugType": "EDGE_CASE", + "description": "The test vector uses a special case public key. " }, - "EdgeCaseShamirMultiplication" : { - "bugType" : "EDGE_CASE", - "description" : "Shamir proposed a fast method for computing the sum of two scalar multiplications efficiently. This test vector has been constructed so that an intermediate result is the point at infinity if Shamir's method is used." + "EdgeCaseShamirMultiplication": { + "bugType": "EDGE_CASE", + "description": "Shamir proposed a fast method for computing the sum of two scalar multiplications efficiently. This test vector has been constructed so that an intermediate result is the point at infinity if Shamir's method is used." }, - "IntegerOverflow" : { - "bugType" : "CAN_OF_WORMS", - "description" : "The test vector contains an r and s that has been modified, so that the original value is restored if the implementation ignores the most significant bits.", - "effect" : "Without further analysis it is unclear if the modification can be used to forge signatures." + "IntegerOverflow": { + "bugType": "CAN_OF_WORMS", + "description": "The test vector contains an r and s that has been modified, so that the original value is restored if the implementation ignores the most significant bits.", + "effect": "Without further analysis it is unclear if the modification can be used to forge signatures." }, - "InvalidEncoding" : { - "bugType" : "CAN_OF_WORMS", - "description" : "ECDSA signatures are encoded using ASN.1. This test vector contains an incorrectly encoded signature. The test vector itself was generated from a valid signature by modifying its encoding.", - "effect" : "Without further analysis it is unclear if the modification can be used to forge signatures." + "InvalidEncoding": { + "bugType": "CAN_OF_WORMS", + "description": "ECDSA signatures are encoded using ASN.1. This test vector contains an incorrectly encoded signature. The test vector itself was generated from a valid signature by modifying its encoding.", + "effect": "Without further analysis it is unclear if the modification can be used to forge signatures." }, - "InvalidSignature" : { - "bugType" : "AUTH_BYPASS", - "description" : "The signature contains special case values such as r=0 and s=0. Buggy implementations may accept such values, if the implementation does not check boundaries and computes s^(-1) == 0.", - "effect" : "Accepting such signatures can have the effect that an adversary can forge signatures without even knowning the message to sign.", - "cves" : [ + "InvalidSignature": { + "bugType": "AUTH_BYPASS", + "description": "The signature contains special case values such as r=0 and s=0. 
Buggy implementations may accept such values, if the implementation does not check boundaries and computes s^(-1) == 0.", + "effect": "Accepting such signatures can have the effect that an adversary can forge signatures without even knowning the message to sign.", + "cves": [ "CVE-2022-21449", "CVE-2021-43572", "CVE-2022-24884" ] }, - "InvalidTypesInSignature" : { - "bugType" : "AUTH_BYPASS", - "description" : "The signature contains invalid types. Dynamic typed languages sometime coerce such values of different types into integers. If an implementation is careless and has additional bugs, such as not checking integer boundaries then it may be possible that such signatures are accepted.", - "effect" : "Accepting such signatures can have the effect that an adversary can forge signatures without even knowning the message to sign.", - "cves" : [ + "InvalidTypesInSignature": { + "bugType": "AUTH_BYPASS", + "description": "The signature contains invalid types. Dynamic typed languages sometime coerce such values of different types into integers. If an implementation is careless and has additional bugs, such as not checking integer boundaries then it may be possible that such signatures are accepted.", + "effect": "Accepting such signatures can have the effect that an adversary can forge signatures without even knowning the message to sign.", + "cves": [ "CVE-2022-21449" ] }, - "ModifiedInteger" : { - "bugType" : "CAN_OF_WORMS", - "description" : "The test vector contains an r and s that has been modified. The goal is to check for arithmetic errors.", - "effect" : "Without further analysis it is unclear if the modification can be used to forge signatures." + "ModifiedInteger": { + "bugType": "CAN_OF_WORMS", + "description": "The test vector contains an r and s that has been modified. The goal is to check for arithmetic errors.", + "effect": "Without further analysis it is unclear if the modification can be used to forge signatures." }, - "ModifiedSignature" : { - "bugType" : "CAN_OF_WORMS", - "description" : "The test vector contains an invalid signature that was generated from a valid signature by modifying it.", - "effect" : "Without further analysis it is unclear if the modification can be used to forge signatures." + "ModifiedSignature": { + "bugType": "CAN_OF_WORMS", + "description": "The test vector contains an invalid signature that was generated from a valid signature by modifying it.", + "effect": "Without further analysis it is unclear if the modification can be used to forge signatures." }, - "ModularInverse" : { - "bugType" : "EDGE_CASE", - "description" : "The test vectors contains a signature where computing the modular inverse of s hits an edge case.", - "effect" : "While the signature in this test vector is constructed and similar cases are unlikely to occur, it is important to determine if the underlying arithmetic error can be used to forge signatures.", - "cves" : [ + "ModularInverse": { + "bugType": "EDGE_CASE", + "description": "The test vectors contains a signature where computing the modular inverse of s hits an edge case.", + "effect": "While the signature in this test vector is constructed and similar cases are unlikely to occur, it is important to determine if the underlying arithmetic error can be used to forge signatures.", + "cves": [ "CVE-2019-0865" ] }, - "PointDuplication" : { - "bugType" : "EDGE_CASE", - "description" : "Some implementations of ECDSA do not handle duplication and points at infinity correctly. 
This is a test vector that has been specially crafted to check for such an omission.", - "cves" : [ + "PointDuplication": { + "bugType": "EDGE_CASE", + "description": "Some implementations of ECDSA do not handle duplication and points at infinity correctly. This is a test vector that has been specially crafted to check for such an omission.", + "cves": [ "2020-12607", "CVE-2015-2730" ] }, - "RangeCheck" : { - "bugType" : "CAN_OF_WORMS", - "description" : "The test vector contains an r and s that has been modified. By adding or subtracting the order of the group (or other values) the test vector checks whether signature verification verifies the range of r and s.", - "effect" : "Without further analysis it is unclear if the modification can be used to forge signatures." + "RangeCheck": { + "bugType": "CAN_OF_WORMS", + "description": "The test vector contains an r and s that has been modified. By adding or subtracting the order of the group (or other values) the test vector checks whether signature verification verifies the range of r and s.", + "effect": "Without further analysis it is unclear if the modification can be used to forge signatures." }, - "SignatureMalleabilityBitcoin" : { - "bugType" : "SIGNATURE_MALLEABILITY", - "description" : "\"BitCoins\"-curves are curves where signature malleability can be a serious issue. An implementation should only accept a signature s where s < n/2. If an implementation is not meant for uses cases that require signature malleability then this implemenation should be tested with another set of test vectors.", - "effect" : "In bitcoin exchanges, it may be used to make a double deposits or double withdrawals", - "links" : [ + "SignatureMalleabilityBitcoin": { + "bugType": "SIGNATURE_MALLEABILITY", + "description": "\"BitCoins\"-curves are curves where signature malleability can be a serious issue. An implementation should only accept a signature s where s < n/2. If an implementation is not meant for uses cases that require signature malleability then this implemenation should be tested with another set of test vectors.", + "effect": "In bitcoin exchanges, it may be used to make a double deposits or double withdrawals", + "links": [ "https://en.bitcoin.it/wiki/Transaction_malleability", "https://en.bitcoinwiki.org/wiki/Transaction_Malleability" ] }, - "SmallRandS" : { - "bugType" : "EDGE_CASE", - "description" : "The test vectors contains a signature where both r and s are small integers. Some libraries cannot verify such signatures.", - "effect" : "While the signature in this test vector is constructed and similar cases are unlikely to occur, it is important to determine if the underlying arithmetic error can be used to forge signatures.", - "cves" : [ + "SmallRandS": { + "bugType": "EDGE_CASE", + "description": "The test vectors contains a signature where both r and s are small integers. Some libraries cannot verify such signatures.", + "effect": "While the signature in this test vector is constructed and similar cases are unlikely to occur, it is important to determine if the underlying arithmetic error can be used to forge signatures.", + "cves": [ "2020-13895" ] }, - "SpecialCaseHash" : { - "bugType" : "EDGE_CASE", - "description" : "The test vector contains a signature where the hash of the message is a special case, e.g., contains a long run of 0 or 1 bits." 
+ "SpecialCaseHash": { + "bugType": "EDGE_CASE", + "description": "The test vector contains a signature where the hash of the message is a special case, e.g., contains a long run of 0 or 1 bits." }, - "ValidSignature" : { - "bugType" : "BASIC", - "description" : "The test vector contains a valid signature that was generated pseudorandomly. Such signatures should not fail to verify unless some of the parameters (e.g. curve or hash function) are not supported." + "ValidSignature": { + "bugType": "BASIC", + "description": "The test vector contains a valid signature that was generated pseudorandomly. Such signatures should not fail to verify unless some of the parameters (e.g. curve or hash function) are not supported." } }, - "testGroups" : [ + "testGroups": [ { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04b838ff44e5bc177bf21189d0766082fc9d843226887fc9760371100b7ee20a6ff0c9d75bfba7b31a6bca1974496eeb56de357071955d83c4b1badaa0b21832e9", - "wx" : "00b838ff44e5bc177bf21189d0766082fc9d843226887fc9760371100b7ee20a6f", - "wy" : "00f0c9d75bfba7b31a6bca1974496eeb56de357071955d83c4b1badaa0b21832e9" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04b838ff44e5bc177bf21189d0766082fc9d843226887fc9760371100b7ee20a6ff0c9d75bfba7b31a6bca1974496eeb56de357071955d83c4b1badaa0b21832e9", + "wx": "00b838ff44e5bc177bf21189d0766082fc9d843226887fc9760371100b7ee20a6f", + "wy": "00f0c9d75bfba7b31a6bca1974496eeb56de357071955d83c4b1badaa0b21832e9" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004b838ff44e5bc177bf21189d0766082fc9d843226887fc9760371100b7ee20a6ff0c9d75bfba7b31a6bca1974496eeb56de357071955d83c4b1badaa0b21832e9", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEuDj/ROW8F3vyEYnQdmCC/J2EMiaIf8l2\nA3EQC37iCm/wyddb+6ezGmvKGXRJbutW3jVwcZVdg8Sxutqgshgy6Q==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004b838ff44e5bc177bf21189d0766082fc9d843226887fc9760371100b7ee20a6ff0c9d75bfba7b31a6bca1974496eeb56de357071955d83c4b1badaa0b21832e9", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEuDj/ROW8F3vyEYnQdmCC/J2EMiaIf8l2\nA3EQC37iCm/wyddb+6ezGmvKGXRJbutW3jVwcZVdg8Sxutqgshgy6Q==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 1, - "comment" : "Signature malleability", - "flags" : [ + "tcId": 1, + "comment": "Signature malleability", + "flags": [ "SignatureMalleabilityBitcoin" ], - "msg" : "313233343030", - "sig" : "3046022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365022100900e75ad233fcc908509dbff5922647db37c21f4afd3203ae8dc4ae7794b0f87", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365022100900e75ad233fcc908509dbff5922647db37c21f4afd3203ae8dc4ae7794b0f87", + "result": "invalid" }, { - "tcId" : 2, - "comment" : "valid", - "flags" : [ + "tcId": 2, + "comment": "valid", + "flags": [ "ValidSignature" ], - "msg" : "313233343030", - "sig" : "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "valid" + "msg": "313233343030", + "sig": 
"3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "valid" }, { - "tcId" : 3, - "comment" : "length of sequence [r, s] uses long form encoding", - "flags" : [ + "tcId": 3, + "comment": "length of sequence [r, s] uses long form encoding", + "flags": [ "BerEncodedSignature" ], - "msg" : "313233343030", - "sig" : "308145022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "308145022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 4, - "comment" : "length of sequence [r, s] contains a leading 0", - "flags" : [ + "tcId": 4, + "comment": "length of sequence [r, s] contains a leading 0", + "flags": [ "BerEncodedSignature" ], - "msg" : "313233343030", - "sig" : "30820045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "30820045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 5, - "comment" : "length of sequence [r, s] uses 70 instead of 69", - "flags" : [ + "tcId": 5, + "comment": "length of sequence [r, s] uses 70 instead of 69", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3046022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 6, - "comment" : "length of sequence [r, s] uses 68 instead of 69", - "flags" : [ + "tcId": 6, + "comment": "length of sequence [r, s] uses 68 instead of 69", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3044022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3044022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 7, - "comment" : "uint32 overflow in length of sequence [r, s]", - "flags" : [ + "tcId": 7, + "comment": "uint32 overflow in length of sequence [r, s]", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "30850100000045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "30850100000045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 8, - "comment" : "uint64 overflow in length of sequence [r, s]", - "flags" : [ + "tcId": 8, + "comment": "uint64 overflow in length of sequence [r, s]", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : 
"3089010000000000000045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3089010000000000000045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 9, - "comment" : "length of sequence [r, s] = 2**31 - 1", - "flags" : [ + "tcId": 9, + "comment": "length of sequence [r, s] = 2**31 - 1", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "30847fffffff022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "30847fffffff022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 10, - "comment" : "length of sequence [r, s] = 2**31", - "flags" : [ + "tcId": 10, + "comment": "length of sequence [r, s] = 2**31", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "308480000000022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "308480000000022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 11, - "comment" : "length of sequence [r, s] = 2**32 - 1", - "flags" : [ + "tcId": 11, + "comment": "length of sequence [r, s] = 2**32 - 1", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3084ffffffff022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3084ffffffff022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 12, - "comment" : "length of sequence [r, s] = 2**40 - 1", - "flags" : [ + "tcId": 12, + "comment": "length of sequence [r, s] = 2**40 - 1", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3085ffffffffff022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3085ffffffffff022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 13, - "comment" : "length of sequence [r, s] = 2**64 - 1", - "flags" : [ + "tcId": 13, + "comment": "length of sequence [r, s] = 2**64 - 1", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3088ffffffffffffffff022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3088ffffffffffffffff022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 14, - "comment" : "incorrect length of sequence [r, 
s]", - "flags" : [ + "tcId": 14, + "comment": "incorrect length of sequence [r, s]", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "30ff022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "30ff022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 15, - "comment" : "replaced sequence [r, s] by an indefinite length tag without termination", - "flags" : [ + "tcId": 15, + "comment": "replaced sequence [r, s] by an indefinite length tag without termination", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3080022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3080022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 16, - "comment" : "removing sequence [r, s]", - "flags" : [ + "tcId": 16, + "comment": "removing sequence [r, s]", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "", - "result" : "invalid" + "msg": "313233343030", + "sig": "", + "result": "invalid" }, { - "tcId" : 17, - "comment" : "lonely sequence tag", - "flags" : [ + "tcId": 17, + "comment": "lonely sequence tag", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "30", - "result" : "invalid" + "msg": "313233343030", + "sig": "30", + "result": "invalid" }, { - "tcId" : 18, - "comment" : "appending 0's to sequence [r, s]", - "flags" : [ + "tcId": 18, + "comment": "appending 0's to sequence [r, s]", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", - "result" : "invalid" + "msg": "313233343030", + "sig": "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", + "result": "invalid" }, { - "tcId" : 19, - "comment" : "prepending 0's to sequence [r, s]", - "flags" : [ + "tcId": 19, + "comment": "prepending 0's to sequence [r, s]", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "30470000022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "30470000022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 20, - "comment" : "appending unused 0's to sequence [r, s]", - "flags" : [ + "tcId": 20, + "comment": "appending unused 0's to sequence [r, s]", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", - "result" : "invalid" + "msg": "313233343030", + "sig": "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", + 
"result": "invalid" }, { - "tcId" : 21, - "comment" : "appending null value to sequence [r, s]", - "flags" : [ + "tcId": 21, + "comment": "appending null value to sequence [r, s]", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0500", - "result" : "invalid" + "msg": "313233343030", + "sig": "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0500", + "result": "invalid" }, { - "tcId" : 22, - "comment" : "prepending garbage to sequence [r, s]", - "flags" : [ + "tcId": 22, + "comment": "prepending garbage to sequence [r, s]", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "304a4981773045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304a4981773045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 23, - "comment" : "prepending garbage to sequence [r, s]", - "flags" : [ + "tcId": 23, + "comment": "prepending garbage to sequence [r, s]", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "304925003045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304925003045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 24, - "comment" : "appending garbage to sequence [r, s]", - "flags" : [ + "tcId": 24, + "comment": "appending garbage to sequence [r, s]", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "30473045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0004deadbeef", - "result" : "invalid" + "msg": "313233343030", + "sig": "30473045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0004deadbeef", + "result": "invalid" }, { - "tcId" : 25, - "comment" : "including undefined tags", - "flags" : [ + "tcId": 25, + "comment": "including undefined tags", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "304daa00bb00cd003045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304daa00bb00cd003045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 26, - "comment" : "including undefined tags", - "flags" : [ + "tcId": 26, + "comment": "including undefined tags", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "304d2229aa00bb00cd00022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": 
"304d2229aa00bb00cd00022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 27, - "comment" : "including undefined tags", - "flags" : [ + "tcId": 27, + "comment": "including undefined tags", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "304d022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323652228aa00bb00cd0002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304d022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323652228aa00bb00cd0002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 28, - "comment" : "truncated length of sequence [r, s]", - "flags" : [ + "tcId": 28, + "comment": "truncated length of sequence [r, s]", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3081", - "result" : "invalid" + "msg": "313233343030", + "sig": "3081", + "result": "invalid" }, { - "tcId" : 29, - "comment" : "including undefined tags to sequence [r, s]", - "flags" : [ + "tcId": 29, + "comment": "including undefined tags to sequence [r, s]", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "304baa02aabb3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304baa02aabb3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 30, - "comment" : "using composition with indefinite length for sequence [r, s]", - "flags" : [ + "tcId": 30, + "comment": "using composition with indefinite length for sequence [r, s]", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "30803045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", - "result" : "invalid" + "msg": "313233343030", + "sig": "30803045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", + "result": "invalid" }, { - "tcId" : 31, - "comment" : "using composition with wrong tag for sequence [r, s]", - "flags" : [ + "tcId": 31, + "comment": "using composition with wrong tag for sequence [r, s]", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "30803145022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", - "result" : "invalid" + "msg": "313233343030", + "sig": "30803145022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", + "result": "invalid" }, { - "tcId" : 32, - "comment" : "Replacing sequence [r, s] with NULL", - "flags" : [ + "tcId": 32, + "comment": "Replacing sequence [r, s] with NULL", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "0500", - "result" : "invalid" + "msg": "313233343030", + "sig": "0500", + "result": "invalid" }, { - "tcId" : 33, - "comment" : "changing tag value of sequence [r, s]", - "flags" : [ + "tcId": 33, + "comment": "changing tag value of sequence [r, s]", + "flags": [ 
"InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "2e45022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "2e45022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 34, - "comment" : "changing tag value of sequence [r, s]", - "flags" : [ + "tcId": 34, + "comment": "changing tag value of sequence [r, s]", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "2f45022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "2f45022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 35, - "comment" : "changing tag value of sequence [r, s]", - "flags" : [ + "tcId": 35, + "comment": "changing tag value of sequence [r, s]", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3145022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3145022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 36, - "comment" : "changing tag value of sequence [r, s]", - "flags" : [ + "tcId": 36, + "comment": "changing tag value of sequence [r, s]", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3245022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3245022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 37, - "comment" : "changing tag value of sequence [r, s]", - "flags" : [ + "tcId": 37, + "comment": "changing tag value of sequence [r, s]", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "ff45022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "ff45022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 38, - "comment" : "dropping value of sequence [r, s]", - "flags" : [ + "tcId": 38, + "comment": "dropping value of sequence [r, s]", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3000", - "result" : "invalid" + "msg": "313233343030", + "sig": "3000", + "result": "invalid" }, { - "tcId" : 39, - "comment" : "using composition for sequence [r, s]", - "flags" : [ + "tcId": 39, + "comment": "using composition for sequence [r, s]", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "304930010230442100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": 
"313233343030", + "sig": "304930010230442100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 40, - "comment" : "truncated sequence [r, s]", - "flags" : [ + "tcId": 40, + "comment": "truncated sequence [r, s]", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3044022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31", - "result" : "invalid" + "msg": "313233343030", + "sig": "3044022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31", + "result": "invalid" }, { - "tcId" : 41, - "comment" : "truncated sequence [r, s]", - "flags" : [ + "tcId": 41, + "comment": "truncated sequence [r, s]", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "30442100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "30442100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 42, - "comment" : "sequence [r, s] of size 4166 to check for overflows", - "flags" : [ + "tcId": 42, + "comment": "sequence [r, s] of size 4166 to check for overflows", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "30821046022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "result" : "invalid" + "msg": "313233343030", + "sig": 
"30821046022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "result": "invalid" }, { - "tcId" : 43, - "comment" : "indefinite length", - "flags" : [ + "tcId": 43, + "comment": "indefinite length", + "flags": [ "BerEncodedSignature" ], - "msg" : "313233343030", - "sig" : "3080022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", - "result" : "invalid" + "msg": "313233343030", + "sig": "3080022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", + "result": "invalid" }, { - "tcId" : 44, - "comment" : "indefinite length with truncated delimiter", - "flags" : [ + "tcId": 44, + "comment": "indefinite length with truncated delimiter", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3080022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba00", - "result" : "invalid" + "msg": "313233343030", + "sig": "3080022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba00", + "result": "invalid" }, { - "tcId" : 45, - "comment" : "indefinite length with additional element", - "flags" : [ + "tcId": 45, + "comment": "indefinite length with additional element", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3080022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba05000000", - "result" : "invalid" + "msg": "313233343030", + "sig": "3080022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba05000000", + "result": "invalid" }, { - "tcId" : 46, - "comment" : "indefinite length with truncated element", - "flags" : [ + "tcId": 46, + "comment": "indefinite length with truncated element", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3080022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba060811220000", - "result" : "invalid" + "msg": "313233343030", + "sig": 
"3080022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba060811220000", + "result": "invalid" }, { - "tcId" : 47, - "comment" : "indefinite length with garbage", - "flags" : [ + "tcId": 47, + "comment": "indefinite length with garbage", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3080022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000fe02beef", - "result" : "invalid" + "msg": "313233343030", + "sig": "3080022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000fe02beef", + "result": "invalid" }, { - "tcId" : 48, - "comment" : "indefinite length with nonempty EOC", - "flags" : [ + "tcId": 48, + "comment": "indefinite length with nonempty EOC", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3080022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0002beef", - "result" : "invalid" + "msg": "313233343030", + "sig": "3080022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0002beef", + "result": "invalid" }, { - "tcId" : 49, - "comment" : "prepend empty sequence", - "flags" : [ + "tcId": 49, + "comment": "prepend empty sequence", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "30473000022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "30473000022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 50, - "comment" : "append empty sequence", - "flags" : [ + "tcId": 50, + "comment": "append empty sequence", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba3000", - "result" : "invalid" + "msg": "313233343030", + "sig": "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba3000", + "result": "invalid" }, { - "tcId" : 51, - "comment" : "append zero", - "flags" : [ + "tcId": 51, + "comment": "append zero", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3048022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba020100", - "result" : "invalid" + "msg": "313233343030", + "sig": "3048022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba020100", + "result": "invalid" }, { - "tcId" : 52, - "comment" : "append garbage with high tag number", - "flags" : [ + "tcId": 52, + "comment": "append garbage with high tag number", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3048022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31babf7f00", - "result" : "invalid" + "msg": "313233343030", + "sig": 
"3048022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31babf7f00", + "result": "invalid" }, { - "tcId" : 53, - "comment" : "append null with explicit tag", - "flags" : [ + "tcId": 53, + "comment": "append null with explicit tag", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31baa0020500", - "result" : "invalid" + "msg": "313233343030", + "sig": "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31baa0020500", + "result": "invalid" }, { - "tcId" : 54, - "comment" : "append null with implicit tag", - "flags" : [ + "tcId": 54, + "comment": "append null with implicit tag", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31baa000", - "result" : "invalid" + "msg": "313233343030", + "sig": "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31baa000", + "result": "invalid" }, { - "tcId" : 55, - "comment" : "sequence of sequence", - "flags" : [ + "tcId": 55, + "comment": "sequence of sequence", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "30473045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "30473045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 56, - "comment" : "truncated sequence: removed last 1 elements", - "flags" : [ + "tcId": 56, + "comment": "truncated sequence: removed last 1 elements", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3023022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365", - "result" : "invalid" + "msg": "313233343030", + "sig": "3023022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365", + "result": "invalid" }, { - "tcId" : 57, - "comment" : "repeating element in sequence", - "flags" : [ + "tcId": 57, + "comment": "repeating element in sequence", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3067022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3067022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 58, - "comment" : "flipped bit 0 in r", - "flags" : [ + "tcId": 58, + "comment": "flipped bit 0 in r", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "304300813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236402206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": 
"304300813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236402206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 59, - "comment" : "flipped bit 32 in r", - "flags" : [ + "tcId": 59, + "comment": "flipped bit 32 in r", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "304300813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccac983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304300813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccac983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 60, - "comment" : "flipped bit 48 in r", - "flags" : [ + "tcId": 60, + "comment": "flipped bit 48 in r", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "304300813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5133ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304300813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5133ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 61, - "comment" : "flipped bit 64 in r", - "flags" : [ + "tcId": 61, + "comment": "flipped bit 64 in r", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "304300813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc08b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304300813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc08b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 62, - "comment" : "length of r uses long form encoding", - "flags" : [ + "tcId": 62, + "comment": "length of r uses long form encoding", + "flags": [ "BerEncodedSignature" ], - "msg" : "313233343030", - "sig" : "304602812100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304602812100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 63, - "comment" : "length of r contains a leading 0", - "flags" : [ + "tcId": 63, + "comment": "length of r contains a leading 0", + "flags": [ "BerEncodedSignature" ], - "msg" : "313233343030", - "sig" : "30470282002100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "30470282002100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 64, - "comment" : "length of r uses 34 instead of 33", - "flags" : [ + "tcId": 64, + "comment": "length of r uses 34 instead of 33", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3045022200813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": 
"3045022200813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 65, - "comment" : "length of r uses 32 instead of 33", - "flags" : [ + "tcId": 65, + "comment": "length of r uses 32 instead of 33", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3045022000813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3045022000813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 66, - "comment" : "uint32 overflow in length of r", - "flags" : [ + "tcId": 66, + "comment": "uint32 overflow in length of r", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "304a0285010000002100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304a0285010000002100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 67, - "comment" : "uint64 overflow in length of r", - "flags" : [ + "tcId": 67, + "comment": "uint64 overflow in length of r", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "304e028901000000000000002100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304e028901000000000000002100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 68, - "comment" : "length of r = 2**31 - 1", - "flags" : [ + "tcId": 68, + "comment": "length of r = 2**31 - 1", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "304902847fffffff00813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304902847fffffff00813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 69, - "comment" : "length of r = 2**31", - "flags" : [ + "tcId": 69, + "comment": "length of r = 2**31", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "304902848000000000813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304902848000000000813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 70, - "comment" : "length of r = 2**32 - 1", - "flags" : [ + "tcId": 70, + "comment": "length of r = 2**32 - 1", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "30490284ffffffff00813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + 
"sig": "30490284ffffffff00813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 71, - "comment" : "length of r = 2**40 - 1", - "flags" : [ + "tcId": 71, + "comment": "length of r = 2**40 - 1", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "304a0285ffffffffff00813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304a0285ffffffffff00813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 72, - "comment" : "length of r = 2**64 - 1", - "flags" : [ + "tcId": 72, + "comment": "length of r = 2**64 - 1", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "304d0288ffffffffffffffff00813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304d0288ffffffffffffffff00813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 73, - "comment" : "incorrect length of r", - "flags" : [ + "tcId": 73, + "comment": "incorrect length of r", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "304502ff00813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304502ff00813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 74, - "comment" : "replaced r by an indefinite length tag without termination", - "flags" : [ + "tcId": 74, + "comment": "replaced r by an indefinite length tag without termination", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3045028000813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3045028000813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 75, - "comment" : "removing r", - "flags" : [ + "tcId": 75, + "comment": "removing r", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "302202206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "302202206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 76, - "comment" : "lonely integer tag", - "flags" : [ + "tcId": 76, + "comment": "lonely integer tag", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "30230202206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "30230202206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 77, - "comment" : "lonely integer tag", - "flags" : [ + "tcId": 77, + "comment": "lonely integer tag", + "flags": [ 
"InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3024022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502", - "result" : "invalid" + "msg": "313233343030", + "sig": "3024022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502", + "result": "invalid" }, { - "tcId" : 78, - "comment" : "appending 0's to r", - "flags" : [ + "tcId": 78, + "comment": "appending 0's to r", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3047022300813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365000002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3047022300813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365000002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 79, - "comment" : "prepending 0's to r", - "flags" : [ + "tcId": 79, + "comment": "prepending 0's to r", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "30470223000000813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "30470223000000813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 80, - "comment" : "appending unused 0's to r", - "flags" : [ + "tcId": 80, + "comment": "appending unused 0's to r", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365000002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365000002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 81, - "comment" : "appending null value to r", - "flags" : [ + "tcId": 81, + "comment": "appending null value to r", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3047022300813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365050002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3047022300813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365050002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 82, - "comment" : "prepending garbage to r", - "flags" : [ + "tcId": 82, + "comment": "prepending garbage to r", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "304a2226498177022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304a2226498177022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 83, - "comment" : "prepending garbage to r", - "flags" : [ + "tcId": 83, + "comment": "prepending garbage to r", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "304922252500022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - 
"result" : "invalid" + "msg": "313233343030", + "sig": "304922252500022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 84, - "comment" : "appending garbage to r", - "flags" : [ + "tcId": 84, + "comment": "appending garbage to r", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "304d2223022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650004deadbeef02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304d2223022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650004deadbeef02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 85, - "comment" : "truncated length of r", - "flags" : [ + "tcId": 85, + "comment": "truncated length of r", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3024028102206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3024028102206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 86, - "comment" : "including undefined tags to r", - "flags" : [ + "tcId": 86, + "comment": "including undefined tags to r", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "304b2227aa02aabb022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304b2227aa02aabb022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 87, - "comment" : "using composition with indefinite length for r", - "flags" : [ + "tcId": 87, + "comment": "using composition with indefinite length for r", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "30492280022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365000002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "30492280022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365000002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 88, - "comment" : "using composition with wrong tag for r", - "flags" : [ + "tcId": 88, + "comment": "using composition with wrong tag for r", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "30492280032100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365000002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "30492280032100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365000002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 89, - "comment" : "Replacing r with NULL", - "flags" : [ + "tcId": 89, + "comment": "Replacing r with NULL", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3024050002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3024050002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" 
}, { - "tcId" : 90, - "comment" : "changing tag value of r", - "flags" : [ + "tcId": 90, + "comment": "changing tag value of r", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3045002100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3045002100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 91, - "comment" : "changing tag value of r", - "flags" : [ + "tcId": 91, + "comment": "changing tag value of r", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3045012100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3045012100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 92, - "comment" : "changing tag value of r", - "flags" : [ + "tcId": 92, + "comment": "changing tag value of r", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3045032100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3045032100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 93, - "comment" : "changing tag value of r", - "flags" : [ + "tcId": 93, + "comment": "changing tag value of r", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3045042100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3045042100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 94, - "comment" : "changing tag value of r", - "flags" : [ + "tcId": 94, + "comment": "changing tag value of r", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3045ff2100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3045ff2100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 95, - "comment" : "dropping value of r", - "flags" : [ + "tcId": 95, + "comment": "dropping value of r", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3024020002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3024020002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 96, - "comment" : "using composition for r", - "flags" : [ + "tcId": 96, + "comment": "using composition for r", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : 
"304922250201000220813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304922250201000220813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 97, - "comment" : "modifying first byte of r", - "flags" : [ + "tcId": 97, + "comment": "modifying first byte of r", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3045022102813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3045022102813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 98, - "comment" : "modifying last byte of r", - "flags" : [ + "tcId": 98, + "comment": "modifying last byte of r", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323e502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323e502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 99, - "comment" : "truncated r", - "flags" : [ + "tcId": 99, + "comment": "truncated r", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3044022000813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832302206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3044022000813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832302206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 100, - "comment" : "truncated r", - "flags" : [ + "tcId": 100, + "comment": "truncated r", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "30440220813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "30440220813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 101, - "comment" : "r of size 4130 to check for overflows", - "flags" : [ + "tcId": 101, + "comment": "r of size 4130 to check for overflows", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : 
"308210480282102200813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "308210480282102200813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 102, - "comment" : "leading ff in r", - "flags" : [ + "tcId": 102, + "comment": "leading ff in r", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "30460222ff00813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "30460222ff00813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 103, - "comment" : "replaced r by infinity", - "flags" : [ + "tcId": 103, + "comment": "replaced r by infinity", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "302509018002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "302509018002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": 
"invalid" }, { - "tcId" : 104, - "comment" : "replacing r with zero", - "flags" : [ + "tcId": 104, + "comment": "replacing r with zero", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "302502010002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "302502010002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 105, - "comment" : "flipped bit 0 in s", - "flags" : [ + "tcId": 105, + "comment": "flipped bit 0 in s", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3043022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323656ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31bb", - "result" : "invalid" + "msg": "313233343030", + "sig": "3043022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323656ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31bb", + "result": "invalid" }, { - "tcId" : 106, - "comment" : "flipped bit 32 in s", - "flags" : [ + "tcId": 106, + "comment": "flipped bit 32 in s", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3043022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323656ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a456eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3043022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323656ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a456eb31ba", + "result": "invalid" }, { - "tcId" : 107, - "comment" : "flipped bit 48 in s", - "flags" : [ + "tcId": 107, + "comment": "flipped bit 48 in s", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3043022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323656ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f713a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3043022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323656ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f713a556eb31ba", + "result": "invalid" }, { - "tcId" : 108, - "comment" : "flipped bit 64 in s", - "flags" : [ + "tcId": 108, + "comment": "flipped bit 64 in s", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3043022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323656ff18a52dcc0336f7af62400a6dd9b810732baf1ff758001d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3043022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323656ff18a52dcc0336f7af62400a6dd9b810732baf1ff758001d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 109, - "comment" : "length of s uses long form encoding", - "flags" : [ + "tcId": 109, + "comment": "length of s uses long form encoding", + "flags": [ "BerEncodedSignature" ], - "msg" : "313233343030", - "sig" : "3046022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650281206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650281206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 110, - "comment" : "length of s contains a leading 0", - "flags" : [ + "tcId": 110, + "comment": "length of s contains a leading 0", + "flags": [ "BerEncodedSignature" ], - "msg" : "313233343030", - "sig" : 
"3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365028200206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365028200206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 111, - "comment" : "length of s uses 33 instead of 32", - "flags" : [ + "tcId": 111, + "comment": "length of s uses 33 instead of 32", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502216ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502216ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 112, - "comment" : "length of s uses 31 instead of 32", - "flags" : [ + "tcId": 112, + "comment": "length of s uses 31 instead of 32", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365021f6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365021f6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 113, - "comment" : "uint32 overflow in length of s", - "flags" : [ + "tcId": 113, + "comment": "uint32 overflow in length of s", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "304a022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365028501000000206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304a022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365028501000000206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 114, - "comment" : "uint64 overflow in length of s", - "flags" : [ + "tcId": 114, + "comment": "uint64 overflow in length of s", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "304e022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502890100000000000000206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304e022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502890100000000000000206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 115, - "comment" : "length of s = 2**31 - 1", - "flags" : [ + "tcId": 115, + "comment": "length of s = 2**31 - 1", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502847fffffff6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502847fffffff6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 116, - "comment" : "length of s = 2**31", - "flags" : [ + "tcId": 116, + "comment": "length of s = 2**31", + "flags": [ "InvalidEncoding" ], - "msg" : 
"313233343030", - "sig" : "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650284800000006ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650284800000006ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 117, - "comment" : "length of s = 2**32 - 1", - "flags" : [ + "tcId": 117, + "comment": "length of s = 2**32 - 1", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650284ffffffff6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650284ffffffff6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 118, - "comment" : "length of s = 2**40 - 1", - "flags" : [ + "tcId": 118, + "comment": "length of s = 2**40 - 1", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "304a022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650285ffffffffff6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304a022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650285ffffffffff6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 119, - "comment" : "length of s = 2**64 - 1", - "flags" : [ + "tcId": 119, + "comment": "length of s = 2**64 - 1", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "304d022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650288ffffffffffffffff6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304d022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650288ffffffffffffffff6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 120, - "comment" : "incorrect length of s", - "flags" : [ + "tcId": 120, + "comment": "incorrect length of s", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502ff6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502ff6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 121, - "comment" : "replaced s by an indefinite length tag without termination", - "flags" : [ + "tcId": 121, + "comment": "replaced s by an indefinite length tag without termination", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502806ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502806ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 122, - "comment" : "appending 0's to s", - "flags" : [ + "tcId": 122, + "comment": "appending 0's to s", + "flags": [ 
"ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502226ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", - "result" : "invalid" + "msg": "313233343030", + "sig": "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502226ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", + "result": "invalid" }, { - "tcId" : 123, - "comment" : "prepending 0's to s", - "flags" : [ + "tcId": 123, + "comment": "prepending 0's to s", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365022200006ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365022200006ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 124, - "comment" : "appending null value to s", - "flags" : [ + "tcId": 124, + "comment": "appending null value to s", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502226ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0500", - "result" : "invalid" + "msg": "313233343030", + "sig": "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502226ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0500", + "result": "invalid" }, { - "tcId" : 125, - "comment" : "prepending garbage to s", - "flags" : [ + "tcId": 125, + "comment": "prepending garbage to s", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "304a022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365222549817702206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304a022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365222549817702206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 126, - "comment" : "prepending garbage to s", - "flags" : [ + "tcId": 126, + "comment": "prepending garbage to s", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323652224250002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323652224250002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 127, - "comment" : "appending garbage to s", - "flags" : [ + "tcId": 127, + "comment": "appending garbage to s", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "304d022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365222202206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0004deadbeef", - "result" : "invalid" + "msg": "313233343030", + "sig": "304d022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365222202206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0004deadbeef", + "result": "invalid" }, { - "tcId" : 128, - "comment" : "truncated length of s", - "flags" : [ + "tcId": 128, + "comment": "truncated length of s", + "flags": [ "InvalidEncoding" ], - "msg" : 
"313233343030", - "sig" : "3025022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650281", - "result" : "invalid" + "msg": "313233343030", + "sig": "3025022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650281", + "result": "invalid" }, { - "tcId" : 129, - "comment" : "including undefined tags to s", - "flags" : [ + "tcId": 129, + "comment": "including undefined tags to s", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "304b022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323652226aa02aabb02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304b022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323652226aa02aabb02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 130, - "comment" : "using composition with indefinite length for s", - "flags" : [ + "tcId": 130, + "comment": "using composition with indefinite length for s", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365228002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", - "result" : "invalid" + "msg": "313233343030", + "sig": "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365228002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", + "result": "invalid" }, { - "tcId" : 131, - "comment" : "using composition with wrong tag for s", - "flags" : [ + "tcId": 131, + "comment": "using composition with wrong tag for s", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365228003206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", - "result" : "invalid" + "msg": "313233343030", + "sig": "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365228003206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", + "result": "invalid" }, { - "tcId" : 132, - "comment" : "Replacing s with NULL", - "flags" : [ + "tcId": 132, + "comment": "Replacing s with NULL", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3025022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650500", - "result" : "invalid" + "msg": "313233343030", + "sig": "3025022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650500", + "result": "invalid" }, { - "tcId" : 133, - "comment" : "changing tag value of s", - "flags" : [ + "tcId": 133, + "comment": "changing tag value of s", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236500206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236500206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 134, - "comment" : "changing tag value of s", - "flags" : [ + "tcId": 134, + "comment": "changing tag value of s", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236501206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": 
"3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236501206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 135, - "comment" : "changing tag value of s", - "flags" : [ + "tcId": 135, + "comment": "changing tag value of s", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236503206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236503206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 136, - "comment" : "changing tag value of s", - "flags" : [ + "tcId": 136, + "comment": "changing tag value of s", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236504206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236504206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 137, - "comment" : "changing tag value of s", - "flags" : [ + "tcId": 137, + "comment": "changing tag value of s", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365ff206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365ff206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 138, - "comment" : "dropping value of s", - "flags" : [ + "tcId": 138, + "comment": "dropping value of s", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3025022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650200", - "result" : "invalid" + "msg": "313233343030", + "sig": "3025022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650200", + "result": "invalid" }, { - "tcId" : 139, - "comment" : "using composition for s", - "flags" : [ + "tcId": 139, + "comment": "using composition for s", + "flags": [ "InvalidEncoding" ], - "msg" : "313233343030", - "sig" : "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365222402016f021ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365222402016f021ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 140, - "comment" : "modifying first byte of s", - "flags" : [ + "tcId": 140, + "comment": "modifying first byte of s", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206df18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206df18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 141, - "comment" : "modifying last byte of s", - 
"flags" : [ + "tcId": 141, + "comment": "modifying last byte of s", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb313a", - "result" : "invalid" + "msg": "313233343030", + "sig": "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb313a", + "result": "invalid" }, { - "tcId" : 142, - "comment" : "truncated s", - "flags" : [ + "tcId": 142, + "comment": "truncated s", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3044022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365021f6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31", - "result" : "invalid" + "msg": "313233343030", + "sig": "3044022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365021f6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31", + "result": "invalid" }, { - "tcId" : 143, - "comment" : "truncated s", - "flags" : [ + "tcId": 143, + "comment": "truncated s", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3044022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365021ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3044022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365021ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 144, - "comment" : "s of size 4129 to check for overflows", - "flags" : [ + "tcId": 144, + "comment": "s of size 4129 to check for overflows", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : 
"30821048022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365028210216ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "result" : "invalid" + "msg": "313233343030", + "sig": "30821048022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365028210216ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "result": "invalid" }, { - "tcId" : 145, - "comment" : "leading ff in s", - "flags" : [ + "tcId": 145, + "comment": "leading ff in s", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3046022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650221ff6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650221ff6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 146, - "comment" : "replaced s by infinity", - "flags" : [ + "tcId": 146, + "comment": "replaced s by infinity", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3026022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365090180", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365090180", + "result": 
"invalid" }, { - "tcId" : 147, - "comment" : "replacing s with zero", - "flags" : [ + "tcId": 147, + "comment": "replacing s with zero", + "flags": [ "ModifiedSignature" ], - "msg" : "313233343030", - "sig" : "3026022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365020100", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365020100", + "result": "invalid" }, { - "tcId" : 148, - "comment" : "replaced r by r + n", - "flags" : [ + "tcId": 148, + "comment": "replaced r by r + n", + "flags": [ "RangeCheck" ], - "msg" : "313233343030", - "sig" : "3045022101813ef79ccefa9a56f7ba805f0e478583b90deabca4b05c4574e49b5899b964a602206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3045022101813ef79ccefa9a56f7ba805f0e478583b90deabca4b05c4574e49b5899b964a602206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 149, - "comment" : "replaced r by r - n", - "flags" : [ + "tcId": 149, + "comment": "replaced r by r - n", + "flags": [ "RangeCheck" ], - "msg" : "313233343030", - "sig" : "30440220813ef79ccefa9a56f7ba805f0e47858643b030ef461f1bcdf53fde3ef94ce22402206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "30440220813ef79ccefa9a56f7ba805f0e47858643b030ef461f1bcdf53fde3ef94ce22402206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 150, - "comment" : "replaced r by r + 256 * n", - "flags" : [ + "tcId": 150, + "comment": "replaced r by r + 256 * n", + "flags": [ "RangeCheck" ], - "msg" : "313233343030", - "sig" : "304602220100813ef79ccefa9a56f7ba805f0e47843fad3bf4853e07f7c98770c99bffc4646502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304602220100813ef79ccefa9a56f7ba805f0e47843fad3bf4853e07f7c98770c99bffc4646502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 151, - "comment" : "replaced r by -r", - "flags" : [ + "tcId": 151, + "comment": "replaced r by -r", + "flags": [ "ModifiedInteger" ], - "msg" : "313233343030", - "sig" : "30450221ff7ec10863310565a908457fa0f1b87a7b01a0f22a0a9843f64aedc334367cdc9b02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "30450221ff7ec10863310565a908457fa0f1b87a7b01a0f22a0a9843f64aedc334367cdc9b02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 152, - "comment" : "replaced r by n - r", - "flags" : [ + "tcId": 152, + "comment": "replaced r by n - r", + "flags": [ "ModifiedInteger" ], - "msg" : "313233343030", - "sig" : "304402207ec10863310565a908457fa0f1b87a79bc4fcf10b9e0e4320ac021c106b31ddc02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304402207ec10863310565a908457fa0f1b87a79bc4fcf10b9e0e4320ac021c106b31ddc02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 153, - "comment" : "replaced r by -n - r", - "flags" : [ + "tcId": 153, + "comment": "replaced r by -n - r", + "flags": [ "ModifiedInteger" ], - "msg" : "313233343030", - "sig" : 
"30450221fe7ec10863310565a908457fa0f1b87a7c46f215435b4fa3ba8b1b64a766469b5a02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "30450221fe7ec10863310565a908457fa0f1b87a7c46f215435b4fa3ba8b1b64a766469b5a02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 154, - "comment" : "replaced r by r + 2**256", - "flags" : [ + "tcId": 154, + "comment": "replaced r by r + 2**256", + "flags": [ "IntegerOverflow" ], - "msg" : "313233343030", - "sig" : "3045022101813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3045022101813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 155, - "comment" : "replaced r by r + 2**320", - "flags" : [ + "tcId": 155, + "comment": "replaced r by r + 2**320", + "flags": [ "IntegerOverflow" ], - "msg" : "313233343030", - "sig" : "304d0229010000000000000000813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304d0229010000000000000000813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 156, - "comment" : "replaced s by s + n", - "flags" : [ + "tcId": 156, + "comment": "replaced s by s + n", + "flags": [ "RangeCheck" ], - "msg" : "313233343030", - "sig" : "30450221016ff18a52dcc0336f7af62400a6dd9b7fc1e197d8aebe203c96c87232272172fb02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "30450221016ff18a52dcc0336f7af62400a6dd9b7fc1e197d8aebe203c96c87232272172fb02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 157, - "comment" : "replaced s by s - n", - "flags" : [ + "tcId": 157, + "comment": "replaced s by s - n", + "flags": [ "RangeCheck" ], - "msg" : "313233343030", - "sig" : "30450221ff6ff18a52dcc0336f7af62400a6dd9b824c83de0b502cdfc51723b51886b4f07902206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "30450221ff6ff18a52dcc0336f7af62400a6dd9b824c83de0b502cdfc51723b51886b4f07902206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 158, - "comment" : "replaced s by s + 256 * n", - "flags" : [ + "tcId": 158, + "comment": "replaced s by s + 256 * n", + "flags": [ "RangeCheck" ], - "msg" : "313233343030", - "sig" : "3046022201006ff18a52dcc0336f7af62400a6dd9a3bb60fa1a14815bbc0a954a0758d2c72ba02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022201006ff18a52dcc0336f7af62400a6dd9a3bb60fa1a14815bbc0a954a0758d2c72ba02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 159, - "comment" : "replaced s by -s", - "flags" : [ + "tcId": 159, + "comment": "replaced s by -s", + "flags": [ "ModifiedInteger" ], - "msg" : "313233343030", - "sig" : 
"30440220900e75ad233fcc908509dbff5922647ef8cd450e008a7fff2909ec5aa914ce4602206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "30440220900e75ad233fcc908509dbff5922647ef8cd450e008a7fff2909ec5aa914ce4602206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 160, - "comment" : "replaced s by -n - s", - "flags" : [ + "tcId": 160, + "comment": "replaced s by -n - s", + "flags": [ "ModifiedInteger" ], - "msg" : "313233343030", - "sig" : "30450221fe900e75ad233fcc908509dbff592264803e1e68275141dfc369378dcdd8de8d0502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "30450221fe900e75ad233fcc908509dbff592264803e1e68275141dfc369378dcdd8de8d0502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 161, - "comment" : "replaced s by s + 2**256", - "flags" : [ + "tcId": 161, + "comment": "replaced s by s + 2**256", + "flags": [ "IntegerOverflow" ], - "msg" : "313233343030", - "sig" : "30450221016ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "30450221016ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 162, - "comment" : "replaced s by s - 2**256", - "flags" : [ + "tcId": 162, + "comment": "replaced s by s - 2**256", + "flags": [ "IntegerOverflow" ], - "msg" : "313233343030", - "sig" : "30450221ff6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "30450221ff6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 163, - "comment" : "replaced s by s + 2**320", - "flags" : [ + "tcId": 163, + "comment": "replaced s by s + 2**320", + "flags": [ "IntegerOverflow" ], - "msg" : "313233343030", - "sig" : "304d02290100000000000000006ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result" : "invalid" + "msg": "313233343030", + "sig": "304d02290100000000000000006ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result": "invalid" }, { - "tcId" : 164, - "comment" : "Signature with special case values r=0 and s=0", - "flags" : [ + "tcId": 164, + "comment": "Signature with special case values r=0 and s=0", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3006020100020100", - "result" : "invalid" + "msg": "313233343030", + "sig": "3006020100020100", + "result": "invalid" }, { - "tcId" : 165, - "comment" : "Signature with special case values r=0 and s=1", - "flags" : [ + "tcId": 165, + "comment": "Signature with special case values r=0 and s=1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3006020100020101", - "result" : "invalid" + "msg": "313233343030", + "sig": "3006020100020101", + "result": "invalid" }, { - "tcId" : 166, - "comment" : "Signature with special case values r=0 and s=-1", - "flags" : [ + "tcId": 166, + "comment": 
"Signature with special case values r=0 and s=-1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "30060201000201ff", - "result" : "invalid" + "msg": "313233343030", + "sig": "30060201000201ff", + "result": "invalid" }, { - "tcId" : 167, - "comment" : "Signature with special case values r=0 and s=n", - "flags" : [ + "tcId": 167, + "comment": "Signature with special case values r=0 and s=n", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3026020100022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026020100022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", + "result": "invalid" }, { - "tcId" : 168, - "comment" : "Signature with special case values r=0 and s=n - 1", - "flags" : [ + "tcId": 168, + "comment": "Signature with special case values r=0 and s=n - 1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3026020100022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026020100022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", + "result": "invalid" }, { - "tcId" : 169, - "comment" : "Signature with special case values r=0 and s=n + 1", - "flags" : [ + "tcId": 169, + "comment": "Signature with special case values r=0 and s=n + 1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3026020100022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026020100022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", + "result": "invalid" }, { - "tcId" : 170, - "comment" : "Signature with special case values r=0 and s=p", - "flags" : [ + "tcId": 170, + "comment": "Signature with special case values r=0 and s=p", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3026020100022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026020100022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + "result": "invalid" }, { - "tcId" : 171, - "comment" : "Signature with special case values r=0 and s=p + 1", - "flags" : [ + "tcId": 171, + "comment": "Signature with special case values r=0 and s=p + 1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3026020100022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026020100022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", + "result": "invalid" }, { - "tcId" : 172, - "comment" : "Signature with special case values r=1 and s=0", - "flags" : [ + "tcId": 172, + "comment": "Signature with special case values r=1 and s=0", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3006020101020100", - "result" : "invalid" + "msg": "313233343030", + "sig": "3006020101020100", + "result": "invalid" }, { - "tcId" : 173, - "comment" : "Signature with special case values r=1 and s=1", - "flags" : [ + "tcId": 173, + "comment": "Signature with special case values r=1 and s=1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3006020101020101", - "result" : "invalid" + "msg": "313233343030", + "sig": "3006020101020101", + "result": "invalid" }, { - "tcId" : 174, - "comment" : "Signature with special case values r=1 and 
s=-1", - "flags" : [ + "tcId": 174, + "comment": "Signature with special case values r=1 and s=-1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "30060201010201ff", - "result" : "invalid" + "msg": "313233343030", + "sig": "30060201010201ff", + "result": "invalid" }, { - "tcId" : 175, - "comment" : "Signature with special case values r=1 and s=n", - "flags" : [ + "tcId": 175, + "comment": "Signature with special case values r=1 and s=n", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3026020101022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026020101022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", + "result": "invalid" }, { - "tcId" : 176, - "comment" : "Signature with special case values r=1 and s=n - 1", - "flags" : [ + "tcId": 176, + "comment": "Signature with special case values r=1 and s=n - 1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3026020101022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026020101022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", + "result": "invalid" }, { - "tcId" : 177, - "comment" : "Signature with special case values r=1 and s=n + 1", - "flags" : [ + "tcId": 177, + "comment": "Signature with special case values r=1 and s=n + 1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3026020101022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026020101022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", + "result": "invalid" }, { - "tcId" : 178, - "comment" : "Signature with special case values r=1 and s=p", - "flags" : [ + "tcId": 178, + "comment": "Signature with special case values r=1 and s=p", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3026020101022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026020101022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + "result": "invalid" }, { - "tcId" : 179, - "comment" : "Signature with special case values r=1 and s=p + 1", - "flags" : [ + "tcId": 179, + "comment": "Signature with special case values r=1 and s=p + 1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3026020101022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026020101022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", + "result": "invalid" }, { - "tcId" : 180, - "comment" : "Signature with special case values r=-1 and s=0", - "flags" : [ + "tcId": 180, + "comment": "Signature with special case values r=-1 and s=0", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "30060201ff020100", - "result" : "invalid" + "msg": "313233343030", + "sig": "30060201ff020100", + "result": "invalid" }, { - "tcId" : 181, - "comment" : "Signature with special case values r=-1 and s=1", - "flags" : [ + "tcId": 181, + "comment": "Signature with special case values r=-1 and s=1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "30060201ff020101", - "result" : "invalid" + "msg": "313233343030", + "sig": "30060201ff020101", + "result": "invalid" }, { - "tcId" : 182, - 
"comment" : "Signature with special case values r=-1 and s=-1", - "flags" : [ + "tcId": 182, + "comment": "Signature with special case values r=-1 and s=-1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "30060201ff0201ff", - "result" : "invalid" + "msg": "313233343030", + "sig": "30060201ff0201ff", + "result": "invalid" }, { - "tcId" : 183, - "comment" : "Signature with special case values r=-1 and s=n", - "flags" : [ + "tcId": 183, + "comment": "Signature with special case values r=-1 and s=n", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "30260201ff022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", - "result" : "invalid" + "msg": "313233343030", + "sig": "30260201ff022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", + "result": "invalid" }, { - "tcId" : 184, - "comment" : "Signature with special case values r=-1 and s=n - 1", - "flags" : [ + "tcId": 184, + "comment": "Signature with special case values r=-1 and s=n - 1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "30260201ff022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", - "result" : "invalid" + "msg": "313233343030", + "sig": "30260201ff022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", + "result": "invalid" }, { - "tcId" : 185, - "comment" : "Signature with special case values r=-1 and s=n + 1", - "flags" : [ + "tcId": 185, + "comment": "Signature with special case values r=-1 and s=n + 1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "30260201ff022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", - "result" : "invalid" + "msg": "313233343030", + "sig": "30260201ff022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", + "result": "invalid" }, { - "tcId" : 186, - "comment" : "Signature with special case values r=-1 and s=p", - "flags" : [ + "tcId": 186, + "comment": "Signature with special case values r=-1 and s=p", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "30260201ff022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", - "result" : "invalid" + "msg": "313233343030", + "sig": "30260201ff022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + "result": "invalid" }, { - "tcId" : 187, - "comment" : "Signature with special case values r=-1 and s=p + 1", - "flags" : [ + "tcId": 187, + "comment": "Signature with special case values r=-1 and s=p + 1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "30260201ff022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", - "result" : "invalid" + "msg": "313233343030", + "sig": "30260201ff022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", + "result": "invalid" }, { - "tcId" : 188, - "comment" : "Signature with special case values r=n and s=0", - "flags" : [ + "tcId": 188, + "comment": "Signature with special case values r=n and s=0", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141020100", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141020100", + "result": "invalid" }, { - "tcId" : 189, - "comment" : "Signature with special case values r=n and s=1", - "flags" : [ + "tcId": 189, + "comment": "Signature with special case values r=n and s=1", + "flags": [ 
"InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141020101", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141020101", + "result": "invalid" }, { - "tcId" : 190, - "comment" : "Signature with special case values r=n and s=-1", - "flags" : [ + "tcId": 190, + "comment": "Signature with special case values r=n and s=-1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641410201ff", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641410201ff", + "result": "invalid" }, { - "tcId" : 191, - "comment" : "Signature with special case values r=n and s=n", - "flags" : [ + "tcId": 191, + "comment": "Signature with special case values r=n and s=n", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", + "result": "invalid" }, { - "tcId" : 192, - "comment" : "Signature with special case values r=n and s=n - 1", - "flags" : [ + "tcId": 192, + "comment": "Signature with special case values r=n and s=n - 1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", + "result": "invalid" }, { - "tcId" : 193, - "comment" : "Signature with special case values r=n and s=n + 1", - "flags" : [ + "tcId": 193, + "comment": "Signature with special case values r=n and s=n + 1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", + "result": "invalid" }, { - "tcId" : 194, - "comment" : "Signature with special case values r=n and s=p", - "flags" : [ + "tcId": 194, + "comment": "Signature with special case values r=n and s=p", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + "result": "invalid" }, { - "tcId" : 195, - "comment" : "Signature with special case values r=n and s=p + 1", - "flags" : [ + "tcId": 195, + "comment": "Signature with special case values r=n and s=p + 1", + "flags": [ "InvalidSignature" ], - "msg" : 
"313233343030", - "sig" : "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", + "result": "invalid" }, { - "tcId" : 196, - "comment" : "Signature with special case values r=n - 1 and s=0", - "flags" : [ + "tcId": 196, + "comment": "Signature with special case values r=n - 1 and s=0", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140020100", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140020100", + "result": "invalid" }, { - "tcId" : 197, - "comment" : "Signature with special case values r=n - 1 and s=1", - "flags" : [ + "tcId": 197, + "comment": "Signature with special case values r=n - 1 and s=1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140020101", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140020101", + "result": "invalid" }, { - "tcId" : 198, - "comment" : "Signature with special case values r=n - 1 and s=-1", - "flags" : [ + "tcId": 198, + "comment": "Signature with special case values r=n - 1 and s=-1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641400201ff", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641400201ff", + "result": "invalid" }, { - "tcId" : 199, - "comment" : "Signature with special case values r=n - 1 and s=n", - "flags" : [ + "tcId": 199, + "comment": "Signature with special case values r=n - 1 and s=n", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", + "result": "invalid" }, { - "tcId" : 200, - "comment" : "Signature with special case values r=n - 1 and s=n - 1", - "flags" : [ + "tcId": 200, + "comment": "Signature with special case values r=n - 1 and s=n - 1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", + "result": "invalid" }, { - "tcId" : 201, - "comment" : "Signature with special case values r=n - 1 and s=n + 1", - "flags" : [ + "tcId": 201, + "comment": "Signature with special case values r=n - 1 and s=n + 1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : 
"3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", + "result": "invalid" }, { - "tcId" : 202, - "comment" : "Signature with special case values r=n - 1 and s=p", - "flags" : [ + "tcId": 202, + "comment": "Signature with special case values r=n - 1 and s=p", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + "result": "invalid" }, { - "tcId" : 203, - "comment" : "Signature with special case values r=n - 1 and s=p + 1", - "flags" : [ + "tcId": 203, + "comment": "Signature with special case values r=n - 1 and s=p + 1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", + "result": "invalid" }, { - "tcId" : 204, - "comment" : "Signature with special case values r=n + 1 and s=0", - "flags" : [ + "tcId": 204, + "comment": "Signature with special case values r=n + 1 and s=0", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142020100", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142020100", + "result": "invalid" }, { - "tcId" : 205, - "comment" : "Signature with special case values r=n + 1 and s=1", - "flags" : [ + "tcId": 205, + "comment": "Signature with special case values r=n + 1 and s=1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142020101", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142020101", + "result": "invalid" }, { - "tcId" : 206, - "comment" : "Signature with special case values r=n + 1 and s=-1", - "flags" : [ + "tcId": 206, + "comment": "Signature with special case values r=n + 1 and s=-1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641420201ff", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641420201ff", + "result": "invalid" }, { - "tcId" : 207, - "comment" : "Signature with special case values r=n + 1 and s=n", - "flags" : [ + "tcId": 207, + "comment": "Signature with special case values r=n + 1 and s=n", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : 
"3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", + "result": "invalid" }, { - "tcId" : 208, - "comment" : "Signature with special case values r=n + 1 and s=n - 1", - "flags" : [ + "tcId": 208, + "comment": "Signature with special case values r=n + 1 and s=n - 1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", + "result": "invalid" }, { - "tcId" : 209, - "comment" : "Signature with special case values r=n + 1 and s=n + 1", - "flags" : [ + "tcId": 209, + "comment": "Signature with special case values r=n + 1 and s=n + 1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", + "result": "invalid" }, { - "tcId" : 210, - "comment" : "Signature with special case values r=n + 1 and s=p", - "flags" : [ + "tcId": 210, + "comment": "Signature with special case values r=n + 1 and s=p", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + "result": "invalid" }, { - "tcId" : 211, - "comment" : "Signature with special case values r=n + 1 and s=p + 1", - "flags" : [ + "tcId": 211, + "comment": "Signature with special case values r=n + 1 and s=p + 1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", + "result": "invalid" }, { - "tcId" : 212, - "comment" : "Signature with special case values r=p and s=0", - "flags" : [ + "tcId": 212, + "comment": "Signature with special case values r=p and s=0", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f020100", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f020100", + "result": "invalid" }, { - "tcId" : 213, - "comment" : "Signature with special case values r=p and s=1", - "flags" : [ + "tcId": 213, + "comment": 
"Signature with special case values r=p and s=1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f020101", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f020101", + "result": "invalid" }, { - "tcId" : 214, - "comment" : "Signature with special case values r=p and s=-1", - "flags" : [ + "tcId": 214, + "comment": "Signature with special case values r=p and s=-1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f0201ff", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f0201ff", + "result": "invalid" }, { - "tcId" : 215, - "comment" : "Signature with special case values r=p and s=n", - "flags" : [ + "tcId": 215, + "comment": "Signature with special case values r=p and s=n", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", + "result": "invalid" }, { - "tcId" : 216, - "comment" : "Signature with special case values r=p and s=n - 1", - "flags" : [ + "tcId": 216, + "comment": "Signature with special case values r=p and s=n - 1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", + "result": "invalid" }, { - "tcId" : 217, - "comment" : "Signature with special case values r=p and s=n + 1", - "flags" : [ + "tcId": 217, + "comment": "Signature with special case values r=p and s=n + 1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", + "result": "invalid" }, { - "tcId" : 218, - "comment" : "Signature with special case values r=p and s=p", - "flags" : [ + "tcId": 218, + "comment": "Signature with special case values r=p and s=p", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + "result": "invalid" }, { - "tcId" : 219, - "comment" : "Signature with special case values r=p and s=p + 1", - "flags" : [ + "tcId": 219, + "comment": "Signature with special case values r=p and s=p + 
1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", + "result": "invalid" }, { - "tcId" : 220, - "comment" : "Signature with special case values r=p + 1 and s=0", - "flags" : [ + "tcId": 220, + "comment": "Signature with special case values r=p + 1 and s=0", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30020100", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30020100", + "result": "invalid" }, { - "tcId" : 221, - "comment" : "Signature with special case values r=p + 1 and s=1", - "flags" : [ + "tcId": 221, + "comment": "Signature with special case values r=p + 1 and s=1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30020101", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30020101", + "result": "invalid" }, { - "tcId" : 222, - "comment" : "Signature with special case values r=p + 1 and s=-1", - "flags" : [ + "tcId": 222, + "comment": "Signature with special case values r=p + 1 and s=-1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc300201ff", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc300201ff", + "result": "invalid" }, { - "tcId" : 223, - "comment" : "Signature with special case values r=p + 1 and s=n", - "flags" : [ + "tcId": 223, + "comment": "Signature with special case values r=p + 1 and s=n", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", + "result": "invalid" }, { - "tcId" : 224, - "comment" : "Signature with special case values r=p + 1 and s=n - 1", - "flags" : [ + "tcId": 224, + "comment": "Signature with special case values r=p + 1 and s=n - 1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", + "result": "invalid" }, { - "tcId" : 225, - "comment" : "Signature with special case values r=p + 1 and s=n + 1", - "flags" : [ + "tcId": 225, + "comment": "Signature with special case values r=p + 1 and s=n + 1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : 
"3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", + "result": "invalid" }, { - "tcId" : 226, - "comment" : "Signature with special case values r=p + 1 and s=p", - "flags" : [ + "tcId": 226, + "comment": "Signature with special case values r=p + 1 and s=p", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + "result": "invalid" }, { - "tcId" : 227, - "comment" : "Signature with special case values r=p + 1 and s=p + 1", - "flags" : [ + "tcId": 227, + "comment": "Signature with special case values r=p + 1 and s=p + 1", + "flags": [ "InvalidSignature" ], - "msg" : "313233343030", - "sig" : "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", - "result" : "invalid" + "msg": "313233343030", + "sig": "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", + "result": "invalid" }, { - "tcId" : 228, - "comment" : "Signature encoding contains incorrect types: r=0, s=0.25", - "flags" : [ + "tcId": 228, + "comment": "Signature encoding contains incorrect types: r=0, s=0.25", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3008020100090380fe01", - "result" : "invalid" + "msg": "313233343030", + "sig": "3008020100090380fe01", + "result": "invalid" }, { - "tcId" : 229, - "comment" : "Signature encoding contains incorrect types: r=0, s=nan", - "flags" : [ + "tcId": 229, + "comment": "Signature encoding contains incorrect types: r=0, s=nan", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3006020100090142", - "result" : "invalid" + "msg": "313233343030", + "sig": "3006020100090142", + "result": "invalid" }, { - "tcId" : 230, - "comment" : "Signature encoding contains incorrect types: r=0, s=True", - "flags" : [ + "tcId": 230, + "comment": "Signature encoding contains incorrect types: r=0, s=True", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3006020100010101", - "result" : "invalid" + "msg": "313233343030", + "sig": "3006020100010101", + "result": "invalid" }, { - "tcId" : 231, - "comment" : "Signature encoding contains incorrect types: r=0, s=False", - "flags" : [ + "tcId": 231, + "comment": "Signature encoding contains incorrect types: r=0, s=False", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3006020100010100", - "result" : "invalid" + "msg": "313233343030", + "sig": "3006020100010100", + "result": "invalid" }, { - "tcId" : 232, - "comment" : "Signature encoding contains incorrect types: r=0, s=Null", - "flags" : [ + "tcId": 232, + "comment": "Signature encoding contains incorrect types: r=0, s=Null", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "30050201000500", - "result" : "invalid" + "msg": 
"313233343030", + "sig": "30050201000500", + "result": "invalid" }, { - "tcId" : 233, - "comment" : "Signature encoding contains incorrect types: r=0, s=empyt UTF-8 string", - "flags" : [ + "tcId": 233, + "comment": "Signature encoding contains incorrect types: r=0, s=empyt UTF-8 string", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "30050201000c00", - "result" : "invalid" + "msg": "313233343030", + "sig": "30050201000c00", + "result": "invalid" }, { - "tcId" : 234, - "comment" : "Signature encoding contains incorrect types: r=0, s=\"0\"", - "flags" : [ + "tcId": 234, + "comment": "Signature encoding contains incorrect types: r=0, s=\"0\"", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "30060201000c0130", - "result" : "invalid" + "msg": "313233343030", + "sig": "30060201000c0130", + "result": "invalid" }, { - "tcId" : 235, - "comment" : "Signature encoding contains incorrect types: r=0, s=empty list", - "flags" : [ + "tcId": 235, + "comment": "Signature encoding contains incorrect types: r=0, s=empty list", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "30050201003000", - "result" : "invalid" + "msg": "313233343030", + "sig": "30050201003000", + "result": "invalid" }, { - "tcId" : 236, - "comment" : "Signature encoding contains incorrect types: r=0, s=list containing 0", - "flags" : [ + "tcId": 236, + "comment": "Signature encoding contains incorrect types: r=0, s=list containing 0", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "30080201003003020100", - "result" : "invalid" + "msg": "313233343030", + "sig": "30080201003003020100", + "result": "invalid" }, { - "tcId" : 237, - "comment" : "Signature encoding contains incorrect types: r=1, s=0.25", - "flags" : [ + "tcId": 237, + "comment": "Signature encoding contains incorrect types: r=1, s=0.25", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3008020101090380fe01", - "result" : "invalid" + "msg": "313233343030", + "sig": "3008020101090380fe01", + "result": "invalid" }, { - "tcId" : 238, - "comment" : "Signature encoding contains incorrect types: r=1, s=nan", - "flags" : [ + "tcId": 238, + "comment": "Signature encoding contains incorrect types: r=1, s=nan", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3006020101090142", - "result" : "invalid" + "msg": "313233343030", + "sig": "3006020101090142", + "result": "invalid" }, { - "tcId" : 239, - "comment" : "Signature encoding contains incorrect types: r=1, s=True", - "flags" : [ + "tcId": 239, + "comment": "Signature encoding contains incorrect types: r=1, s=True", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3006020101010101", - "result" : "invalid" + "msg": "313233343030", + "sig": "3006020101010101", + "result": "invalid" }, { - "tcId" : 240, - "comment" : "Signature encoding contains incorrect types: r=1, s=False", - "flags" : [ + "tcId": 240, + "comment": "Signature encoding contains incorrect types: r=1, s=False", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3006020101010100", - "result" : "invalid" + "msg": "313233343030", + "sig": "3006020101010100", + "result": "invalid" }, { - "tcId" : 241, - "comment" : "Signature encoding contains incorrect types: r=1, s=Null", - "flags" : [ + "tcId": 241, + "comment": "Signature encoding contains incorrect types: r=1, s=Null", + "flags": [ "InvalidTypesInSignature" ], - "msg" : 
"313233343030", - "sig" : "30050201010500", - "result" : "invalid" + "msg": "313233343030", + "sig": "30050201010500", + "result": "invalid" }, { - "tcId" : 242, - "comment" : "Signature encoding contains incorrect types: r=1, s=empyt UTF-8 string", - "flags" : [ + "tcId": 242, + "comment": "Signature encoding contains incorrect types: r=1, s=empyt UTF-8 string", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "30050201010c00", - "result" : "invalid" + "msg": "313233343030", + "sig": "30050201010c00", + "result": "invalid" }, { - "tcId" : 243, - "comment" : "Signature encoding contains incorrect types: r=1, s=\"0\"", - "flags" : [ + "tcId": 243, + "comment": "Signature encoding contains incorrect types: r=1, s=\"0\"", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "30060201010c0130", - "result" : "invalid" + "msg": "313233343030", + "sig": "30060201010c0130", + "result": "invalid" }, { - "tcId" : 244, - "comment" : "Signature encoding contains incorrect types: r=1, s=empty list", - "flags" : [ + "tcId": 244, + "comment": "Signature encoding contains incorrect types: r=1, s=empty list", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "30050201013000", - "result" : "invalid" + "msg": "313233343030", + "sig": "30050201013000", + "result": "invalid" }, { - "tcId" : 245, - "comment" : "Signature encoding contains incorrect types: r=1, s=list containing 0", - "flags" : [ + "tcId": 245, + "comment": "Signature encoding contains incorrect types: r=1, s=list containing 0", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "30080201013003020100", - "result" : "invalid" + "msg": "313233343030", + "sig": "30080201013003020100", + "result": "invalid" }, { - "tcId" : 246, - "comment" : "Signature encoding contains incorrect types: r=-1, s=0.25", - "flags" : [ + "tcId": 246, + "comment": "Signature encoding contains incorrect types: r=-1, s=0.25", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "30080201ff090380fe01", - "result" : "invalid" + "msg": "313233343030", + "sig": "30080201ff090380fe01", + "result": "invalid" }, { - "tcId" : 247, - "comment" : "Signature encoding contains incorrect types: r=-1, s=nan", - "flags" : [ + "tcId": 247, + "comment": "Signature encoding contains incorrect types: r=-1, s=nan", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "30060201ff090142", - "result" : "invalid" + "msg": "313233343030", + "sig": "30060201ff090142", + "result": "invalid" }, { - "tcId" : 248, - "comment" : "Signature encoding contains incorrect types: r=-1, s=True", - "flags" : [ + "tcId": 248, + "comment": "Signature encoding contains incorrect types: r=-1, s=True", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "30060201ff010101", - "result" : "invalid" + "msg": "313233343030", + "sig": "30060201ff010101", + "result": "invalid" }, { - "tcId" : 249, - "comment" : "Signature encoding contains incorrect types: r=-1, s=False", - "flags" : [ + "tcId": 249, + "comment": "Signature encoding contains incorrect types: r=-1, s=False", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "30060201ff010100", - "result" : "invalid" + "msg": "313233343030", + "sig": "30060201ff010100", + "result": "invalid" }, { - "tcId" : 250, - "comment" : "Signature encoding contains incorrect types: r=-1, s=Null", - "flags" : [ + "tcId": 250, + "comment": "Signature encoding contains incorrect 
types: r=-1, s=Null", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "30050201ff0500", - "result" : "invalid" + "msg": "313233343030", + "sig": "30050201ff0500", + "result": "invalid" }, { - "tcId" : 251, - "comment" : "Signature encoding contains incorrect types: r=-1, s=empyt UTF-8 string", - "flags" : [ + "tcId": 251, + "comment": "Signature encoding contains incorrect types: r=-1, s=empyt UTF-8 string", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "30050201ff0c00", - "result" : "invalid" + "msg": "313233343030", + "sig": "30050201ff0c00", + "result": "invalid" }, { - "tcId" : 252, - "comment" : "Signature encoding contains incorrect types: r=-1, s=\"0\"", - "flags" : [ + "tcId": 252, + "comment": "Signature encoding contains incorrect types: r=-1, s=\"0\"", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "30060201ff0c0130", - "result" : "invalid" + "msg": "313233343030", + "sig": "30060201ff0c0130", + "result": "invalid" }, { - "tcId" : 253, - "comment" : "Signature encoding contains incorrect types: r=-1, s=empty list", - "flags" : [ + "tcId": 253, + "comment": "Signature encoding contains incorrect types: r=-1, s=empty list", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "30050201ff3000", - "result" : "invalid" + "msg": "313233343030", + "sig": "30050201ff3000", + "result": "invalid" }, { - "tcId" : 254, - "comment" : "Signature encoding contains incorrect types: r=-1, s=list containing 0", - "flags" : [ + "tcId": 254, + "comment": "Signature encoding contains incorrect types: r=-1, s=list containing 0", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "30080201ff3003020100", - "result" : "invalid" + "msg": "313233343030", + "sig": "30080201ff3003020100", + "result": "invalid" }, { - "tcId" : 255, - "comment" : "Signature encoding contains incorrect types: r=n, s=0.25", - "flags" : [ + "tcId": 255, + "comment": "Signature encoding contains incorrect types: r=n, s=0.25", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3028022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141090380fe01", - "result" : "invalid" + "msg": "313233343030", + "sig": "3028022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141090380fe01", + "result": "invalid" }, { - "tcId" : 256, - "comment" : "Signature encoding contains incorrect types: r=n, s=nan", - "flags" : [ + "tcId": 256, + "comment": "Signature encoding contains incorrect types: r=n, s=nan", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141090142", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141090142", + "result": "invalid" }, { - "tcId" : 257, - "comment" : "Signature encoding contains incorrect types: r=n, s=True", - "flags" : [ + "tcId": 257, + "comment": "Signature encoding contains incorrect types: r=n, s=True", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141010101", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141010101", + "result": "invalid" }, { - "tcId" : 258, - "comment" : "Signature encoding contains incorrect types: r=n, s=False", - "flags" : [ + 
"tcId": 258, + "comment": "Signature encoding contains incorrect types: r=n, s=False", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141010100", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141010100", + "result": "invalid" }, { - "tcId" : 259, - "comment" : "Signature encoding contains incorrect types: r=n, s=Null", - "flags" : [ + "tcId": 259, + "comment": "Signature encoding contains incorrect types: r=n, s=Null", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3025022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641410500", - "result" : "invalid" + "msg": "313233343030", + "sig": "3025022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641410500", + "result": "invalid" }, { - "tcId" : 260, - "comment" : "Signature encoding contains incorrect types: r=n, s=empyt UTF-8 string", - "flags" : [ + "tcId": 260, + "comment": "Signature encoding contains incorrect types: r=n, s=empyt UTF-8 string", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3025022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641410c00", - "result" : "invalid" + "msg": "313233343030", + "sig": "3025022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641410c00", + "result": "invalid" }, { - "tcId" : 261, - "comment" : "Signature encoding contains incorrect types: r=n, s=\"0\"", - "flags" : [ + "tcId": 261, + "comment": "Signature encoding contains incorrect types: r=n, s=\"0\"", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641410c0130", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641410c0130", + "result": "invalid" }, { - "tcId" : 262, - "comment" : "Signature encoding contains incorrect types: r=n, s=empty list", - "flags" : [ + "tcId": 262, + "comment": "Signature encoding contains incorrect types: r=n, s=empty list", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3025022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641413000", - "result" : "invalid" + "msg": "313233343030", + "sig": "3025022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641413000", + "result": "invalid" }, { - "tcId" : 263, - "comment" : "Signature encoding contains incorrect types: r=n, s=list containing 0", - "flags" : [ + "tcId": 263, + "comment": "Signature encoding contains incorrect types: r=n, s=list containing 0", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3028022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641413003020100", - "result" : "invalid" + "msg": "313233343030", + "sig": "3028022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641413003020100", + "result": "invalid" }, { - "tcId" : 264, - "comment" : "Signature encoding contains incorrect types: r=p, s=0.25", - "flags" : [ + "tcId": 264, + "comment": "Signature encoding contains incorrect types: r=p, s=0.25", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3028022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f090380fe01", - "result" : "invalid" + "msg": "313233343030", + "sig": 
"3028022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f090380fe01", + "result": "invalid" }, { - "tcId" : 265, - "comment" : "Signature encoding contains incorrect types: r=p, s=nan", - "flags" : [ + "tcId": 265, + "comment": "Signature encoding contains incorrect types: r=p, s=nan", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f090142", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f090142", + "result": "invalid" }, { - "tcId" : 266, - "comment" : "Signature encoding contains incorrect types: r=p, s=True", - "flags" : [ + "tcId": 266, + "comment": "Signature encoding contains incorrect types: r=p, s=True", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f010101", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f010101", + "result": "invalid" }, { - "tcId" : 267, - "comment" : "Signature encoding contains incorrect types: r=p, s=False", - "flags" : [ + "tcId": 267, + "comment": "Signature encoding contains incorrect types: r=p, s=False", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f010100", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f010100", + "result": "invalid" }, { - "tcId" : 268, - "comment" : "Signature encoding contains incorrect types: r=p, s=Null", - "flags" : [ + "tcId": 268, + "comment": "Signature encoding contains incorrect types: r=p, s=Null", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3025022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f0500", - "result" : "invalid" + "msg": "313233343030", + "sig": "3025022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f0500", + "result": "invalid" }, { - "tcId" : 269, - "comment" : "Signature encoding contains incorrect types: r=p, s=empyt UTF-8 string", - "flags" : [ + "tcId": 269, + "comment": "Signature encoding contains incorrect types: r=p, s=empyt UTF-8 string", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3025022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f0c00", - "result" : "invalid" + "msg": "313233343030", + "sig": "3025022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f0c00", + "result": "invalid" }, { - "tcId" : 270, - "comment" : "Signature encoding contains incorrect types: r=p, s=\"0\"", - "flags" : [ + "tcId": 270, + "comment": "Signature encoding contains incorrect types: r=p, s=\"0\"", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f0c0130", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f0c0130", + "result": "invalid" }, { - "tcId" : 271, - "comment" : "Signature encoding contains incorrect types: r=p, s=empty list", - "flags" : [ + "tcId": 271, + "comment": "Signature encoding contains incorrect types: r=p, s=empty list", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : 
"3025022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f3000", - "result" : "invalid" + "msg": "313233343030", + "sig": "3025022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f3000", + "result": "invalid" }, { - "tcId" : 272, - "comment" : "Signature encoding contains incorrect types: r=p, s=list containing 0", - "flags" : [ + "tcId": 272, + "comment": "Signature encoding contains incorrect types: r=p, s=list containing 0", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3028022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f3003020100", - "result" : "invalid" + "msg": "313233343030", + "sig": "3028022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f3003020100", + "result": "invalid" }, { - "tcId" : 273, - "comment" : "Signature encoding contains incorrect types: r=0.25, s=0.25", - "flags" : [ + "tcId": 273, + "comment": "Signature encoding contains incorrect types: r=0.25, s=0.25", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "300a090380fe01090380fe01", - "result" : "invalid" + "msg": "313233343030", + "sig": "300a090380fe01090380fe01", + "result": "invalid" }, { - "tcId" : 274, - "comment" : "Signature encoding contains incorrect types: r=nan, s=nan", - "flags" : [ + "tcId": 274, + "comment": "Signature encoding contains incorrect types: r=nan, s=nan", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3006090142090142", - "result" : "invalid" + "msg": "313233343030", + "sig": "3006090142090142", + "result": "invalid" }, { - "tcId" : 275, - "comment" : "Signature encoding contains incorrect types: r=True, s=True", - "flags" : [ + "tcId": 275, + "comment": "Signature encoding contains incorrect types: r=True, s=True", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3006010101010101", - "result" : "invalid" + "msg": "313233343030", + "sig": "3006010101010101", + "result": "invalid" }, { - "tcId" : 276, - "comment" : "Signature encoding contains incorrect types: r=False, s=False", - "flags" : [ + "tcId": 276, + "comment": "Signature encoding contains incorrect types: r=False, s=False", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3006010100010100", - "result" : "invalid" + "msg": "313233343030", + "sig": "3006010100010100", + "result": "invalid" }, { - "tcId" : 277, - "comment" : "Signature encoding contains incorrect types: r=Null, s=Null", - "flags" : [ + "tcId": 277, + "comment": "Signature encoding contains incorrect types: r=Null, s=Null", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "300405000500", - "result" : "invalid" + "msg": "313233343030", + "sig": "300405000500", + "result": "invalid" }, { - "tcId" : 278, - "comment" : "Signature encoding contains incorrect types: r=empyt UTF-8 string, s=empyt UTF-8 string", - "flags" : [ + "tcId": 278, + "comment": "Signature encoding contains incorrect types: r=empyt UTF-8 string, s=empyt UTF-8 string", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "30040c000c00", - "result" : "invalid" + "msg": "313233343030", + "sig": "30040c000c00", + "result": "invalid" }, { - "tcId" : 279, - "comment" : "Signature encoding contains incorrect types: r=\"0\", s=\"0\"", - "flags" : [ + "tcId": 279, + "comment": "Signature encoding contains incorrect types: r=\"0\", s=\"0\"", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : 
"30060c01300c0130", - "result" : "invalid" + "msg": "313233343030", + "sig": "30060c01300c0130", + "result": "invalid" }, { - "tcId" : 280, - "comment" : "Signature encoding contains incorrect types: r=empty list, s=empty list", - "flags" : [ + "tcId": 280, + "comment": "Signature encoding contains incorrect types: r=empty list, s=empty list", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "300430003000", - "result" : "invalid" + "msg": "313233343030", + "sig": "300430003000", + "result": "invalid" }, { - "tcId" : 281, - "comment" : "Signature encoding contains incorrect types: r=list containing 0, s=list containing 0", - "flags" : [ + "tcId": 281, + "comment": "Signature encoding contains incorrect types: r=list containing 0, s=list containing 0", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "300a30030201003003020100", - "result" : "invalid" + "msg": "313233343030", + "sig": "300a30030201003003020100", + "result": "invalid" }, { - "tcId" : 282, - "comment" : "Signature encoding contains incorrect types: r=0.25, s=0", - "flags" : [ + "tcId": 282, + "comment": "Signature encoding contains incorrect types: r=0.25, s=0", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3008090380fe01020100", - "result" : "invalid" + "msg": "313233343030", + "sig": "3008090380fe01020100", + "result": "invalid" }, { - "tcId" : 283, - "comment" : "Signature encoding contains incorrect types: r=nan, s=0", - "flags" : [ + "tcId": 283, + "comment": "Signature encoding contains incorrect types: r=nan, s=0", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3006090142020100", - "result" : "invalid" + "msg": "313233343030", + "sig": "3006090142020100", + "result": "invalid" }, { - "tcId" : 284, - "comment" : "Signature encoding contains incorrect types: r=True, s=0", - "flags" : [ + "tcId": 284, + "comment": "Signature encoding contains incorrect types: r=True, s=0", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3006010101020100", - "result" : "invalid" + "msg": "313233343030", + "sig": "3006010101020100", + "result": "invalid" }, { - "tcId" : 285, - "comment" : "Signature encoding contains incorrect types: r=False, s=0", - "flags" : [ + "tcId": 285, + "comment": "Signature encoding contains incorrect types: r=False, s=0", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "3006010100020100", - "result" : "invalid" + "msg": "313233343030", + "sig": "3006010100020100", + "result": "invalid" }, { - "tcId" : 286, - "comment" : "Signature encoding contains incorrect types: r=Null, s=0", - "flags" : [ + "tcId": 286, + "comment": "Signature encoding contains incorrect types: r=Null, s=0", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "30050500020100", - "result" : "invalid" + "msg": "313233343030", + "sig": "30050500020100", + "result": "invalid" }, { - "tcId" : 287, - "comment" : "Signature encoding contains incorrect types: r=empyt UTF-8 string, s=0", - "flags" : [ + "tcId": 287, + "comment": "Signature encoding contains incorrect types: r=empyt UTF-8 string, s=0", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "30050c00020100", - "result" : "invalid" + "msg": "313233343030", + "sig": "30050c00020100", + "result": "invalid" }, { - "tcId" : 288, - "comment" : "Signature encoding contains incorrect types: r=\"0\", s=0", - "flags" : [ + "tcId": 288, + "comment": "Signature encoding 
contains incorrect types: r=\"0\", s=0", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "30060c0130020100", - "result" : "invalid" + "msg": "313233343030", + "sig": "30060c0130020100", + "result": "invalid" }, { - "tcId" : 289, - "comment" : "Signature encoding contains incorrect types: r=empty list, s=0", - "flags" : [ + "tcId": 289, + "comment": "Signature encoding contains incorrect types: r=empty list, s=0", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "30053000020100", - "result" : "invalid" + "msg": "313233343030", + "sig": "30053000020100", + "result": "invalid" }, { - "tcId" : 290, - "comment" : "Signature encoding contains incorrect types: r=list containing 0, s=0", - "flags" : [ + "tcId": 290, + "comment": "Signature encoding contains incorrect types: r=list containing 0, s=0", + "flags": [ "InvalidTypesInSignature" ], - "msg" : "313233343030", - "sig" : "30083003020100020100", - "result" : "invalid" + "msg": "313233343030", + "sig": "30083003020100020100", + "result": "invalid" }, { - "tcId" : 291, - "comment" : "Edge case for Shamir multiplication", - "flags" : [ + "tcId": 291, + "comment": "Edge case for Shamir multiplication", + "flags": [ "EdgeCaseShamirMultiplication" ], - "msg" : "3235353835", - "sig" : "3045022100dd1b7d09a7bd8218961034a39a87fecf5314f00c4d25eb58a07ac85e85eab516022035138c401ef8d3493d65c9002fe62b43aee568731b744548358996d9cc427e06", - "result" : "valid" + "msg": "3235353835", + "sig": "3045022100dd1b7d09a7bd8218961034a39a87fecf5314f00c4d25eb58a07ac85e85eab516022035138c401ef8d3493d65c9002fe62b43aee568731b744548358996d9cc427e06", + "result": "valid" }, { - "tcId" : 292, - "comment" : "special case hash", - "flags" : [ + "tcId": 292, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "343236343739373234", - "sig" : "304502210095c29267d972a043d955224546222bba343fc1d4db0fec262a33ac61305696ae02206edfe96713aed56f8a28a6653f57e0b829712e5eddc67f34682b24f0676b2640", - "result" : "valid" + "msg": "343236343739373234", + "sig": "304502210095c29267d972a043d955224546222bba343fc1d4db0fec262a33ac61305696ae02206edfe96713aed56f8a28a6653f57e0b829712e5eddc67f34682b24f0676b2640", + "result": "valid" }, { - "tcId" : 293, - "comment" : "special case hash", - "flags" : [ + "tcId": 293, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "37313338363834383931", - "sig" : "3044022028f94a894e92024699e345fe66971e3edcd050023386135ab3939d550898fb25022032963e5bd41fa5911ed8f37deb86dae0a762bb6121c894615083c5d95ea01db3", - "result" : "valid" + "msg": "37313338363834383931", + "sig": "3044022028f94a894e92024699e345fe66971e3edcd050023386135ab3939d550898fb25022032963e5bd41fa5911ed8f37deb86dae0a762bb6121c894615083c5d95ea01db3", + "result": "valid" }, { - "tcId" : 294, - "comment" : "special case hash", - "flags" : [ + "tcId": 294, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "3130333539333331363638", - "sig" : "3045022100be26b18f9549f89f411a9b52536b15aa270b84548d0e859a1952a27af1a77ac6022070c1d4fa9cd03cc8eaa8d506edb97eed7b8358b453c88aefbb880a3f0e8d472f", - "result" : "valid" + "msg": "3130333539333331363638", + "sig": "3045022100be26b18f9549f89f411a9b52536b15aa270b84548d0e859a1952a27af1a77ac6022070c1d4fa9cd03cc8eaa8d506edb97eed7b8358b453c88aefbb880a3f0e8d472f", + "result": "valid" }, { - "tcId" : 295, - "comment" : "special case hash", - "flags" : [ + "tcId": 295, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : 
"33393439343031323135", - "sig" : "3045022100b1a4b1478e65cc3eafdf225d1298b43f2da19e4bcff7eacc0a2e98cd4b74b1140220179aa31e304cc142cf5073171751b28f3f5e0fa88c994e7c55f1bc07b8d56c16", - "result" : "valid" + "msg": "33393439343031323135", + "sig": "3045022100b1a4b1478e65cc3eafdf225d1298b43f2da19e4bcff7eacc0a2e98cd4b74b1140220179aa31e304cc142cf5073171751b28f3f5e0fa88c994e7c55f1bc07b8d56c16", + "result": "valid" }, { - "tcId" : 296, - "comment" : "special case hash", - "flags" : [ + "tcId": 296, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "31333434323933303739", - "sig" : "30440220325332021261f1bd18f2712aa1e2252da23796da8a4b1ff6ea18cafec7e171f2022040b4f5e287ee61fc3c804186982360891eaa35c75f05a43ecd48b35d984a6648", - "result" : "valid" + "msg": "31333434323933303739", + "sig": "30440220325332021261f1bd18f2712aa1e2252da23796da8a4b1ff6ea18cafec7e171f2022040b4f5e287ee61fc3c804186982360891eaa35c75f05a43ecd48b35d984a6648", + "result": "valid" }, { - "tcId" : 297, - "comment" : "special case hash", - "flags" : [ + "tcId": 297, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "33373036323131373132", - "sig" : "3045022100a23ad18d8fc66d81af0903890cbd453a554cb04cdc1a8ca7f7f78e5367ed88a0022023e3eb2ce1c04ea748c389bd97374aa9413b9268851c04dcd9f88e78813fee56", - "result" : "valid" + "msg": "33373036323131373132", + "sig": "3045022100a23ad18d8fc66d81af0903890cbd453a554cb04cdc1a8ca7f7f78e5367ed88a0022023e3eb2ce1c04ea748c389bd97374aa9413b9268851c04dcd9f88e78813fee56", + "result": "valid" }, { - "tcId" : 298, - "comment" : "special case hash", - "flags" : [ + "tcId": 298, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "333433363838373132", - "sig" : "304402202bdea41cda63a2d14bf47353bd20880a690901de7cd6e3cc6d8ed5ba0cdb109102203cea66bccfc9f9bf8c7ca4e1c1457cc9145e13e936d90b3d9c7786b8b26cf4c7", - "result" : "valid" + "msg": "333433363838373132", + "sig": "304402202bdea41cda63a2d14bf47353bd20880a690901de7cd6e3cc6d8ed5ba0cdb109102203cea66bccfc9f9bf8c7ca4e1c1457cc9145e13e936d90b3d9c7786b8b26cf4c7", + "result": "valid" }, { - "tcId" : 299, - "comment" : "special case hash", - "flags" : [ + "tcId": 299, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "31333531353330333730", - "sig" : "3045022100d7cd76ec01c1b1079eba9e2aa2a397243c4758c98a1ba0b7404a340b9b00ced602203575001e19d922e6de8b3d6c84ea43b5c3338106cf29990134e7669a826f78e6", - "result" : "valid" + "msg": "31333531353330333730", + "sig": "3045022100d7cd76ec01c1b1079eba9e2aa2a397243c4758c98a1ba0b7404a340b9b00ced602203575001e19d922e6de8b3d6c84ea43b5c3338106cf29990134e7669a826f78e6", + "result": "valid" }, { - "tcId" : 300, - "comment" : "special case hash", - "flags" : [ + "tcId": 300, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "36353533323033313236", - "sig" : "3045022100a872c744d936db21a10c361dd5c9063355f84902219652f6fc56dc95a7139d960220400df7575d9756210e9ccc77162c6b593c7746cfb48ac263c42750b421ef4bb9", - "result" : "valid" + "msg": "36353533323033313236", + "sig": "3045022100a872c744d936db21a10c361dd5c9063355f84902219652f6fc56dc95a7139d960220400df7575d9756210e9ccc77162c6b593c7746cfb48ac263c42750b421ef4bb9", + "result": "valid" }, { - "tcId" : 301, - "comment" : "special case hash", - "flags" : [ + "tcId": 301, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "31353634333436363033", - "sig" : 
"30450221009fa9afe07752da10b36d3afcd0fe44bfc40244d75203599cf8f5047fa3453854022050e0a7c013bfbf51819736972d44b4b56bc2a2b2c180df6ec672df171410d77a", - "result" : "valid" + "msg": "31353634333436363033", + "sig": "30450221009fa9afe07752da10b36d3afcd0fe44bfc40244d75203599cf8f5047fa3453854022050e0a7c013bfbf51819736972d44b4b56bc2a2b2c180df6ec672df171410d77a", + "result": "valid" }, { - "tcId" : 302, - "comment" : "special case hash", - "flags" : [ + "tcId": 302, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "34343239353339313137", - "sig" : "3045022100885640384d0d910efb177b46be6c3dc5cac81f0b88c3190bb6b5f99c2641f2050220738ed9bff116306d9caa0f8fc608be243e0b567779d8dab03e8e19d553f1dc8e", - "result" : "valid" + "msg": "34343239353339313137", + "sig": "3045022100885640384d0d910efb177b46be6c3dc5cac81f0b88c3190bb6b5f99c2641f2050220738ed9bff116306d9caa0f8fc608be243e0b567779d8dab03e8e19d553f1dc8e", + "result": "valid" }, { - "tcId" : 303, - "comment" : "special case hash", - "flags" : [ + "tcId": 303, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "3130393533323631333531", - "sig" : "304402202d051f91c5a9d440c5676985710483bc4f1a6c611b10c95a2ff0363d90c2a45802206ddf94e6fba5be586833d0c53cf216ad3948f37953c26c1cf4968e9a9e8243dc", - "result" : "valid" + "msg": "3130393533323631333531", + "sig": "304402202d051f91c5a9d440c5676985710483bc4f1a6c611b10c95a2ff0363d90c2a45802206ddf94e6fba5be586833d0c53cf216ad3948f37953c26c1cf4968e9a9e8243dc", + "result": "valid" }, { - "tcId" : 304, - "comment" : "special case hash", - "flags" : [ + "tcId": 304, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "35393837333530303431", - "sig" : "3045022100f3ac2523967482f53d508522712d583f4379cd824101ff635ea0935117baa54f022027f10812227397e02cea96fb0e680761636dab2b080d1fc5d11685cbe8500cfe", - "result" : "valid" + "msg": "35393837333530303431", + "sig": "3045022100f3ac2523967482f53d508522712d583f4379cd824101ff635ea0935117baa54f022027f10812227397e02cea96fb0e680761636dab2b080d1fc5d11685cbe8500cfe", + "result": "valid" }, { - "tcId" : 305, - "comment" : "special case hash", - "flags" : [ + "tcId": 305, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "33343633303036383738", - "sig" : "304502210096447cf68c3ab7266ed7447de3ac52fed7cc08cbdfea391c18a9b8ab370bc91302200f5e7874d3ac0e918f01c885a1639177c923f8660d1ceba1ca1f301bc675cdbc", - "result" : "valid" + "msg": "33343633303036383738", + "sig": "304502210096447cf68c3ab7266ed7447de3ac52fed7cc08cbdfea391c18a9b8ab370bc91302200f5e7874d3ac0e918f01c885a1639177c923f8660d1ceba1ca1f301bc675cdbc", + "result": "valid" }, { - "tcId" : 306, - "comment" : "special case hash", - "flags" : [ + "tcId": 306, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "39383137333230323837", - "sig" : "30440220530a0832b691da0b5619a0b11de6877f3c0971baaa68ed122758c29caaf46b7202206c89e44f5eb33060ea4b46318c39138eaedec72de42ba576579a6a4690e339f3", - "result" : "valid" + "msg": "39383137333230323837", + "sig": "30440220530a0832b691da0b5619a0b11de6877f3c0971baaa68ed122758c29caaf46b7202206c89e44f5eb33060ea4b46318c39138eaedec72de42ba576579a6a4690e339f3", + "result": "valid" }, { - "tcId" : 307, - "comment" : "special case hash", - "flags" : [ + "tcId": 307, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "33323232303431303436", - "sig" : 
"30450221009c54c25500bde0b92d72d6ec483dc2482f3654294ca74de796b681255ed58a770220677453c6b56f527631c9f67b3f3eb621fd88582b4aff156d2f1567d6211a2a33", - "result" : "valid" + "msg": "33323232303431303436", + "sig": "30450221009c54c25500bde0b92d72d6ec483dc2482f3654294ca74de796b681255ed58a770220677453c6b56f527631c9f67b3f3eb621fd88582b4aff156d2f1567d6211a2a33", + "result": "valid" }, { - "tcId" : 308, - "comment" : "special case hash", - "flags" : [ + "tcId": 308, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "36363636333037313034", - "sig" : "3045022100e7909d41439e2f6af29136c7348ca2641a2b070d5b64f91ea9da7070c7a2618b022042d782f132fa1d36c2c88ba27c3d678d80184a5d1eccac7501f0b47e3d205008", - "result" : "valid" + "msg": "36363636333037313034", + "sig": "3045022100e7909d41439e2f6af29136c7348ca2641a2b070d5b64f91ea9da7070c7a2618b022042d782f132fa1d36c2c88ba27c3d678d80184a5d1eccac7501f0b47e3d205008", + "result": "valid" }, { - "tcId" : 309, - "comment" : "special case hash", - "flags" : [ + "tcId": 309, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "31303335393531383938", - "sig" : "304402205924873209593135a4c3da7bb381227f8a4b6aa9f34fe5bb7f8fbc131a039ffe02201f1bb11b441c8feaa40f44213d9a405ed792d59fb49d5bcdd9a4285ae5693022", - "result" : "valid" + "msg": "31303335393531383938", + "sig": "304402205924873209593135a4c3da7bb381227f8a4b6aa9f34fe5bb7f8fbc131a039ffe02201f1bb11b441c8feaa40f44213d9a405ed792d59fb49d5bcdd9a4285ae5693022", + "result": "valid" }, { - "tcId" : 310, - "comment" : "special case hash", - "flags" : [ + "tcId": 310, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "31383436353937313935", - "sig" : "3045022100eeb692c9b262969b231c38b5a7f60649e0c875cd64df88f33aa571fa3d29ab0e0220218b3a1eb06379c2c18cf51b06430786d1c64cd2d24c9b232b23e5bac7989acd", - "result" : "valid" + "msg": "31383436353937313935", + "sig": "3045022100eeb692c9b262969b231c38b5a7f60649e0c875cd64df88f33aa571fa3d29ab0e0220218b3a1eb06379c2c18cf51b06430786d1c64cd2d24c9b232b23e5bac7989acd", + "result": "valid" }, { - "tcId" : 311, - "comment" : "special case hash", - "flags" : [ + "tcId": 311, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "33313336303436313839", - "sig" : "3045022100a40034177f36091c2b653684a0e3eb5d4bff18e4d09f664c2800e7cafda1daf802203a3ec29853704e52031c58927a800a968353adc3d973beba9172cbbeab4dd149", - "result" : "valid" + "msg": "33313336303436313839", + "sig": "3045022100a40034177f36091c2b653684a0e3eb5d4bff18e4d09f664c2800e7cafda1daf802203a3ec29853704e52031c58927a800a968353adc3d973beba9172cbbeab4dd149", + "result": "valid" }, { - "tcId" : 312, - "comment" : "special case hash", - "flags" : [ + "tcId": 312, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "32363633373834323534", - "sig" : "3045022100b5d795cc75cea5c434fa4185180cd6bd21223f3d5a86da6670d71d95680dadbf022054e4d8810a001ecbb9f7ca1c2ebfdb9d009e9031a431aca3c20ab4e0d1374ec1", - "result" : "valid" + "msg": "32363633373834323534", + "sig": "3045022100b5d795cc75cea5c434fa4185180cd6bd21223f3d5a86da6670d71d95680dadbf022054e4d8810a001ecbb9f7ca1c2ebfdb9d009e9031a431aca3c20ab4e0d1374ec1", + "result": "valid" }, { - "tcId" : 313, - "comment" : "special case hash", - "flags" : [ + "tcId": 313, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "31363532313030353234", - "sig" : 
"3044022007dc2478d43c1232a4595608c64426c35510051a631ae6a5a6eb1161e57e42e102204a59ea0fdb72d12165cea3bf1ca86ba97517bd188db3dbd21a5a157850021984", - "result" : "valid" + "msg": "31363532313030353234", + "sig": "3044022007dc2478d43c1232a4595608c64426c35510051a631ae6a5a6eb1161e57e42e102204a59ea0fdb72d12165cea3bf1ca86ba97517bd188db3dbd21a5a157850021984", + "result": "valid" }, { - "tcId" : 314, - "comment" : "special case hash", - "flags" : [ + "tcId": 314, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "35373438303831363936", - "sig" : "3045022100ddd20c4a05596ca868b558839fce9f6511ddd83d1ccb53f82e5269d559a0155202205b91734729d93093ff22123c4a25819d7feb66a250663fc780cb66fc7b6e6d17", - "result" : "valid" + "msg": "35373438303831363936", + "sig": "3045022100ddd20c4a05596ca868b558839fce9f6511ddd83d1ccb53f82e5269d559a0155202205b91734729d93093ff22123c4a25819d7feb66a250663fc780cb66fc7b6e6d17", + "result": "valid" }, { - "tcId" : 315, - "comment" : "special case hash", - "flags" : [ + "tcId": 315, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "36333433393133343638", - "sig" : "30450221009cde6e0ede0a003f02fda0a01b59facfe5dec063318f279ce2de7a9b1062f7b702202886a5b8c679bdf8224c66f908fd6205492cb70b0068d46ae4f33a4149b12a52", - "result" : "valid" + "msg": "36333433393133343638", + "sig": "30450221009cde6e0ede0a003f02fda0a01b59facfe5dec063318f279ce2de7a9b1062f7b702202886a5b8c679bdf8224c66f908fd6205492cb70b0068d46ae4f33a4149b12a52", + "result": "valid" }, { - "tcId" : 316, - "comment" : "special case hash", - "flags" : [ + "tcId": 316, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "31353431313033353938", - "sig" : "3045022100c5771016d0dd6357143c89f684cd740423502554c0c59aa8c99584f1ff38f609022054b405f4477546686e464c5463b4fd4190572e58d0f7e7357f6e61947d20715c", - "result" : "valid" + "msg": "31353431313033353938", + "sig": "3045022100c5771016d0dd6357143c89f684cd740423502554c0c59aa8c99584f1ff38f609022054b405f4477546686e464c5463b4fd4190572e58d0f7e7357f6e61947d20715c", + "result": "valid" }, { - "tcId" : 317, - "comment" : "special case hash", - "flags" : [ + "tcId": 317, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "3130343738353830313238", - "sig" : "3045022100a24ebc0ec224bd67ae397cbe6fa37b3125adbd34891abe2d7c7356921916dfe6022034f6eb6374731bbbafc4924fb8b0bdcdda49456d724cdae6178d87014cb53d8c", - "result" : "valid" + "msg": "3130343738353830313238", + "sig": "3045022100a24ebc0ec224bd67ae397cbe6fa37b3125adbd34891abe2d7c7356921916dfe6022034f6eb6374731bbbafc4924fb8b0bdcdda49456d724cdae6178d87014cb53d8c", + "result": "valid" }, { - "tcId" : 318, - "comment" : "special case hash", - "flags" : [ + "tcId": 318, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "3130353336323835353638", - "sig" : "304402202557d64a7aee2e0931c012e4fea1cd3a2c334edae68cdeb7158caf21b68e5a2402207f06cdbb6a90023a973882ed97b080fe6b05af3ec93db6f1a4399a69edf7670d", - "result" : "valid" + "msg": "3130353336323835353638", + "sig": "304402202557d64a7aee2e0931c012e4fea1cd3a2c334edae68cdeb7158caf21b68e5a2402207f06cdbb6a90023a973882ed97b080fe6b05af3ec93db6f1a4399a69edf7670d", + "result": "valid" }, { - "tcId" : 319, - "comment" : "special case hash", - "flags" : [ + "tcId": 319, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "393533393034313035", - "sig" : 
"3045022100c4f2eccbb6a24350c8466450b9d61b207ee359e037b3dcedb42a3f2e6dd6aeb502203263c6b59a2f55cdd1c6e14894d5e5963b28bc3e2469ac9ba1197991ca7ff9c7", - "result" : "valid" + "msg": "393533393034313035", + "sig": "3045022100c4f2eccbb6a24350c8466450b9d61b207ee359e037b3dcedb42a3f2e6dd6aeb502203263c6b59a2f55cdd1c6e14894d5e5963b28bc3e2469ac9ba1197991ca7ff9c7", + "result": "valid" }, { - "tcId" : 320, - "comment" : "special case hash", - "flags" : [ + "tcId": 320, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "393738383438303339", - "sig" : "3045022100eff04781c9cbcd162d0a25a6e2ebcca43506c523385cb515d49ea38a1b12fcad022015acd73194c91a95478534f23015b672ebed213e45424dd2c8e26ac8b3eb34a5", - "result" : "valid" + "msg": "393738383438303339", + "sig": "3045022100eff04781c9cbcd162d0a25a6e2ebcca43506c523385cb515d49ea38a1b12fcad022015acd73194c91a95478534f23015b672ebed213e45424dd2c8e26ac8b3eb34a5", + "result": "valid" }, { - "tcId" : 321, - "comment" : "special case hash", - "flags" : [ + "tcId": 321, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "33363130363732343432", - "sig" : "3045022100f58b4e3110a64bf1b5db97639ee0e5a9c8dfa49dc59b679891f520fdf0584c8702202cd8fe51888aee9db3e075440fd4db73b5c732fb87b510e97093d66415f62af7", - "result" : "valid" + "msg": "33363130363732343432", + "sig": "3045022100f58b4e3110a64bf1b5db97639ee0e5a9c8dfa49dc59b679891f520fdf0584c8702202cd8fe51888aee9db3e075440fd4db73b5c732fb87b510e97093d66415f62af7", + "result": "valid" }, { - "tcId" : 322, - "comment" : "special case hash", - "flags" : [ + "tcId": 322, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "31303534323430373035", - "sig" : "3045022100f8abecaa4f0c502de4bf5903d48417f786bf92e8ad72fec0bd7fcb7800c0bbe302204c7f9e231076a30b7ae36b0cebe69ccef1cd194f7cce93a5588fd6814f437c0e", - "result" : "valid" + "msg": "31303534323430373035", + "sig": "3045022100f8abecaa4f0c502de4bf5903d48417f786bf92e8ad72fec0bd7fcb7800c0bbe302204c7f9e231076a30b7ae36b0cebe69ccef1cd194f7cce93a5588fd6814f437c0e", + "result": "valid" }, { - "tcId" : 323, - "comment" : "special case hash", - "flags" : [ + "tcId": 323, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "35313734343438313937", - "sig" : "304402205d5b38bd37ad498b2227a633268a8cca879a5c7c94a4e416bd0a614d09e606d2022012b8d664ea9991062ecbb834e58400e25c46007af84f6007d7f1685443269afe", - "result" : "valid" + "msg": "35313734343438313937", + "sig": "304402205d5b38bd37ad498b2227a633268a8cca879a5c7c94a4e416bd0a614d09e606d2022012b8d664ea9991062ecbb834e58400e25c46007af84f6007d7f1685443269afe", + "result": "valid" }, { - "tcId" : 324, - "comment" : "special case hash", - "flags" : [ + "tcId": 324, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "31393637353631323531", - "sig" : "304402200c1cd9fe4034f086a2b52d65b9d3834d72aebe7f33dfe8f976da82648177d8e3022013105782e3d0cfe85c2778dec1a848b27ac0ae071aa6da341a9553a946b41e59", - "result" : "valid" + "msg": "31393637353631323531", + "sig": "304402200c1cd9fe4034f086a2b52d65b9d3834d72aebe7f33dfe8f976da82648177d8e3022013105782e3d0cfe85c2778dec1a848b27ac0ae071aa6da341a9553a946b41e59", + "result": "valid" }, { - "tcId" : 325, - "comment" : "special case hash", - "flags" : [ + "tcId": 325, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "33343437323533333433", - "sig" : 
"3045022100ae7935fb96ff246b7b5d5662870d1ba587b03d6e1360baf47988b5c02ccc1a5b02205f00c323272083782d4a59f2dfd65e49de0693627016900ef7e61428056664b3", - "result" : "valid" + "msg": "33343437323533333433", + "sig": "3045022100ae7935fb96ff246b7b5d5662870d1ba587b03d6e1360baf47988b5c02ccc1a5b02205f00c323272083782d4a59f2dfd65e49de0693627016900ef7e61428056664b3", + "result": "valid" }, { - "tcId" : 326, - "comment" : "special case hash", - "flags" : [ + "tcId": 326, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "333638323634333138", - "sig" : "3044022000a134b5c6ccbcefd4c882b945baeb4933444172795fa6796aae1490675470980220566e46105d24d890151e3eea3ebf88f5b92b3f5ec93a217765a6dcbd94f2c55b", - "result" : "valid" + "msg": "333638323634333138", + "sig": "3044022000a134b5c6ccbcefd4c882b945baeb4933444172795fa6796aae1490675470980220566e46105d24d890151e3eea3ebf88f5b92b3f5ec93a217765a6dcbd94f2c55b", + "result": "valid" }, { - "tcId" : 327, - "comment" : "special case hash", - "flags" : [ + "tcId": 327, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "33323631313938363038", - "sig" : "304402202e4721363ad3992c139e5a1c26395d2c2d777824aa24fde075e0d7381171309d0220740f7c494418e1300dd4512f782a58800bff6a7abdfdd20fbbd4f05515ca1a4f", - "result" : "valid" + "msg": "33323631313938363038", + "sig": "304402202e4721363ad3992c139e5a1c26395d2c2d777824aa24fde075e0d7381171309d0220740f7c494418e1300dd4512f782a58800bff6a7abdfdd20fbbd4f05515ca1a4f", + "result": "valid" }, { - "tcId" : 328, - "comment" : "special case hash", - "flags" : [ + "tcId": 328, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "39363738373831303934", - "sig" : "304402206852e9d3cd9fe373c2d504877967d365ab1456707b6817a042864694e1960ccf0220064b27ea142b30887b84c86adccb2fa39a6911ad21fc7e819f593be52bc4f3bd", - "result" : "valid" + "msg": "39363738373831303934", + "sig": "304402206852e9d3cd9fe373c2d504877967d365ab1456707b6817a042864694e1960ccf0220064b27ea142b30887b84c86adccb2fa39a6911ad21fc7e819f593be52bc4f3bd", + "result": "valid" }, { - "tcId" : 329, - "comment" : "special case hash", - "flags" : [ + "tcId": 329, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "34393538383233383233", - "sig" : "30440220188a8c5648dc79eace158cf886c62b5468f05fd95f03a7635c5b4c31f09af4c5022036361a0b571a00c6cd5e686ccbfcfa703c4f97e48938346d0c103fdc76dc5867", - "result" : "valid" + "msg": "34393538383233383233", + "sig": "30440220188a8c5648dc79eace158cf886c62b5468f05fd95f03a7635c5b4c31f09af4c5022036361a0b571a00c6cd5e686ccbfcfa703c4f97e48938346d0c103fdc76dc5867", + "result": "valid" }, { - "tcId" : 330, - "comment" : "special case hash", - "flags" : [ + "tcId": 330, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "383234363337383337", - "sig" : "3045022100a74f1fb9a8263f62fc4416a5b7d584f4206f3996bb91f6fc8e73b9e92bad0e1302206815032e8c7d76c3ab06a86f33249ce9940148cb36d1f417c2e992e801afa3fa", - "result" : "valid" + "msg": "383234363337383337", + "sig": "3045022100a74f1fb9a8263f62fc4416a5b7d584f4206f3996bb91f6fc8e73b9e92bad0e1302206815032e8c7d76c3ab06a86f33249ce9940148cb36d1f417c2e992e801afa3fa", + "result": "valid" }, { - "tcId" : 331, - "comment" : "special case hash", - "flags" : [ + "tcId": 331, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "3131303230383333373736", - "sig" : "3044022007244865b72ff37e62e3146f0dc14682badd7197799135f0b00ade7671742bfe02200d80c2238edb4e4a7a86a8c57ca9af1711f406f7f5da0299aa04e2932d960754", 
- "result" : "valid" + "msg": "3131303230383333373736", + "sig": "3044022007244865b72ff37e62e3146f0dc14682badd7197799135f0b00ade7671742bfe02200d80c2238edb4e4a7a86a8c57ca9af1711f406f7f5da0299aa04e2932d960754", + "result": "valid" }, { - "tcId" : 332, - "comment" : "special case hash", - "flags" : [ + "tcId": 332, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "313333383731363438", - "sig" : "3045022100da7fdd05b5badabd619d805c4ee7d9a84f84ddd5cf9c5bf4d4338140d689ef08022028f1cf4fa1c3c5862cfa149c0013cf5fe6cf5076cae000511063e7de25bb38e5", - "result" : "valid" + "msg": "313333383731363438", + "sig": "3045022100da7fdd05b5badabd619d805c4ee7d9a84f84ddd5cf9c5bf4d4338140d689ef08022028f1cf4fa1c3c5862cfa149c0013cf5fe6cf5076cae000511063e7de25bb38e5", + "result": "valid" }, { - "tcId" : 333, - "comment" : "special case hash", - "flags" : [ + "tcId": 333, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "333232313434313632", - "sig" : "3045022100d3027c656f6d4fdfd8ede22093e3c303b0133c340d615e7756f6253aea927238022009aef060c8e4cef972974011558df144fed25ca69ae8d0b2eaf1a8feefbec417", - "result" : "valid" + "msg": "333232313434313632", + "sig": "3045022100d3027c656f6d4fdfd8ede22093e3c303b0133c340d615e7756f6253aea927238022009aef060c8e4cef972974011558df144fed25ca69ae8d0b2eaf1a8feefbec417", + "result": "valid" }, { - "tcId" : 334, - "comment" : "special case hash", - "flags" : [ + "tcId": 334, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "3130363836363535353436", - "sig" : "304402200bf6c0188dc9571cd0e21eecac5fbb19d2434988e9cc10244593ef3a98099f6902204864a562661f9221ec88e3dd0bc2f6e27ac128c30cc1a80f79ec670a22b042ee", - "result" : "valid" + "msg": "3130363836363535353436", + "sig": "304402200bf6c0188dc9571cd0e21eecac5fbb19d2434988e9cc10244593ef3a98099f6902204864a562661f9221ec88e3dd0bc2f6e27ac128c30cc1a80f79ec670a22b042ee", + "result": "valid" }, { - "tcId" : 335, - "comment" : "special case hash", - "flags" : [ + "tcId": 335, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "3632313535323436", - "sig" : "3045022100ae459640d5d1179be47a47fa538e16d94ddea5585e7a244804a51742c686443a02206c8e30e530a634fae80b3ceb062978b39edbe19777e0a24553b68886181fd897", - "result" : "valid" + "msg": "3632313535323436", + "sig": "3045022100ae459640d5d1179be47a47fa538e16d94ddea5585e7a244804a51742c686443a02206c8e30e530a634fae80b3ceb062978b39edbe19777e0a24553b68886181fd897", + "result": "valid" }, { - "tcId" : 336, - "comment" : "special case hash", - "flags" : [ + "tcId": 336, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "37303330383138373734", - "sig" : "304402201cf3517ba3bf2ab8b9ead4ebb6e866cb88a1deacb6a785d3b63b483ca02ac4950220249a798b73606f55f5f1c70de67cb1a0cff95d7dc50b3a617df861bad3c6b1c9", - "result" : "valid" + "msg": "37303330383138373734", + "sig": "304402201cf3517ba3bf2ab8b9ead4ebb6e866cb88a1deacb6a785d3b63b483ca02ac4950220249a798b73606f55f5f1c70de67cb1a0cff95d7dc50b3a617df861bad3c6b1c9", + "result": "valid" }, { - "tcId" : 337, - "comment" : "special case hash", - "flags" : [ + "tcId": 337, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "35393234353233373434", - "sig" : "3045022100e69b5238265ea35d77e4dd172288d8cea19810a10292617d5976519dc5757cb802204b03c5bc47e826bdb27328abd38d3056d77476b2130f3df6ec4891af08ba1e29", - "result" : "valid" + "msg": "35393234353233373434", + "sig": 
"3045022100e69b5238265ea35d77e4dd172288d8cea19810a10292617d5976519dc5757cb802204b03c5bc47e826bdb27328abd38d3056d77476b2130f3df6ec4891af08ba1e29", + "result": "valid" }, { - "tcId" : 338, - "comment" : "special case hash", - "flags" : [ + "tcId": 338, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "31343935353836363231", - "sig" : "304402205f9d7d7c870d085fc1d49fff69e4a275812800d2cf8973e7325866cb40fa2b6f02206d1f5491d9f717a597a15fd540406486d76a44697b3f0d9d6dcef6669f8a0a56", - "result" : "valid" + "msg": "31343935353836363231", + "sig": "304402205f9d7d7c870d085fc1d49fff69e4a275812800d2cf8973e7325866cb40fa2b6f02206d1f5491d9f717a597a15fd540406486d76a44697b3f0d9d6dcef6669f8a0a56", + "result": "valid" }, { - "tcId" : 339, - "comment" : "special case hash", - "flags" : [ + "tcId": 339, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "34303035333134343036", - "sig" : "304402200a7d5b1959f71df9f817146ee49bd5c89b431e7993e2fdecab6858957da685ae02200f8aad2d254690bdc13f34a4fec44a02fd745a422df05ccbb54635a8b86b9609", - "result" : "valid" + "msg": "34303035333134343036", + "sig": "304402200a7d5b1959f71df9f817146ee49bd5c89b431e7993e2fdecab6858957da685ae02200f8aad2d254690bdc13f34a4fec44a02fd745a422df05ccbb54635a8b86b9609", + "result": "valid" }, { - "tcId" : 340, - "comment" : "special case hash", - "flags" : [ + "tcId": 340, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "33303936343537353132", - "sig" : "3044022079e88bf576b74bc07ca142395fda28f03d3d5e640b0b4ff0752c6d94cd553408022032cea05bd2d706c8f6036a507e2ab7766004f0904e2e5c5862749c0073245d6a", - "result" : "valid" + "msg": "33303936343537353132", + "sig": "3044022079e88bf576b74bc07ca142395fda28f03d3d5e640b0b4ff0752c6d94cd553408022032cea05bd2d706c8f6036a507e2ab7766004f0904e2e5c5862749c0073245d6a", + "result": "valid" }, { - "tcId" : 341, - "comment" : "special case hash", - "flags" : [ + "tcId": 341, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "32373834303235363230", - "sig" : "30450221009d54e037a00212b377bc8874798b8da080564bbdf7e07591b861285809d01488022018b4e557667a82bd95965f0706f81a29243fbdd86968a7ebeb43069db3b18c7f", - "result" : "valid" + "msg": "32373834303235363230", + "sig": "30450221009d54e037a00212b377bc8874798b8da080564bbdf7e07591b861285809d01488022018b4e557667a82bd95965f0706f81a29243fbdd86968a7ebeb43069db3b18c7f", + "result": "valid" }, { - "tcId" : 342, - "comment" : "special case hash", - "flags" : [ + "tcId": 342, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "32363138373837343138", - "sig" : "304402202664f1ffa982fedbcc7cab1b8bc6e2cb420218d2a6077ad08e591ba9feab33bd022049f5c7cb515e83872a3d41b4cdb85f242ad9d61a5bfc01debfbb52c6c84ba728", - "result" : "valid" + "msg": "32363138373837343138", + "sig": "304402202664f1ffa982fedbcc7cab1b8bc6e2cb420218d2a6077ad08e591ba9feab33bd022049f5c7cb515e83872a3d41b4cdb85f242ad9d61a5bfc01debfbb52c6c84ba728", + "result": "valid" }, { - "tcId" : 343, - "comment" : "special case hash", - "flags" : [ + "tcId": 343, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "31363432363235323632", - "sig" : "304402205827518344844fd6a7de73cbb0a6befdea7b13d2dee4475317f0f18ffc81524b02204f5ccb4e0b488b5a5d760aacddb2d791970fe43da61eb30e2e90208a817e46db", - "result" : "valid" + "msg": "31363432363235323632", + "sig": 
"304402205827518344844fd6a7de73cbb0a6befdea7b13d2dee4475317f0f18ffc81524b02204f5ccb4e0b488b5a5d760aacddb2d791970fe43da61eb30e2e90208a817e46db", + "result": "valid" }, { - "tcId" : 344, - "comment" : "special case hash", - "flags" : [ + "tcId": 344, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "36383234313839343336", - "sig" : "304502210097ab19bd139cac319325869218b1bce111875d63fb12098a04b0cd59b6fdd3a30220431d9cea3a243847303cebda56476431d034339f31d785ee8852db4f040d4921", - "result" : "valid" + "msg": "36383234313839343336", + "sig": "304502210097ab19bd139cac319325869218b1bce111875d63fb12098a04b0cd59b6fdd3a30220431d9cea3a243847303cebda56476431d034339f31d785ee8852db4f040d4921", + "result": "valid" }, { - "tcId" : 345, - "comment" : "special case hash", - "flags" : [ + "tcId": 345, + "comment": "special case hash", + "flags": [ "SpecialCaseHash" ], - "msg" : "343834323435343235", - "sig" : "3044022052c683144e44119ae2013749d4964ef67509278f6d38ba869adcfa69970e123d02203479910167408f45bda420a626ec9c4ec711c1274be092198b4187c018b562ca", - "result" : "valid" + "msg": "343834323435343235", + "sig": "3044022052c683144e44119ae2013749d4964ef67509278f6d38ba869adcfa69970e123d02203479910167408f45bda420a626ec9c4ec711c1274be092198b4187c018b562ca", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "0407310f90a9eae149a08402f54194a0f7b4ac427bf8d9bd6c7681071dc47dc36226a6d37ac46d61fd600c0bf1bff87689ed117dda6b0e59318ae010a197a26ca0", - "wx" : "07310f90a9eae149a08402f54194a0f7b4ac427bf8d9bd6c7681071dc47dc362", - "wy" : "26a6d37ac46d61fd600c0bf1bff87689ed117dda6b0e59318ae010a197a26ca0" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "0407310f90a9eae149a08402f54194a0f7b4ac427bf8d9bd6c7681071dc47dc36226a6d37ac46d61fd600c0bf1bff87689ed117dda6b0e59318ae010a197a26ca0", + "wx": "07310f90a9eae149a08402f54194a0f7b4ac427bf8d9bd6c7681071dc47dc362", + "wy": "26a6d37ac46d61fd600c0bf1bff87689ed117dda6b0e59318ae010a197a26ca0" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000407310f90a9eae149a08402f54194a0f7b4ac427bf8d9bd6c7681071dc47dc36226a6d37ac46d61fd600c0bf1bff87689ed117dda6b0e59318ae010a197a26ca0", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEBzEPkKnq4UmghAL1QZSg97SsQnv42b1s\ndoEHHcR9w2ImptN6xG1h/WAMC/G/+HaJ7RF92msOWTGK4BChl6JsoA==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000407310f90a9eae149a08402f54194a0f7b4ac427bf8d9bd6c7681071dc47dc36226a6d37ac46d61fd600c0bf1bff87689ed117dda6b0e59318ae010a197a26ca0", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEBzEPkKnq4UmghAL1QZSg97SsQnv42b1s\ndoEHHcR9w2ImptN6xG1h/WAMC/G/+HaJ7RF92msOWTGK4BChl6JsoA==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 346, - "comment" : "k*G has a large x-coordinate", - "flags" : [ + "tcId": 346, + "comment": "k*G has a large x-coordinate", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "30160211014551231950b75fc4402da1722fc9baeb020103", - "result" : "valid" + "msg": "313233343030", + "sig": "30160211014551231950b75fc4402da1722fc9baeb020103", + "result": "valid" }, { - "tcId" : 347, - "comment" : "r too large", - "flags" : [ + "tcId": 347, + "comment": "r too large", + "flags": [ "ArithmeticError" ], - 
"msg" : "313233343030", - "sig" : "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2c020103", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2c020103", + "result": "invalid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04bc97e7585eecad48e16683bc4091708e1a930c683fc47001d4b383594f2c4e22705989cf69daeadd4e4e4b8151ed888dfec20fb01728d89d56b3f38f2ae9c8c5", - "wx" : "00bc97e7585eecad48e16683bc4091708e1a930c683fc47001d4b383594f2c4e22", - "wy" : "705989cf69daeadd4e4e4b8151ed888dfec20fb01728d89d56b3f38f2ae9c8c5" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04bc97e7585eecad48e16683bc4091708e1a930c683fc47001d4b383594f2c4e22705989cf69daeadd4e4e4b8151ed888dfec20fb01728d89d56b3f38f2ae9c8c5", + "wx": "00bc97e7585eecad48e16683bc4091708e1a930c683fc47001d4b383594f2c4e22", + "wy": "705989cf69daeadd4e4e4b8151ed888dfec20fb01728d89d56b3f38f2ae9c8c5" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004bc97e7585eecad48e16683bc4091708e1a930c683fc47001d4b383594f2c4e22705989cf69daeadd4e4e4b8151ed888dfec20fb01728d89d56b3f38f2ae9c8c5", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEvJfnWF7srUjhZoO8QJFwjhqTDGg/xHAB\n1LODWU8sTiJwWYnPadrq3U5OS4FR7YiN/sIPsBco2J1Ws/OPKunIxQ==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004bc97e7585eecad48e16683bc4091708e1a930c683fc47001d4b383594f2c4e22705989cf69daeadd4e4e4b8151ed888dfec20fb01728d89d56b3f38f2ae9c8c5", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEvJfnWF7srUjhZoO8QJFwjhqTDGg/xHAB\n1LODWU8sTiJwWYnPadrq3U5OS4FR7YiN/sIPsBco2J1Ws/OPKunIxQ==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 348, - "comment" : "r,s are large", - "flags" : [ + "tcId": 348, + "comment": "r,s are large", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd036413f020103", - "result" : "valid" + "msg": "313233343030", + "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd036413f020103", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "0444ad339afbc21e9abf7b602a5ca535ea378135b6d10d81310bdd8293d1df3252b63ff7d0774770f8fe1d1722fa83acd02f434e4fc110a0cc8f6dddd37d56c463", - "wx" : "44ad339afbc21e9abf7b602a5ca535ea378135b6d10d81310bdd8293d1df3252", - "wy" : "00b63ff7d0774770f8fe1d1722fa83acd02f434e4fc110a0cc8f6dddd37d56c463" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "0444ad339afbc21e9abf7b602a5ca535ea378135b6d10d81310bdd8293d1df3252b63ff7d0774770f8fe1d1722fa83acd02f434e4fc110a0cc8f6dddd37d56c463", + "wx": "44ad339afbc21e9abf7b602a5ca535ea378135b6d10d81310bdd8293d1df3252", + "wy": "00b63ff7d0774770f8fe1d1722fa83acd02f434e4fc110a0cc8f6dddd37d56c463" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000444ad339afbc21e9abf7b602a5ca535ea378135b6d10d81310bdd8293d1df3252b63ff7d0774770f8fe1d1722fa83acd02f434e4fc110a0cc8f6dddd37d56c463", - "publicKeyPem" : "-----BEGIN PUBLIC 
KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAERK0zmvvCHpq/e2AqXKU16jeBNbbRDYEx\nC92Ck9HfMlK2P/fQd0dw+P4dFyL6g6zQL0NOT8EQoMyPbd3TfVbEYw==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000444ad339afbc21e9abf7b602a5ca535ea378135b6d10d81310bdd8293d1df3252b63ff7d0774770f8fe1d1722fa83acd02f434e4fc110a0cc8f6dddd37d56c463", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAERK0zmvvCHpq/e2AqXKU16jeBNbbRDYEx\nC92Ck9HfMlK2P/fQd0dw+P4dFyL6g6zQL0NOT8EQoMyPbd3TfVbEYw==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 349, - "comment" : "r and s^-1 have a large Hamming weight", - "flags" : [ + "tcId": 349, + "comment": "r and s^-1 have a large Hamming weight", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02203e9a7582886089c62fb840cf3b83061cd1cff3ae4341808bb5bdee6191174177", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02203e9a7582886089c62fb840cf3b83061cd1cff3ae4341808bb5bdee6191174177", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "041260c2122c9e244e1af5151bede0c3ae23b54d7c596881d3eebad21f37dd878c5c9a0c1a9ade76737a8811bd6a7f9287c978ee396aa89c11e47229d2ccb552f0", - "wx" : "1260c2122c9e244e1af5151bede0c3ae23b54d7c596881d3eebad21f37dd878c", - "wy" : "5c9a0c1a9ade76737a8811bd6a7f9287c978ee396aa89c11e47229d2ccb552f0" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "041260c2122c9e244e1af5151bede0c3ae23b54d7c596881d3eebad21f37dd878c5c9a0c1a9ade76737a8811bd6a7f9287c978ee396aa89c11e47229d2ccb552f0", + "wx": "1260c2122c9e244e1af5151bede0c3ae23b54d7c596881d3eebad21f37dd878c", + "wy": "5c9a0c1a9ade76737a8811bd6a7f9287c978ee396aa89c11e47229d2ccb552f0" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200041260c2122c9e244e1af5151bede0c3ae23b54d7c596881d3eebad21f37dd878c5c9a0c1a9ade76737a8811bd6a7f9287c978ee396aa89c11e47229d2ccb552f0", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEEmDCEiyeJE4a9RUb7eDDriO1TXxZaIHT\n7rrSHzfdh4xcmgwamt52c3qIEb1qf5KHyXjuOWqonBHkcinSzLVS8A==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200041260c2122c9e244e1af5151bede0c3ae23b54d7c596881d3eebad21f37dd878c5c9a0c1a9ade76737a8811bd6a7f9287c978ee396aa89c11e47229d2ccb552f0", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEEmDCEiyeJE4a9RUb7eDDriO1TXxZaIHT\n7rrSHzfdh4xcmgwamt52c3qIEb1qf5KHyXjuOWqonBHkcinSzLVS8A==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 350, - "comment" : "r and s^-1 have a large Hamming weight", - "flags" : [ + "tcId": 350, + "comment": "r and s^-1 have a large Hamming weight", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022024238e70b431b1a64efdf9032669939d4b77f249503fc6905feb7540dea3e6d2", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022024238e70b431b1a64efdf9032669939d4b77f249503fc6905feb7540dea3e6d2", + "result": "valid" } ] }, { - "type" : 
"EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "041877045be25d34a1d0600f9d5c00d0645a2a54379b6ceefad2e6bf5c2a3352ce821a532cc1751ee1d36d41c3d6ab4e9b143e44ec46d73478ea6a79a5c0e54159", - "wx" : "1877045be25d34a1d0600f9d5c00d0645a2a54379b6ceefad2e6bf5c2a3352ce", - "wy" : "00821a532cc1751ee1d36d41c3d6ab4e9b143e44ec46d73478ea6a79a5c0e54159" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "041877045be25d34a1d0600f9d5c00d0645a2a54379b6ceefad2e6bf5c2a3352ce821a532cc1751ee1d36d41c3d6ab4e9b143e44ec46d73478ea6a79a5c0e54159", + "wx": "1877045be25d34a1d0600f9d5c00d0645a2a54379b6ceefad2e6bf5c2a3352ce", + "wy": "00821a532cc1751ee1d36d41c3d6ab4e9b143e44ec46d73478ea6a79a5c0e54159" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200041877045be25d34a1d0600f9d5c00d0645a2a54379b6ceefad2e6bf5c2a3352ce821a532cc1751ee1d36d41c3d6ab4e9b143e44ec46d73478ea6a79a5c0e54159", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEGHcEW+JdNKHQYA+dXADQZFoqVDebbO76\n0ua/XCozUs6CGlMswXUe4dNtQcPWq06bFD5E7EbXNHjqanmlwOVBWQ==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200041877045be25d34a1d0600f9d5c00d0645a2a54379b6ceefad2e6bf5c2a3352ce821a532cc1751ee1d36d41c3d6ab4e9b143e44ec46d73478ea6a79a5c0e54159", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEGHcEW+JdNKHQYA+dXADQZFoqVDebbO76\n0ua/XCozUs6CGlMswXUe4dNtQcPWq06bFD5E7EbXNHjqanmlwOVBWQ==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 351, - "comment" : "small r and s", - "flags" : [ + "tcId": 351, + "comment": "small r and s", + "flags": [ "SmallRandS", "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3006020101020101", - "result" : "valid" + "msg": "313233343030", + "sig": "3006020101020101", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04455439fcc3d2deeceddeaece60e7bd17304f36ebb602adf5a22e0b8f1db46a50aec38fb2baf221e9a8d1887c7bf6222dd1834634e77263315af6d23609d04f77", - "wx" : "455439fcc3d2deeceddeaece60e7bd17304f36ebb602adf5a22e0b8f1db46a50", - "wy" : "00aec38fb2baf221e9a8d1887c7bf6222dd1834634e77263315af6d23609d04f77" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04455439fcc3d2deeceddeaece60e7bd17304f36ebb602adf5a22e0b8f1db46a50aec38fb2baf221e9a8d1887c7bf6222dd1834634e77263315af6d23609d04f77", + "wx": "455439fcc3d2deeceddeaece60e7bd17304f36ebb602adf5a22e0b8f1db46a50", + "wy": "00aec38fb2baf221e9a8d1887c7bf6222dd1834634e77263315af6d23609d04f77" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004455439fcc3d2deeceddeaece60e7bd17304f36ebb602adf5a22e0b8f1db46a50aec38fb2baf221e9a8d1887c7bf6222dd1834634e77263315af6d23609d04f77", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAERVQ5/MPS3uzt3q7OYOe9FzBPNuu2Aq31\noi4Ljx20alCuw4+yuvIh6ajRiHx79iIt0YNGNOdyYzFa9tI2CdBPdw==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004455439fcc3d2deeceddeaece60e7bd17304f36ebb602adf5a22e0b8f1db46a50aec38fb2baf221e9a8d1887c7bf6222dd1834634e77263315af6d23609d04f77", + "publicKeyPem": "-----BEGIN PUBLIC 
KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAERVQ5/MPS3uzt3q7OYOe9FzBPNuu2Aq31\noi4Ljx20alCuw4+yuvIh6ajRiHx79iIt0YNGNOdyYzFa9tI2CdBPdw==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 352, - "comment" : "small r and s", - "flags" : [ + "tcId": 352, + "comment": "small r and s", + "flags": [ "SmallRandS", "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3006020101020102", - "result" : "valid" + "msg": "313233343030", + "sig": "3006020101020102", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "042e1f466b024c0c3ace2437de09127fed04b706f94b19a21bb1c2acf35cece7180449ae3523d72534e964972cfd3b38af0bddd9619e5af223e4d1a40f34cf9f1d", - "wx" : "2e1f466b024c0c3ace2437de09127fed04b706f94b19a21bb1c2acf35cece718", - "wy" : "0449ae3523d72534e964972cfd3b38af0bddd9619e5af223e4d1a40f34cf9f1d" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "042e1f466b024c0c3ace2437de09127fed04b706f94b19a21bb1c2acf35cece7180449ae3523d72534e964972cfd3b38af0bddd9619e5af223e4d1a40f34cf9f1d", + "wx": "2e1f466b024c0c3ace2437de09127fed04b706f94b19a21bb1c2acf35cece718", + "wy": "0449ae3523d72534e964972cfd3b38af0bddd9619e5af223e4d1a40f34cf9f1d" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200042e1f466b024c0c3ace2437de09127fed04b706f94b19a21bb1c2acf35cece7180449ae3523d72534e964972cfd3b38af0bddd9619e5af223e4d1a40f34cf9f1d", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAELh9GawJMDDrOJDfeCRJ/7QS3BvlLGaIb\nscKs81zs5xgESa41I9clNOlklyz9OzivC93ZYZ5a8iPk0aQPNM+fHQ==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200042e1f466b024c0c3ace2437de09127fed04b706f94b19a21bb1c2acf35cece7180449ae3523d72534e964972cfd3b38af0bddd9619e5af223e4d1a40f34cf9f1d", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAELh9GawJMDDrOJDfeCRJ/7QS3BvlLGaIb\nscKs81zs5xgESa41I9clNOlklyz9OzivC93ZYZ5a8iPk0aQPNM+fHQ==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 353, - "comment" : "small r and s", - "flags" : [ + "tcId": 353, + "comment": "small r and s", + "flags": [ "SmallRandS", "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3006020101020103", - "result" : "valid" + "msg": "313233343030", + "sig": "3006020101020103", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "048e7abdbbd18de7452374c1879a1c3b01d13261e7d4571c3b47a1c76c55a2337326ed897cd517a4f5349db809780f6d2f2b9f6299d8b5a89077f1119a718fd7b3", - "wx" : "008e7abdbbd18de7452374c1879a1c3b01d13261e7d4571c3b47a1c76c55a23373", - "wy" : "26ed897cd517a4f5349db809780f6d2f2b9f6299d8b5a89077f1119a718fd7b3" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "048e7abdbbd18de7452374c1879a1c3b01d13261e7d4571c3b47a1c76c55a2337326ed897cd517a4f5349db809780f6d2f2b9f6299d8b5a89077f1119a718fd7b3", + "wx": "008e7abdbbd18de7452374c1879a1c3b01d13261e7d4571c3b47a1c76c55a23373", + "wy": "26ed897cd517a4f5349db809780f6d2f2b9f6299d8b5a89077f1119a718fd7b3" }, - "publicKeyDer" : 
"3056301006072a8648ce3d020106052b8104000a034200048e7abdbbd18de7452374c1879a1c3b01d13261e7d4571c3b47a1c76c55a2337326ed897cd517a4f5349db809780f6d2f2b9f6299d8b5a89077f1119a718fd7b3", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEjnq9u9GN50UjdMGHmhw7AdEyYefUVxw7\nR6HHbFWiM3Mm7Yl81Rek9TSduAl4D20vK59imdi1qJB38RGacY/Xsw==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200048e7abdbbd18de7452374c1879a1c3b01d13261e7d4571c3b47a1c76c55a2337326ed897cd517a4f5349db809780f6d2f2b9f6299d8b5a89077f1119a718fd7b3", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEjnq9u9GN50UjdMGHmhw7AdEyYefUVxw7\nR6HHbFWiM3Mm7Yl81Rek9TSduAl4D20vK59imdi1qJB38RGacY/Xsw==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 354, - "comment" : "small r and s", - "flags" : [ + "tcId": 354, + "comment": "small r and s", + "flags": [ "SmallRandS", "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3006020102020101", - "result" : "valid" + "msg": "313233343030", + "sig": "3006020102020101", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "047b333d4340d3d718dd3e6aff7de7bbf8b72bfd616c8420056052842376b9af1942117c5afeac755d6f376fc6329a7d76051b87123a4a5d0bc4a539380f03de7b", - "wx" : "7b333d4340d3d718dd3e6aff7de7bbf8b72bfd616c8420056052842376b9af19", - "wy" : "42117c5afeac755d6f376fc6329a7d76051b87123a4a5d0bc4a539380f03de7b" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "047b333d4340d3d718dd3e6aff7de7bbf8b72bfd616c8420056052842376b9af1942117c5afeac755d6f376fc6329a7d76051b87123a4a5d0bc4a539380f03de7b", + "wx": "7b333d4340d3d718dd3e6aff7de7bbf8b72bfd616c8420056052842376b9af19", + "wy": "42117c5afeac755d6f376fc6329a7d76051b87123a4a5d0bc4a539380f03de7b" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200047b333d4340d3d718dd3e6aff7de7bbf8b72bfd616c8420056052842376b9af1942117c5afeac755d6f376fc6329a7d76051b87123a4a5d0bc4a539380f03de7b", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEezM9Q0DT1xjdPmr/fee7+Lcr/WFshCAF\nYFKEI3a5rxlCEXxa/qx1XW83b8Yymn12BRuHEjpKXQvEpTk4DwPeew==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200047b333d4340d3d718dd3e6aff7de7bbf8b72bfd616c8420056052842376b9af1942117c5afeac755d6f376fc6329a7d76051b87123a4a5d0bc4a539380f03de7b", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEezM9Q0DT1xjdPmr/fee7+Lcr/WFshCAF\nYFKEI3a5rxlCEXxa/qx1XW83b8Yymn12BRuHEjpKXQvEpTk4DwPeew==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 355, - "comment" : "small r and s", - "flags" : [ + "tcId": 355, + "comment": "small r and s", + "flags": [ "SmallRandS", "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3006020102020102", - "result" : "valid" + "msg": "313233343030", + "sig": "3006020102020102", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04d30ca4a0ddb6616c851d30ced682c40f83c62758a1f2759988d6763a88f1c0e503a80d5415650d41239784e8e2fb1235e9fe991d112ebb81186cbf0da2de3aff", - "wx" : "00d30ca4a0ddb6616c851d30ced682c40f83c62758a1f2759988d6763a88f1c0e5", - "wy" 
: "03a80d5415650d41239784e8e2fb1235e9fe991d112ebb81186cbf0da2de3aff" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04d30ca4a0ddb6616c851d30ced682c40f83c62758a1f2759988d6763a88f1c0e503a80d5415650d41239784e8e2fb1235e9fe991d112ebb81186cbf0da2de3aff", + "wx": "00d30ca4a0ddb6616c851d30ced682c40f83c62758a1f2759988d6763a88f1c0e5", + "wy": "03a80d5415650d41239784e8e2fb1235e9fe991d112ebb81186cbf0da2de3aff" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004d30ca4a0ddb6616c851d30ced682c40f83c62758a1f2759988d6763a88f1c0e503a80d5415650d41239784e8e2fb1235e9fe991d112ebb81186cbf0da2de3aff", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE0wykoN22YWyFHTDO1oLED4PGJ1ih8nWZ\niNZ2OojxwOUDqA1UFWUNQSOXhOji+xI16f6ZHREuu4EYbL8Not46/w==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004d30ca4a0ddb6616c851d30ced682c40f83c62758a1f2759988d6763a88f1c0e503a80d5415650d41239784e8e2fb1235e9fe991d112ebb81186cbf0da2de3aff", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE0wykoN22YWyFHTDO1oLED4PGJ1ih8nWZ\niNZ2OojxwOUDqA1UFWUNQSOXhOji+xI16f6ZHREuu4EYbL8Not46/w==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 356, - "comment" : "small r and s", - "flags" : [ + "tcId": 356, + "comment": "small r and s", + "flags": [ "SmallRandS", "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3006020102020103", - "result" : "valid" + "msg": "313233343030", + "sig": "3006020102020103", + "result": "valid" }, { - "tcId" : 357, - "comment" : "r is larger than n", - "flags" : [ + "tcId": 357, + "comment": "r is larger than n", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364143020103", - "result" : "invalid" + "msg": "313233343030", + "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364143020103", + "result": "invalid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "0448969b39991297b332a652d3ee6e01e909b39904e71fa2354a7830c7750baf24b4012d1b830d199ccb1fc972b32bfded55f09cd62d257e5e844e27e57a1594ec", - "wx" : "48969b39991297b332a652d3ee6e01e909b39904e71fa2354a7830c7750baf24", - "wy" : "00b4012d1b830d199ccb1fc972b32bfded55f09cd62d257e5e844e27e57a1594ec" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "0448969b39991297b332a652d3ee6e01e909b39904e71fa2354a7830c7750baf24b4012d1b830d199ccb1fc972b32bfded55f09cd62d257e5e844e27e57a1594ec", + "wx": "48969b39991297b332a652d3ee6e01e909b39904e71fa2354a7830c7750baf24", + "wy": "00b4012d1b830d199ccb1fc972b32bfded55f09cd62d257e5e844e27e57a1594ec" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000448969b39991297b332a652d3ee6e01e909b39904e71fa2354a7830c7750baf24b4012d1b830d199ccb1fc972b32bfded55f09cd62d257e5e844e27e57a1594ec", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAESJabOZkSl7MyplLT7m4B6QmzmQTnH6I1\nSngwx3ULryS0AS0bgw0ZnMsfyXKzK/3tVfCc1i0lfl6ETiflehWU7A==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": 
"3056301006072a8648ce3d020106052b8104000a0342000448969b39991297b332a652d3ee6e01e909b39904e71fa2354a7830c7750baf24b4012d1b830d199ccb1fc972b32bfded55f09cd62d257e5e844e27e57a1594ec", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAESJabOZkSl7MyplLT7m4B6QmzmQTnH6I1\nSngwx3ULryS0AS0bgw0ZnMsfyXKzK/3tVfCc1i0lfl6ETiflehWU7A==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 358, - "comment" : "s is larger than n", - "flags" : [ + "tcId": 358, + "comment": "s is larger than n", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "30080201020203ed2979", - "result" : "invalid" + "msg": "313233343030", + "sig": "30080201020203ed2979", + "result": "invalid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "0402ef4d6d6cfd5a94f1d7784226e3e2a6c0a436c55839619f38fb4472b5f9ee777eb4acd4eebda5cd72875ffd2a2f26229c2dc6b46500919a432c86739f3ae866", - "wx" : "02ef4d6d6cfd5a94f1d7784226e3e2a6c0a436c55839619f38fb4472b5f9ee77", - "wy" : "7eb4acd4eebda5cd72875ffd2a2f26229c2dc6b46500919a432c86739f3ae866" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "0402ef4d6d6cfd5a94f1d7784226e3e2a6c0a436c55839619f38fb4472b5f9ee777eb4acd4eebda5cd72875ffd2a2f26229c2dc6b46500919a432c86739f3ae866", + "wx": "02ef4d6d6cfd5a94f1d7784226e3e2a6c0a436c55839619f38fb4472b5f9ee77", + "wy": "7eb4acd4eebda5cd72875ffd2a2f26229c2dc6b46500919a432c86739f3ae866" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000402ef4d6d6cfd5a94f1d7784226e3e2a6c0a436c55839619f38fb4472b5f9ee777eb4acd4eebda5cd72875ffd2a2f26229c2dc6b46500919a432c86739f3ae866", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEAu9NbWz9WpTx13hCJuPipsCkNsVYOWGf\nOPtEcrX57nd+tKzU7r2lzXKHX/0qLyYinC3GtGUAkZpDLIZznzroZg==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000402ef4d6d6cfd5a94f1d7784226e3e2a6c0a436c55839619f38fb4472b5f9ee777eb4acd4eebda5cd72875ffd2a2f26229c2dc6b46500919a432c86739f3ae866", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEAu9NbWz9WpTx13hCJuPipsCkNsVYOWGf\nOPtEcrX57nd+tKzU7r2lzXKHX/0qLyYinC3GtGUAkZpDLIZznzroZg==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 359, - "comment" : "small r and s^-1", - "flags" : [ + "tcId": 359, + "comment": "small r and s^-1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "30260202010102203a74e9d3a74e9d3a74e9d3a74e9d3a749f8ab3732a0a89604a09bce5b2916da4", - "result" : "valid" + "msg": "313233343030", + "sig": "30260202010102203a74e9d3a74e9d3a74e9d3a74e9d3a749f8ab3732a0a89604a09bce5b2916da4", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04464f4ff715729cae5072ca3bd801d3195b67aec65e9b01aad20a2943dcbcb584b1afd29d31a39a11d570aa1597439b3b2d1971bf2f1abf15432d0207b10d1d08", - "wx" : "464f4ff715729cae5072ca3bd801d3195b67aec65e9b01aad20a2943dcbcb584", - "wy" : "00b1afd29d31a39a11d570aa1597439b3b2d1971bf2f1abf15432d0207b10d1d08" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": 
"04464f4ff715729cae5072ca3bd801d3195b67aec65e9b01aad20a2943dcbcb584b1afd29d31a39a11d570aa1597439b3b2d1971bf2f1abf15432d0207b10d1d08", + "wx": "464f4ff715729cae5072ca3bd801d3195b67aec65e9b01aad20a2943dcbcb584", + "wy": "00b1afd29d31a39a11d570aa1597439b3b2d1971bf2f1abf15432d0207b10d1d08" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004464f4ff715729cae5072ca3bd801d3195b67aec65e9b01aad20a2943dcbcb584b1afd29d31a39a11d570aa1597439b3b2d1971bf2f1abf15432d0207b10d1d08", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAERk9P9xVynK5Qcso72AHTGVtnrsZemwGq\n0gopQ9y8tYSxr9KdMaOaEdVwqhWXQ5s7LRlxvy8avxVDLQIHsQ0dCA==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004464f4ff715729cae5072ca3bd801d3195b67aec65e9b01aad20a2943dcbcb584b1afd29d31a39a11d570aa1597439b3b2d1971bf2f1abf15432d0207b10d1d08", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAERk9P9xVynK5Qcso72AHTGVtnrsZemwGq\n0gopQ9y8tYSxr9KdMaOaEdVwqhWXQ5s7LRlxvy8avxVDLQIHsQ0dCA==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 360, - "comment" : "smallish r and s^-1", - "flags" : [ + "tcId": 360, + "comment": "smallish r and s^-1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "302b02072d9b4d347952cc02200343aefc2f25d98b882e86eb9e30d55a6eb508b516510b34024ae4b6362330b3", - "result" : "valid" + "msg": "313233343030", + "sig": "302b02072d9b4d347952cc02200343aefc2f25d98b882e86eb9e30d55a6eb508b516510b34024ae4b6362330b3", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04157f8fddf373eb5f49cfcf10d8b853cf91cbcd7d665c3522ba7dd738ddb79a4cdeadf1a5c448ea3c9f4191a8999abfcc757ac6d64567ef072c47fec613443b8f", - "wx" : "157f8fddf373eb5f49cfcf10d8b853cf91cbcd7d665c3522ba7dd738ddb79a4c", - "wy" : "00deadf1a5c448ea3c9f4191a8999abfcc757ac6d64567ef072c47fec613443b8f" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04157f8fddf373eb5f49cfcf10d8b853cf91cbcd7d665c3522ba7dd738ddb79a4cdeadf1a5c448ea3c9f4191a8999abfcc757ac6d64567ef072c47fec613443b8f", + "wx": "157f8fddf373eb5f49cfcf10d8b853cf91cbcd7d665c3522ba7dd738ddb79a4c", + "wy": "00deadf1a5c448ea3c9f4191a8999abfcc757ac6d64567ef072c47fec613443b8f" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004157f8fddf373eb5f49cfcf10d8b853cf91cbcd7d665c3522ba7dd738ddb79a4cdeadf1a5c448ea3c9f4191a8999abfcc757ac6d64567ef072c47fec613443b8f", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEFX+P3fNz619Jz88Q2LhTz5HLzX1mXDUi\nun3XON23mkzerfGlxEjqPJ9BkaiZmr/MdXrG1kVn7wcsR/7GE0Q7jw==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004157f8fddf373eb5f49cfcf10d8b853cf91cbcd7d665c3522ba7dd738ddb79a4cdeadf1a5c448ea3c9f4191a8999abfcc757ac6d64567ef072c47fec613443b8f", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEFX+P3fNz619Jz88Q2LhTz5HLzX1mXDUi\nun3XON23mkzerfGlxEjqPJ9BkaiZmr/MdXrG1kVn7wcsR/7GE0Q7jw==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 361, - "comment" : "100-bit r and small s^-1", - "flags" : [ + "tcId": 361, + "comment": "100-bit r and small s^-1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : 
"3031020d1033e67e37b32b445580bf4efc02206f906f906f906f906f906f906f906f8fe1cab5eefdb214061dce3b22789f1d6f", - "result" : "valid" + "msg": "313233343030", + "sig": "3031020d1033e67e37b32b445580bf4efc02206f906f906f906f906f906f906f906f8fe1cab5eefdb214061dce3b22789f1d6f", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "040934a537466c07430e2c48feb990bb19fb78cecc9cee424ea4d130291aa237f0d4f92d23b462804b5b68c52558c01c9996dbf727fccabbeedb9621a400535afa", - "wx" : "0934a537466c07430e2c48feb990bb19fb78cecc9cee424ea4d130291aa237f0", - "wy" : "00d4f92d23b462804b5b68c52558c01c9996dbf727fccabbeedb9621a400535afa" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "040934a537466c07430e2c48feb990bb19fb78cecc9cee424ea4d130291aa237f0d4f92d23b462804b5b68c52558c01c9996dbf727fccabbeedb9621a400535afa", + "wx": "0934a537466c07430e2c48feb990bb19fb78cecc9cee424ea4d130291aa237f0", + "wy": "00d4f92d23b462804b5b68c52558c01c9996dbf727fccabbeedb9621a400535afa" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200040934a537466c07430e2c48feb990bb19fb78cecc9cee424ea4d130291aa237f0d4f92d23b462804b5b68c52558c01c9996dbf727fccabbeedb9621a400535afa", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAECTSlN0ZsB0MOLEj+uZC7Gft4zsyc7kJO\npNEwKRqiN/DU+S0jtGKAS1toxSVYwByZltv3J/zKu+7bliGkAFNa+g==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200040934a537466c07430e2c48feb990bb19fb78cecc9cee424ea4d130291aa237f0d4f92d23b462804b5b68c52558c01c9996dbf727fccabbeedb9621a400535afa", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAECTSlN0ZsB0MOLEj+uZC7Gft4zsyc7kJO\npNEwKRqiN/DU+S0jtGKAS1toxSVYwByZltv3J/zKu+7bliGkAFNa+g==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 362, - "comment" : "small r and 100 bit s^-1", - "flags" : [ + "tcId": 362, + "comment": "small r and 100 bit s^-1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3026020201010220783266e90f43dafe5cd9b3b0be86de22f9de83677d0f50713a468ec72fcf5d57", - "result" : "valid" + "msg": "313233343030", + "sig": "3026020201010220783266e90f43dafe5cd9b3b0be86de22f9de83677d0f50713a468ec72fcf5d57", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04d6ef20be66c893f741a9bf90d9b74675d1c2a31296397acb3ef174fd0b300c654a0c95478ca00399162d7f0f2dc89efdc2b28a30fbabe285857295a4b0c4e265", - "wx" : "00d6ef20be66c893f741a9bf90d9b74675d1c2a31296397acb3ef174fd0b300c65", - "wy" : "4a0c95478ca00399162d7f0f2dc89efdc2b28a30fbabe285857295a4b0c4e265" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04d6ef20be66c893f741a9bf90d9b74675d1c2a31296397acb3ef174fd0b300c654a0c95478ca00399162d7f0f2dc89efdc2b28a30fbabe285857295a4b0c4e265", + "wx": "00d6ef20be66c893f741a9bf90d9b74675d1c2a31296397acb3ef174fd0b300c65", + "wy": "4a0c95478ca00399162d7f0f2dc89efdc2b28a30fbabe285857295a4b0c4e265" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004d6ef20be66c893f741a9bf90d9b74675d1c2a31296397acb3ef174fd0b300c654a0c95478ca00399162d7f0f2dc89efdc2b28a30fbabe285857295a4b0c4e265", - "publicKeyPem" : "-----BEGIN 
PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE1u8gvmbIk/dBqb+Q2bdGddHCoxKWOXrL\nPvF0/QswDGVKDJVHjKADmRYtfw8tyJ79wrKKMPur4oWFcpWksMTiZQ==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004d6ef20be66c893f741a9bf90d9b74675d1c2a31296397acb3ef174fd0b300c654a0c95478ca00399162d7f0f2dc89efdc2b28a30fbabe285857295a4b0c4e265", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE1u8gvmbIk/dBqb+Q2bdGddHCoxKWOXrL\nPvF0/QswDGVKDJVHjKADmRYtfw8tyJ79wrKKMPur4oWFcpWksMTiZQ==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 363, - "comment" : "100-bit r and s^-1", - "flags" : [ + "tcId": 363, + "comment": "100-bit r and s^-1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3031020d062522bbd3ecbe7c39e93e7c260220783266e90f43dafe5cd9b3b0be86de22f9de83677d0f50713a468ec72fcf5d57", - "result" : "valid" + "msg": "313233343030", + "sig": "3031020d062522bbd3ecbe7c39e93e7c260220783266e90f43dafe5cd9b3b0be86de22f9de83677d0f50713a468ec72fcf5d57", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04b7291d1404e0c0c07dab9372189f4bd58d2ceaa8d15ede544d9514545ba9ee0629c9a63d5e308769cc30ec276a410e6464a27eeafd9e599db10f053a4fe4a829", - "wx" : "00b7291d1404e0c0c07dab9372189f4bd58d2ceaa8d15ede544d9514545ba9ee06", - "wy" : "29c9a63d5e308769cc30ec276a410e6464a27eeafd9e599db10f053a4fe4a829" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04b7291d1404e0c0c07dab9372189f4bd58d2ceaa8d15ede544d9514545ba9ee0629c9a63d5e308769cc30ec276a410e6464a27eeafd9e599db10f053a4fe4a829", + "wx": "00b7291d1404e0c0c07dab9372189f4bd58d2ceaa8d15ede544d9514545ba9ee06", + "wy": "29c9a63d5e308769cc30ec276a410e6464a27eeafd9e599db10f053a4fe4a829" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004b7291d1404e0c0c07dab9372189f4bd58d2ceaa8d15ede544d9514545ba9ee0629c9a63d5e308769cc30ec276a410e6464a27eeafd9e599db10f053a4fe4a829", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEtykdFATgwMB9q5NyGJ9L1Y0s6qjRXt5U\nTZUUVFup7gYpyaY9XjCHacww7CdqQQ5kZKJ+6v2eWZ2xDwU6T+SoKQ==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004b7291d1404e0c0c07dab9372189f4bd58d2ceaa8d15ede544d9514545ba9ee0629c9a63d5e308769cc30ec276a410e6464a27eeafd9e599db10f053a4fe4a829", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEtykdFATgwMB9q5NyGJ9L1Y0s6qjRXt5U\nTZUUVFup7gYpyaY9XjCHacww7CdqQQ5kZKJ+6v2eWZ2xDwU6T+SoKQ==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 364, - "comment" : "r and s^-1 are close to n", - "flags" : [ + "tcId": 364, + "comment": "r and s^-1 are close to n", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3045022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03640c1022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c0", - "result" : "valid" + "msg": "313233343030", + "sig": "3045022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03640c1022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c0", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" 
: "046e28303305d642ccb923b722ea86b2a0bc8e3735ecb26e849b19c9f76b2fdbb8186e80d64d8cab164f5238f5318461bf89d4d96ee6544c816c7566947774e0f6", - "wx" : "6e28303305d642ccb923b722ea86b2a0bc8e3735ecb26e849b19c9f76b2fdbb8", - "wy" : "186e80d64d8cab164f5238f5318461bf89d4d96ee6544c816c7566947774e0f6" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "046e28303305d642ccb923b722ea86b2a0bc8e3735ecb26e849b19c9f76b2fdbb8186e80d64d8cab164f5238f5318461bf89d4d96ee6544c816c7566947774e0f6", + "wx": "6e28303305d642ccb923b722ea86b2a0bc8e3735ecb26e849b19c9f76b2fdbb8", + "wy": "186e80d64d8cab164f5238f5318461bf89d4d96ee6544c816c7566947774e0f6" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200046e28303305d642ccb923b722ea86b2a0bc8e3735ecb26e849b19c9f76b2fdbb8186e80d64d8cab164f5238f5318461bf89d4d96ee6544c816c7566947774e0f6", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEbigwMwXWQsy5I7ci6oayoLyONzXssm6E\nmxnJ92sv27gYboDWTYyrFk9SOPUxhGG/idTZbuZUTIFsdWaUd3Tg9g==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200046e28303305d642ccb923b722ea86b2a0bc8e3735ecb26e849b19c9f76b2fdbb8186e80d64d8cab164f5238f5318461bf89d4d96ee6544c816c7566947774e0f6", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEbigwMwXWQsy5I7ci6oayoLyONzXssm6E\nmxnJ92sv27gYboDWTYyrFk9SOPUxhGG/idTZbuZUTIFsdWaUd3Tg9g==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 365, - "comment" : "r and s are 64-bit integer", - "flags" : [ + "tcId": 365, + "comment": "r and s are 64-bit integer", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "30160209009c44febf31c3594d020900839ed28247c2b06b", - "result" : "valid" + "msg": "313233343030", + "sig": "30160209009c44febf31c3594d020900839ed28247c2b06b", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04375bda93f6af92fb5f8f4b1b5f0534e3bafab34cb7ad9fb9d0b722e4a5c302a9a00b9f387a5a396097aa2162fc5bbcf4a5263372f681c94da51e9799120990fd", - "wx" : "375bda93f6af92fb5f8f4b1b5f0534e3bafab34cb7ad9fb9d0b722e4a5c302a9", - "wy" : "00a00b9f387a5a396097aa2162fc5bbcf4a5263372f681c94da51e9799120990fd" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04375bda93f6af92fb5f8f4b1b5f0534e3bafab34cb7ad9fb9d0b722e4a5c302a9a00b9f387a5a396097aa2162fc5bbcf4a5263372f681c94da51e9799120990fd", + "wx": "375bda93f6af92fb5f8f4b1b5f0534e3bafab34cb7ad9fb9d0b722e4a5c302a9", + "wy": "00a00b9f387a5a396097aa2162fc5bbcf4a5263372f681c94da51e9799120990fd" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004375bda93f6af92fb5f8f4b1b5f0534e3bafab34cb7ad9fb9d0b722e4a5c302a9a00b9f387a5a396097aa2162fc5bbcf4a5263372f681c94da51e9799120990fd", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEN1vak/avkvtfj0sbXwU047r6s0y3rZ+5\n0Lci5KXDAqmgC584elo5YJeqIWL8W7z0pSYzcvaByU2lHpeZEgmQ/Q==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004375bda93f6af92fb5f8f4b1b5f0534e3bafab34cb7ad9fb9d0b722e4a5c302a9a00b9f387a5a396097aa2162fc5bbcf4a5263372f681c94da51e9799120990fd", + "publicKeyPem": "-----BEGIN PUBLIC 
KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEN1vak/avkvtfj0sbXwU047r6s0y3rZ+5\n0Lci5KXDAqmgC584elo5YJeqIWL8W7z0pSYzcvaByU2lHpeZEgmQ/Q==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 366, - "comment" : "r and s are 100-bit integer", - "flags" : [ + "tcId": 366, + "comment": "r and s are 100-bit integer", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "301e020d09df8b682430beef6f5fd7c7cf020d0fd0a62e13778f4222a0d61c8a", - "result" : "valid" + "msg": "313233343030", + "sig": "301e020d09df8b682430beef6f5fd7c7cf020d0fd0a62e13778f4222a0d61c8a", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04d75b68216babe03ae257e94b4e3bf1c52f44e3df266d1524ff8c5ea69da73197da4bff9ed1c53f44917a67d7b978598e89df359e3d5913eaea24f3ae259abc44", - "wx" : "00d75b68216babe03ae257e94b4e3bf1c52f44e3df266d1524ff8c5ea69da73197", - "wy" : "00da4bff9ed1c53f44917a67d7b978598e89df359e3d5913eaea24f3ae259abc44" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04d75b68216babe03ae257e94b4e3bf1c52f44e3df266d1524ff8c5ea69da73197da4bff9ed1c53f44917a67d7b978598e89df359e3d5913eaea24f3ae259abc44", + "wx": "00d75b68216babe03ae257e94b4e3bf1c52f44e3df266d1524ff8c5ea69da73197", + "wy": "00da4bff9ed1c53f44917a67d7b978598e89df359e3d5913eaea24f3ae259abc44" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004d75b68216babe03ae257e94b4e3bf1c52f44e3df266d1524ff8c5ea69da73197da4bff9ed1c53f44917a67d7b978598e89df359e3d5913eaea24f3ae259abc44", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE11toIWur4DriV+lLTjvxxS9E498mbRUk\n/4xepp2nMZfaS/+e0cU/RJF6Z9e5eFmOid81nj1ZE+rqJPOuJZq8RA==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004d75b68216babe03ae257e94b4e3bf1c52f44e3df266d1524ff8c5ea69da73197da4bff9ed1c53f44917a67d7b978598e89df359e3d5913eaea24f3ae259abc44", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE11toIWur4DriV+lLTjvxxS9E498mbRUk\n/4xepp2nMZfaS/+e0cU/RJF6Z9e5eFmOid81nj1ZE+rqJPOuJZq8RA==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 367, - "comment" : "r and s are 128-bit integer", - "flags" : [ + "tcId": 367, + "comment": "r and s are 128-bit integer", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "30260211008a598e563a89f526c32ebec8de26367a02110084f633e2042630e99dd0f1e16f7a04bf", - "result" : "valid" + "msg": "313233343030", + "sig": "30260211008a598e563a89f526c32ebec8de26367a02110084f633e2042630e99dd0f1e16f7a04bf", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "0478bcda140aed23d430cb23c3dc0d01f423db134ee94a3a8cb483f2deac2ac653118114f6f33045d4e9ed9107085007bfbddf8f58fe7a1a2445d66a990045476e", - "wx" : "78bcda140aed23d430cb23c3dc0d01f423db134ee94a3a8cb483f2deac2ac653", - "wy" : "118114f6f33045d4e9ed9107085007bfbddf8f58fe7a1a2445d66a990045476e" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "0478bcda140aed23d430cb23c3dc0d01f423db134ee94a3a8cb483f2deac2ac653118114f6f33045d4e9ed9107085007bfbddf8f58fe7a1a2445d66a990045476e", + "wx": 
"78bcda140aed23d430cb23c3dc0d01f423db134ee94a3a8cb483f2deac2ac653", + "wy": "118114f6f33045d4e9ed9107085007bfbddf8f58fe7a1a2445d66a990045476e" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000478bcda140aed23d430cb23c3dc0d01f423db134ee94a3a8cb483f2deac2ac653118114f6f33045d4e9ed9107085007bfbddf8f58fe7a1a2445d66a990045476e", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEeLzaFArtI9QwyyPD3A0B9CPbE07pSjqM\ntIPy3qwqxlMRgRT28zBF1OntkQcIUAe/vd+PWP56GiRF1mqZAEVHbg==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000478bcda140aed23d430cb23c3dc0d01f423db134ee94a3a8cb483f2deac2ac653118114f6f33045d4e9ed9107085007bfbddf8f58fe7a1a2445d66a990045476e", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEeLzaFArtI9QwyyPD3A0B9CPbE07pSjqM\ntIPy3qwqxlMRgRT28zBF1OntkQcIUAe/vd+PWP56GiRF1mqZAEVHbg==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 368, - "comment" : "r and s are 160-bit integer", - "flags" : [ + "tcId": 368, + "comment": "r and s are 160-bit integer", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "302e021500aa6eeb5823f7fa31b466bb473797f0d0314c0bdf021500e2977c479e6d25703cebbc6bd561938cc9d1bfb9", - "result" : "valid" + "msg": "313233343030", + "sig": "302e021500aa6eeb5823f7fa31b466bb473797f0d0314c0bdf021500e2977c479e6d25703cebbc6bd561938cc9d1bfb9", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04bb79f61857f743bfa1b6e7111ce4094377256969e4e15159123d9548acc3be6c1f9d9f8860dcffd3eb36dd6c31ff2e7226c2009c4c94d8d7d2b5686bf7abd677", - "wx" : "00bb79f61857f743bfa1b6e7111ce4094377256969e4e15159123d9548acc3be6c", - "wy" : "1f9d9f8860dcffd3eb36dd6c31ff2e7226c2009c4c94d8d7d2b5686bf7abd677" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04bb79f61857f743bfa1b6e7111ce4094377256969e4e15159123d9548acc3be6c1f9d9f8860dcffd3eb36dd6c31ff2e7226c2009c4c94d8d7d2b5686bf7abd677", + "wx": "00bb79f61857f743bfa1b6e7111ce4094377256969e4e15159123d9548acc3be6c", + "wy": "1f9d9f8860dcffd3eb36dd6c31ff2e7226c2009c4c94d8d7d2b5686bf7abd677" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004bb79f61857f743bfa1b6e7111ce4094377256969e4e15159123d9548acc3be6c1f9d9f8860dcffd3eb36dd6c31ff2e7226c2009c4c94d8d7d2b5686bf7abd677", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEu3n2GFf3Q7+htucRHOQJQ3claWnk4VFZ\nEj2VSKzDvmwfnZ+IYNz/0+s23Wwx/y5yJsIAnEyU2NfStWhr96vWdw==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004bb79f61857f743bfa1b6e7111ce4094377256969e4e15159123d9548acc3be6c1f9d9f8860dcffd3eb36dd6c31ff2e7226c2009c4c94d8d7d2b5686bf7abd677", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEu3n2GFf3Q7+htucRHOQJQ3claWnk4VFZ\nEj2VSKzDvmwfnZ+IYNz/0+s23Wwx/y5yJsIAnEyU2NfStWhr96vWdw==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 369, - "comment" : "s == 1", - "flags" : [ + "tcId": 369, + "comment": "s == 1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3025022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c1020101", - "result" : "valid" + "msg": "313233343030", + "sig": 
"3025022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c1020101", + "result": "valid" }, { - "tcId" : 370, - "comment" : "s == 0", - "flags" : [ + "tcId": 370, + "comment": "s == 0", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3025022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c1020100", - "result" : "invalid" + "msg": "313233343030", + "sig": "3025022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c1020100", + "result": "invalid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "0493591827d9e6713b4e9faea62c72b28dfefa68e0c05160b5d6aae88fd2e36c36073f5545ad5af410af26afff68654cf72d45e493489311203247347a890f4518", - "wx" : "0093591827d9e6713b4e9faea62c72b28dfefa68e0c05160b5d6aae88fd2e36c36", - "wy" : "073f5545ad5af410af26afff68654cf72d45e493489311203247347a890f4518" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "0493591827d9e6713b4e9faea62c72b28dfefa68e0c05160b5d6aae88fd2e36c36073f5545ad5af410af26afff68654cf72d45e493489311203247347a890f4518", + "wx": "0093591827d9e6713b4e9faea62c72b28dfefa68e0c05160b5d6aae88fd2e36c36", + "wy": "073f5545ad5af410af26afff68654cf72d45e493489311203247347a890f4518" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000493591827d9e6713b4e9faea62c72b28dfefa68e0c05160b5d6aae88fd2e36c36073f5545ad5af410af26afff68654cf72d45e493489311203247347a890f4518", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEk1kYJ9nmcTtOn66mLHKyjf76aODAUWC1\n1qroj9LjbDYHP1VFrVr0EK8mr/9oZUz3LUXkk0iTESAyRzR6iQ9FGA==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000493591827d9e6713b4e9faea62c72b28dfefa68e0c05160b5d6aae88fd2e36c36073f5545ad5af410af26afff68654cf72d45e493489311203247347a890f4518", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEk1kYJ9nmcTtOn66mLHKyjf76aODAUWC1\n1qroj9LjbDYHP1VFrVr0EK8mr/9oZUz3LUXkk0iTESAyRzR6iQ9FGA==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 371, - "comment" : "edge case modular inverse", - "flags" : [ + "tcId": 371, + "comment": "edge case modular inverse", + "flags": [ "ModularInverse", "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c10220419d981c515af8cc82545aac0c85e9e308fbb2eab6acd7ed497e0b4145a18fd9", - "result" : "valid" + "msg": "313233343030", + "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c10220419d981c515af8cc82545aac0c85e9e308fbb2eab6acd7ed497e0b4145a18fd9", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "0431ed3081aefe001eb6402069ee2ccc1862937b85995144dba9503943587bf0dada01b8cc4df34f5ab3b1a359615208946e5ee35f98ee775b8ccecd86ccc1650f", - "wx" : "31ed3081aefe001eb6402069ee2ccc1862937b85995144dba9503943587bf0da", - "wy" : "00da01b8cc4df34f5ab3b1a359615208946e5ee35f98ee775b8ccecd86ccc1650f" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "0431ed3081aefe001eb6402069ee2ccc1862937b85995144dba9503943587bf0dada01b8cc4df34f5ab3b1a359615208946e5ee35f98ee775b8ccecd86ccc1650f", + "wx": 
"31ed3081aefe001eb6402069ee2ccc1862937b85995144dba9503943587bf0da", + "wy": "00da01b8cc4df34f5ab3b1a359615208946e5ee35f98ee775b8ccecd86ccc1650f" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000431ed3081aefe001eb6402069ee2ccc1862937b85995144dba9503943587bf0dada01b8cc4df34f5ab3b1a359615208946e5ee35f98ee775b8ccecd86ccc1650f", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEMe0wga7+AB62QCBp7izMGGKTe4WZUUTb\nqVA5Q1h78NraAbjMTfNPWrOxo1lhUgiUbl7jX5jud1uMzs2GzMFlDw==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000431ed3081aefe001eb6402069ee2ccc1862937b85995144dba9503943587bf0dada01b8cc4df34f5ab3b1a359615208946e5ee35f98ee775b8ccecd86ccc1650f", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEMe0wga7+AB62QCBp7izMGGKTe4WZUUTb\nqVA5Q1h78NraAbjMTfNPWrOxo1lhUgiUbl7jX5jud1uMzs2GzMFlDw==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 372, - "comment" : "edge case modular inverse", - "flags" : [ + "tcId": 372, + "comment": "edge case modular inverse", + "flags": [ "ModularInverse", "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102201b21717ad71d23bbac60a9ad0baf75b063c9fdf52a00ebf99d022172910993c9", - "result" : "valid" + "msg": "313233343030", + "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102201b21717ad71d23bbac60a9ad0baf75b063c9fdf52a00ebf99d022172910993c9", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "047dff66fa98509ff3e2e51045f4390523dccda43a3bc2885e58c248090990eea854c76c2b9adeb6bb571823e07fd7c65c8639cf9d905260064c8e7675ce6d98b4", - "wx" : "7dff66fa98509ff3e2e51045f4390523dccda43a3bc2885e58c248090990eea8", - "wy" : "54c76c2b9adeb6bb571823e07fd7c65c8639cf9d905260064c8e7675ce6d98b4" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "047dff66fa98509ff3e2e51045f4390523dccda43a3bc2885e58c248090990eea854c76c2b9adeb6bb571823e07fd7c65c8639cf9d905260064c8e7675ce6d98b4", + "wx": "7dff66fa98509ff3e2e51045f4390523dccda43a3bc2885e58c248090990eea8", + "wy": "54c76c2b9adeb6bb571823e07fd7c65c8639cf9d905260064c8e7675ce6d98b4" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200047dff66fa98509ff3e2e51045f4390523dccda43a3bc2885e58c248090990eea854c76c2b9adeb6bb571823e07fd7c65c8639cf9d905260064c8e7675ce6d98b4", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEff9m+phQn/Pi5RBF9DkFI9zNpDo7wohe\nWMJICQmQ7qhUx2wrmt62u1cYI+B/18ZchjnPnZBSYAZMjnZ1zm2YtA==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200047dff66fa98509ff3e2e51045f4390523dccda43a3bc2885e58c248090990eea854c76c2b9adeb6bb571823e07fd7c65c8639cf9d905260064c8e7675ce6d98b4", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEff9m+phQn/Pi5RBF9DkFI9zNpDo7wohe\nWMJICQmQ7qhUx2wrmt62u1cYI+B/18ZchjnPnZBSYAZMjnZ1zm2YtA==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 373, - "comment" : "edge case modular inverse", - "flags" : [ + "tcId": 373, + "comment": "edge case modular inverse", + "flags": [ "ModularInverse", "ArithmeticError" ], - "msg" : "313233343030", - "sig" : 
"3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102202f588f66018f3dd14db3e28e77996487e32486b521ed8e5a20f06591951777e9", - "result" : "valid" + "msg": "313233343030", + "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102202f588f66018f3dd14db3e28e77996487e32486b521ed8e5a20f06591951777e9", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "044280509aab64edfc0b4a2967e4cbce849cb544e4a77313c8e6ece579fbd7420a2e89fe5cc1927d554e6a3bb14033ea7c922cd75cba2c7415fdab52f20b1860f1", - "wx" : "4280509aab64edfc0b4a2967e4cbce849cb544e4a77313c8e6ece579fbd7420a", - "wy" : "2e89fe5cc1927d554e6a3bb14033ea7c922cd75cba2c7415fdab52f20b1860f1" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "044280509aab64edfc0b4a2967e4cbce849cb544e4a77313c8e6ece579fbd7420a2e89fe5cc1927d554e6a3bb14033ea7c922cd75cba2c7415fdab52f20b1860f1", + "wx": "4280509aab64edfc0b4a2967e4cbce849cb544e4a77313c8e6ece579fbd7420a", + "wy": "2e89fe5cc1927d554e6a3bb14033ea7c922cd75cba2c7415fdab52f20b1860f1" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200044280509aab64edfc0b4a2967e4cbce849cb544e4a77313c8e6ece579fbd7420a2e89fe5cc1927d554e6a3bb14033ea7c922cd75cba2c7415fdab52f20b1860f1", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEQoBQmqtk7fwLSiln5MvOhJy1ROSncxPI\n5uzlefvXQgouif5cwZJ9VU5qO7FAM+p8kizXXLosdBX9q1LyCxhg8Q==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200044280509aab64edfc0b4a2967e4cbce849cb544e4a77313c8e6ece579fbd7420a2e89fe5cc1927d554e6a3bb14033ea7c922cd75cba2c7415fdab52f20b1860f1", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEQoBQmqtk7fwLSiln5MvOhJy1ROSncxPI\n5uzlefvXQgouif5cwZJ9VU5qO7FAM+p8kizXXLosdBX9q1LyCxhg8Q==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 374, - "comment" : "edge case modular inverse", - "flags" : [ + "tcId": 374, + "comment": "edge case modular inverse", + "flags": [ "ModularInverse", "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c10220091a08870ff4daf9123b30c20e8c4fc8505758dcf4074fcaff2170c9bfcf74f4", - "result" : "valid" + "msg": "313233343030", + "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c10220091a08870ff4daf9123b30c20e8c4fc8505758dcf4074fcaff2170c9bfcf74f4", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "044f8df145194e3c4fc3eea26d43ce75b402d6b17472ddcbb254b8a79b0bf3d9cb2aa20d82844cb266344e71ca78f2ad27a75a09e5bc0fa57e4efd9d465a0888db", - "wx" : "4f8df145194e3c4fc3eea26d43ce75b402d6b17472ddcbb254b8a79b0bf3d9cb", - "wy" : "2aa20d82844cb266344e71ca78f2ad27a75a09e5bc0fa57e4efd9d465a0888db" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "044f8df145194e3c4fc3eea26d43ce75b402d6b17472ddcbb254b8a79b0bf3d9cb2aa20d82844cb266344e71ca78f2ad27a75a09e5bc0fa57e4efd9d465a0888db", + "wx": "4f8df145194e3c4fc3eea26d43ce75b402d6b17472ddcbb254b8a79b0bf3d9cb", + "wy": "2aa20d82844cb266344e71ca78f2ad27a75a09e5bc0fa57e4efd9d465a0888db" }, - "publicKeyDer" : 
"3056301006072a8648ce3d020106052b8104000a034200044f8df145194e3c4fc3eea26d43ce75b402d6b17472ddcbb254b8a79b0bf3d9cb2aa20d82844cb266344e71ca78f2ad27a75a09e5bc0fa57e4efd9d465a0888db", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAET43xRRlOPE/D7qJtQ851tALWsXRy3cuy\nVLinmwvz2csqog2ChEyyZjROccp48q0np1oJ5bwPpX5O/Z1GWgiI2w==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200044f8df145194e3c4fc3eea26d43ce75b402d6b17472ddcbb254b8a79b0bf3d9cb2aa20d82844cb266344e71ca78f2ad27a75a09e5bc0fa57e4efd9d465a0888db", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAET43xRRlOPE/D7qJtQ851tALWsXRy3cuy\nVLinmwvz2csqog2ChEyyZjROccp48q0np1oJ5bwPpX5O/Z1GWgiI2w==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 375, - "comment" : "edge case modular inverse", - "flags" : [ + "tcId": 375, + "comment": "edge case modular inverse", + "flags": [ "ModularInverse", "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102207c370dc0ce8c59a8b273cba44a7c1191fc3186dc03cab96b0567312df0d0b250", - "result" : "valid" + "msg": "313233343030", + "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102207c370dc0ce8c59a8b273cba44a7c1191fc3186dc03cab96b0567312df0d0b250", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "049598a57dd67ec3e16b587a338aa3a10a3a3913b41a3af32e3ed3ff01358c6b14122819edf8074bbc521f7d4cdce82fef7a516706affba1d93d9dea9ccae1a207", - "wx" : "009598a57dd67ec3e16b587a338aa3a10a3a3913b41a3af32e3ed3ff01358c6b14", - "wy" : "122819edf8074bbc521f7d4cdce82fef7a516706affba1d93d9dea9ccae1a207" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "049598a57dd67ec3e16b587a338aa3a10a3a3913b41a3af32e3ed3ff01358c6b14122819edf8074bbc521f7d4cdce82fef7a516706affba1d93d9dea9ccae1a207", + "wx": "009598a57dd67ec3e16b587a338aa3a10a3a3913b41a3af32e3ed3ff01358c6b14", + "wy": "122819edf8074bbc521f7d4cdce82fef7a516706affba1d93d9dea9ccae1a207" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200049598a57dd67ec3e16b587a338aa3a10a3a3913b41a3af32e3ed3ff01358c6b14122819edf8074bbc521f7d4cdce82fef7a516706affba1d93d9dea9ccae1a207", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAElZilfdZ+w+FrWHoziqOhCjo5E7QaOvMu\nPtP/ATWMaxQSKBnt+AdLvFIffUzc6C/velFnBq/7odk9neqcyuGiBw==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200049598a57dd67ec3e16b587a338aa3a10a3a3913b41a3af32e3ed3ff01358c6b14122819edf8074bbc521f7d4cdce82fef7a516706affba1d93d9dea9ccae1a207", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAElZilfdZ+w+FrWHoziqOhCjo5E7QaOvMu\nPtP/ATWMaxQSKBnt+AdLvFIffUzc6C/velFnBq/7odk9neqcyuGiBw==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 376, - "comment" : "edge case modular inverse", - "flags" : [ + "tcId": 376, + "comment": "edge case modular inverse", + "flags": [ "ModularInverse", "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c1022070b59a7d1ee77a2f9e0491c2a7cfcd0ed04df4a35192f6132dcc668c79a6160e", - "result" : "valid" + "msg": 
"313233343030", + "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c1022070b59a7d1ee77a2f9e0491c2a7cfcd0ed04df4a35192f6132dcc668c79a6160e", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "049171fec3ca20806bc084f12f0760911b60990bd80e5b2a71ca03a048b20f837e634fd17863761b2958d2be4e149f8d3d7abbdc18be03f451ab6c17fa0a1f8330", - "wx" : "009171fec3ca20806bc084f12f0760911b60990bd80e5b2a71ca03a048b20f837e", - "wy" : "634fd17863761b2958d2be4e149f8d3d7abbdc18be03f451ab6c17fa0a1f8330" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "049171fec3ca20806bc084f12f0760911b60990bd80e5b2a71ca03a048b20f837e634fd17863761b2958d2be4e149f8d3d7abbdc18be03f451ab6c17fa0a1f8330", + "wx": "009171fec3ca20806bc084f12f0760911b60990bd80e5b2a71ca03a048b20f837e", + "wy": "634fd17863761b2958d2be4e149f8d3d7abbdc18be03f451ab6c17fa0a1f8330" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200049171fec3ca20806bc084f12f0760911b60990bd80e5b2a71ca03a048b20f837e634fd17863761b2958d2be4e149f8d3d7abbdc18be03f451ab6c17fa0a1f8330", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEkXH+w8oggGvAhPEvB2CRG2CZC9gOWypx\nygOgSLIPg35jT9F4Y3YbKVjSvk4Un409ervcGL4D9FGrbBf6Ch+DMA==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200049171fec3ca20806bc084f12f0760911b60990bd80e5b2a71ca03a048b20f837e634fd17863761b2958d2be4e149f8d3d7abbdc18be03f451ab6c17fa0a1f8330", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEkXH+w8oggGvAhPEvB2CRG2CZC9gOWypx\nygOgSLIPg35jT9F4Y3YbKVjSvk4Un409ervcGL4D9FGrbBf6Ch+DMA==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 377, - "comment" : "edge case modular inverse", - "flags" : [ + "tcId": 377, + "comment": "edge case modular inverse", + "flags": [ "ModularInverse", "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102202736d76e412246e097148e2bf62915614eb7c428913a58eb5e9cd4674a9423de", - "result" : "valid" + "msg": "313233343030", + "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102202736d76e412246e097148e2bf62915614eb7c428913a58eb5e9cd4674a9423de", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04777c8930b6e1d271100fe68ce93f163fa37612c5fff67f4a62fc3bafaf3d17a9ed73d86f60a51b5ed91353a3b054edc0aa92c9ebcbd0b75d188fdc882791d68d", - "wx" : "777c8930b6e1d271100fe68ce93f163fa37612c5fff67f4a62fc3bafaf3d17a9", - "wy" : "00ed73d86f60a51b5ed91353a3b054edc0aa92c9ebcbd0b75d188fdc882791d68d" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04777c8930b6e1d271100fe68ce93f163fa37612c5fff67f4a62fc3bafaf3d17a9ed73d86f60a51b5ed91353a3b054edc0aa92c9ebcbd0b75d188fdc882791d68d", + "wx": "777c8930b6e1d271100fe68ce93f163fa37612c5fff67f4a62fc3bafaf3d17a9", + "wy": "00ed73d86f60a51b5ed91353a3b054edc0aa92c9ebcbd0b75d188fdc882791d68d" }, - "publicKeyDer" : 
"3056301006072a8648ce3d020106052b8104000a03420004777c8930b6e1d271100fe68ce93f163fa37612c5fff67f4a62fc3bafaf3d17a9ed73d86f60a51b5ed91353a3b054edc0aa92c9ebcbd0b75d188fdc882791d68d", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEd3yJMLbh0nEQD+aM6T8WP6N2EsX/9n9K\nYvw7r689F6ntc9hvYKUbXtkTU6OwVO3AqpLJ68vQt10Yj9yIJ5HWjQ==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004777c8930b6e1d271100fe68ce93f163fa37612c5fff67f4a62fc3bafaf3d17a9ed73d86f60a51b5ed91353a3b054edc0aa92c9ebcbd0b75d188fdc882791d68d", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEd3yJMLbh0nEQD+aM6T8WP6N2EsX/9n9K\nYvw7r689F6ntc9hvYKUbXtkTU6OwVO3AqpLJ68vQt10Yj9yIJ5HWjQ==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 378, - "comment" : "edge case modular inverse", - "flags" : [ + "tcId": 378, + "comment": "edge case modular inverse", + "flags": [ "ModularInverse", "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102204a1e12831fbe93627b02d6e7f24bccdd6ef4b2d0f46739eaf3b1eaf0ca117770", - "result" : "valid" + "msg": "313233343030", + "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102204a1e12831fbe93627b02d6e7f24bccdd6ef4b2d0f46739eaf3b1eaf0ca117770", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04eabc248f626e0a63e1eb81c43d461a39a1dba881eb6ee2152b07c32d71bcf4700603caa8b9d33db13af44c6efbec8a198ed6124ac9eb17eaafd2824a545ec000", - "wx" : "00eabc248f626e0a63e1eb81c43d461a39a1dba881eb6ee2152b07c32d71bcf470", - "wy" : "0603caa8b9d33db13af44c6efbec8a198ed6124ac9eb17eaafd2824a545ec000" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04eabc248f626e0a63e1eb81c43d461a39a1dba881eb6ee2152b07c32d71bcf4700603caa8b9d33db13af44c6efbec8a198ed6124ac9eb17eaafd2824a545ec000", + "wx": "00eabc248f626e0a63e1eb81c43d461a39a1dba881eb6ee2152b07c32d71bcf470", + "wy": "0603caa8b9d33db13af44c6efbec8a198ed6124ac9eb17eaafd2824a545ec000" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004eabc248f626e0a63e1eb81c43d461a39a1dba881eb6ee2152b07c32d71bcf4700603caa8b9d33db13af44c6efbec8a198ed6124ac9eb17eaafd2824a545ec000", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE6rwkj2JuCmPh64HEPUYaOaHbqIHrbuIV\nKwfDLXG89HAGA8qoudM9sTr0TG777IoZjtYSSsnrF+qv0oJKVF7AAA==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004eabc248f626e0a63e1eb81c43d461a39a1dba881eb6ee2152b07c32d71bcf4700603caa8b9d33db13af44c6efbec8a198ed6124ac9eb17eaafd2824a545ec000", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE6rwkj2JuCmPh64HEPUYaOaHbqIHrbuIV\nKwfDLXG89HAGA8qoudM9sTr0TG777IoZjtYSSsnrF+qv0oJKVF7AAA==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 379, - "comment" : "edge case modular inverse", - "flags" : [ + "tcId": 379, + "comment": "edge case modular inverse", + "flags": [ "ModularInverse", "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c1022006c778d4dfff7dee06ed88bc4e0ed34fc553aad67caf796f2a1c6487c1b2e877", - "result" : "valid" + "msg": 
"313233343030", + "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c1022006c778d4dfff7dee06ed88bc4e0ed34fc553aad67caf796f2a1c6487c1b2e877", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "049f7a13ada158a55f9ddf1a45f044f073d9b80030efdcfc9f9f58418fbceaf001f8ada0175090f80d47227d6713b6740f9a0091d88a837d0a1cd77b58a8f28d73", - "wx" : "009f7a13ada158a55f9ddf1a45f044f073d9b80030efdcfc9f9f58418fbceaf001", - "wy" : "00f8ada0175090f80d47227d6713b6740f9a0091d88a837d0a1cd77b58a8f28d73" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "049f7a13ada158a55f9ddf1a45f044f073d9b80030efdcfc9f9f58418fbceaf001f8ada0175090f80d47227d6713b6740f9a0091d88a837d0a1cd77b58a8f28d73", + "wx": "009f7a13ada158a55f9ddf1a45f044f073d9b80030efdcfc9f9f58418fbceaf001", + "wy": "00f8ada0175090f80d47227d6713b6740f9a0091d88a837d0a1cd77b58a8f28d73" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200049f7a13ada158a55f9ddf1a45f044f073d9b80030efdcfc9f9f58418fbceaf001f8ada0175090f80d47227d6713b6740f9a0091d88a837d0a1cd77b58a8f28d73", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEn3oTraFYpV+d3xpF8ETwc9m4ADDv3Pyf\nn1hBj7zq8AH4raAXUJD4DUcifWcTtnQPmgCR2IqDfQoc13tYqPKNcw==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200049f7a13ada158a55f9ddf1a45f044f073d9b80030efdcfc9f9f58418fbceaf001f8ada0175090f80d47227d6713b6740f9a0091d88a837d0a1cd77b58a8f28d73", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEn3oTraFYpV+d3xpF8ETwc9m4ADDv3Pyf\nn1hBj7zq8AH4raAXUJD4DUcifWcTtnQPmgCR2IqDfQoc13tYqPKNcw==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 380, - "comment" : "edge case modular inverse", - "flags" : [ + "tcId": 380, + "comment": "edge case modular inverse", + "flags": [ "ModularInverse", "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102204de459ef9159afa057feb3ec40fef01c45b809f4ab296ea48c206d4249a2b451", - "result" : "valid" + "msg": "313233343030", + "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102204de459ef9159afa057feb3ec40fef01c45b809f4ab296ea48c206d4249a2b451", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "0411c4f3e461cd019b5c06ea0cea4c4090c3cc3e3c5d9f3c6d65b436826da9b4dbbbeb7a77e4cbfda207097c43423705f72c80476da3dac40a483b0ab0f2ead1cb", - "wx" : "11c4f3e461cd019b5c06ea0cea4c4090c3cc3e3c5d9f3c6d65b436826da9b4db", - "wy" : "00bbeb7a77e4cbfda207097c43423705f72c80476da3dac40a483b0ab0f2ead1cb" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "0411c4f3e461cd019b5c06ea0cea4c4090c3cc3e3c5d9f3c6d65b436826da9b4dbbbeb7a77e4cbfda207097c43423705f72c80476da3dac40a483b0ab0f2ead1cb", + "wx": "11c4f3e461cd019b5c06ea0cea4c4090c3cc3e3c5d9f3c6d65b436826da9b4db", + "wy": "00bbeb7a77e4cbfda207097c43423705f72c80476da3dac40a483b0ab0f2ead1cb" }, - "publicKeyDer" : 
"3056301006072a8648ce3d020106052b8104000a0342000411c4f3e461cd019b5c06ea0cea4c4090c3cc3e3c5d9f3c6d65b436826da9b4dbbbeb7a77e4cbfda207097c43423705f72c80476da3dac40a483b0ab0f2ead1cb", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEEcTz5GHNAZtcBuoM6kxAkMPMPjxdnzxt\nZbQ2gm2ptNu763p35Mv9ogcJfENCNwX3LIBHbaPaxApIOwqw8urRyw==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000411c4f3e461cd019b5c06ea0cea4c4090c3cc3e3c5d9f3c6d65b436826da9b4dbbbeb7a77e4cbfda207097c43423705f72c80476da3dac40a483b0ab0f2ead1cb", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEEcTz5GHNAZtcBuoM6kxAkMPMPjxdnzxt\nZbQ2gm2ptNu763p35Mv9ogcJfENCNwX3LIBHbaPaxApIOwqw8urRyw==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 381, - "comment" : "edge case modular inverse", - "flags" : [ + "tcId": 381, + "comment": "edge case modular inverse", + "flags": [ "ModularInverse", "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c10220745d294978007302033502e1acc48b63ae6500be43adbea1b258d6b423dbb416", - "result" : "valid" + "msg": "313233343030", + "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c10220745d294978007302033502e1acc48b63ae6500be43adbea1b258d6b423dbb416", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04e2e18682d53123aa01a6c5d00b0c623d671b462ea80bddd65227fd5105988aa4161907b3fd25044a949ea41c8e2ea8459dc6f1654856b8b61b31543bb1b45bdb", - "wx" : "00e2e18682d53123aa01a6c5d00b0c623d671b462ea80bddd65227fd5105988aa4", - "wy" : "161907b3fd25044a949ea41c8e2ea8459dc6f1654856b8b61b31543bb1b45bdb" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04e2e18682d53123aa01a6c5d00b0c623d671b462ea80bddd65227fd5105988aa4161907b3fd25044a949ea41c8e2ea8459dc6f1654856b8b61b31543bb1b45bdb", + "wx": "00e2e18682d53123aa01a6c5d00b0c623d671b462ea80bddd65227fd5105988aa4", + "wy": "161907b3fd25044a949ea41c8e2ea8459dc6f1654856b8b61b31543bb1b45bdb" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004e2e18682d53123aa01a6c5d00b0c623d671b462ea80bddd65227fd5105988aa4161907b3fd25044a949ea41c8e2ea8459dc6f1654856b8b61b31543bb1b45bdb", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE4uGGgtUxI6oBpsXQCwxiPWcbRi6oC93W\nUif9UQWYiqQWGQez/SUESpSepByOLqhFncbxZUhWuLYbMVQ7sbRb2w==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004e2e18682d53123aa01a6c5d00b0c623d671b462ea80bddd65227fd5105988aa4161907b3fd25044a949ea41c8e2ea8459dc6f1654856b8b61b31543bb1b45bdb", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE4uGGgtUxI6oBpsXQCwxiPWcbRi6oC93W\nUif9UQWYiqQWGQez/SUESpSepByOLqhFncbxZUhWuLYbMVQ7sbRb2w==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 382, - "comment" : "edge case modular inverse", - "flags" : [ + "tcId": 382, + "comment": "edge case modular inverse", + "flags": [ "ModularInverse", "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102207b2a785e3896f59b2d69da57648e80ad3c133a750a2847fd2098ccd902042b6c", - "result" : "valid" + "msg": 
"313233343030", + "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102207b2a785e3896f59b2d69da57648e80ad3c133a750a2847fd2098ccd902042b6c", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "0490f8d4ca73de08a6564aaf005247b6f0ffe978504dce52605f46b7c3e56197dafadbe528eb70d9ee7ea0e70702db54f721514c7b8604ac2cb214f1decb7e383d", - "wx" : "0090f8d4ca73de08a6564aaf005247b6f0ffe978504dce52605f46b7c3e56197da", - "wy" : "00fadbe528eb70d9ee7ea0e70702db54f721514c7b8604ac2cb214f1decb7e383d" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "0490f8d4ca73de08a6564aaf005247b6f0ffe978504dce52605f46b7c3e56197dafadbe528eb70d9ee7ea0e70702db54f721514c7b8604ac2cb214f1decb7e383d", + "wx": "0090f8d4ca73de08a6564aaf005247b6f0ffe978504dce52605f46b7c3e56197da", + "wy": "00fadbe528eb70d9ee7ea0e70702db54f721514c7b8604ac2cb214f1decb7e383d" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000490f8d4ca73de08a6564aaf005247b6f0ffe978504dce52605f46b7c3e56197dafadbe528eb70d9ee7ea0e70702db54f721514c7b8604ac2cb214f1decb7e383d", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEkPjUynPeCKZWSq8AUke28P/peFBNzlJg\nX0a3w+Vhl9r62+Uo63DZ7n6g5wcC21T3IVFMe4YErCyyFPHey344PQ==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000490f8d4ca73de08a6564aaf005247b6f0ffe978504dce52605f46b7c3e56197dafadbe528eb70d9ee7ea0e70702db54f721514c7b8604ac2cb214f1decb7e383d", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEkPjUynPeCKZWSq8AUke28P/peFBNzlJg\nX0a3w+Vhl9r62+Uo63DZ7n6g5wcC21T3IVFMe4YErCyyFPHey344PQ==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 383, - "comment" : "edge case modular inverse", - "flags" : [ + "tcId": 383, + "comment": "edge case modular inverse", + "flags": [ "ModularInverse", "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c1022071ae94a72ca896875e7aa4a4c3d29afdb4b35b6996273e63c47ac519256c5eb1", - "result" : "valid" + "msg": "313233343030", + "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c1022071ae94a72ca896875e7aa4a4c3d29afdb4b35b6996273e63c47ac519256c5eb1", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04824c195c73cffdf038d101bce1687b5c3b6146f395c885976f7753b2376b948e3cdefa6fc347d13e4dcbc63a0b03a165180cd2be1431a0cf74ce1ea25082d2bc", - "wx" : "00824c195c73cffdf038d101bce1687b5c3b6146f395c885976f7753b2376b948e", - "wy" : "3cdefa6fc347d13e4dcbc63a0b03a165180cd2be1431a0cf74ce1ea25082d2bc" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04824c195c73cffdf038d101bce1687b5c3b6146f395c885976f7753b2376b948e3cdefa6fc347d13e4dcbc63a0b03a165180cd2be1431a0cf74ce1ea25082d2bc", + "wx": "00824c195c73cffdf038d101bce1687b5c3b6146f395c885976f7753b2376b948e", + "wy": "3cdefa6fc347d13e4dcbc63a0b03a165180cd2be1431a0cf74ce1ea25082d2bc" }, - "publicKeyDer" : 
"3056301006072a8648ce3d020106052b8104000a03420004824c195c73cffdf038d101bce1687b5c3b6146f395c885976f7753b2376b948e3cdefa6fc347d13e4dcbc63a0b03a165180cd2be1431a0cf74ce1ea25082d2bc", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEgkwZXHPP/fA40QG84Wh7XDthRvOVyIWX\nb3dTsjdrlI483vpvw0fRPk3LxjoLA6FlGAzSvhQxoM90zh6iUILSvA==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004824c195c73cffdf038d101bce1687b5c3b6146f395c885976f7753b2376b948e3cdefa6fc347d13e4dcbc63a0b03a165180cd2be1431a0cf74ce1ea25082d2bc", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEgkwZXHPP/fA40QG84Wh7XDthRvOVyIWX\nb3dTsjdrlI483vpvw0fRPk3LxjoLA6FlGAzSvhQxoM90zh6iUILSvA==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 384, - "comment" : "edge case modular inverse", - "flags" : [ + "tcId": 384, + "comment": "edge case modular inverse", + "flags": [ "ModularInverse", "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102200fa527fa7343c0bc9ec35a6278bfbff4d83301b154fc4bd14aee7eb93445b5f9", - "result" : "valid" + "msg": "313233343030", + "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102200fa527fa7343c0bc9ec35a6278bfbff4d83301b154fc4bd14aee7eb93445b5f9", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "042788a52f078eb3f202c4fa73e0d3386faf3df6be856003636f599922d4f5268f30b4f207c919bbdf5e67a8be4265a8174754b3aba8f16e575b77ff4d5a7eb64f", - "wx" : "2788a52f078eb3f202c4fa73e0d3386faf3df6be856003636f599922d4f5268f", - "wy" : "30b4f207c919bbdf5e67a8be4265a8174754b3aba8f16e575b77ff4d5a7eb64f" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "042788a52f078eb3f202c4fa73e0d3386faf3df6be856003636f599922d4f5268f30b4f207c919bbdf5e67a8be4265a8174754b3aba8f16e575b77ff4d5a7eb64f", + "wx": "2788a52f078eb3f202c4fa73e0d3386faf3df6be856003636f599922d4f5268f", + "wy": "30b4f207c919bbdf5e67a8be4265a8174754b3aba8f16e575b77ff4d5a7eb64f" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200042788a52f078eb3f202c4fa73e0d3386faf3df6be856003636f599922d4f5268f30b4f207c919bbdf5e67a8be4265a8174754b3aba8f16e575b77ff4d5a7eb64f", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEJ4ilLweOs/ICxPpz4NM4b6899r6FYANj\nb1mZItT1Jo8wtPIHyRm7315nqL5CZagXR1Szq6jxbldbd/9NWn62Tw==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200042788a52f078eb3f202c4fa73e0d3386faf3df6be856003636f599922d4f5268f30b4f207c919bbdf5e67a8be4265a8174754b3aba8f16e575b77ff4d5a7eb64f", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEJ4ilLweOs/ICxPpz4NM4b6899r6FYANj\nb1mZItT1Jo8wtPIHyRm7315nqL5CZagXR1Szq6jxbldbd/9NWn62Tw==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 385, - "comment" : "edge case modular inverse", - "flags" : [ + "tcId": 385, + "comment": "edge case modular inverse", + "flags": [ "ModularInverse", "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102206539c0adadd0525ff42622164ce9314348bd0863b4c80e936b23ca0414264671", - "result" : "valid" + "msg": 
"313233343030", + "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102206539c0adadd0525ff42622164ce9314348bd0863b4c80e936b23ca0414264671", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04d533b789a4af890fa7a82a1fae58c404f9a62a50b49adafab349c513b415087401b4171b803e76b34a9861e10f7bc289a066fd01bd29f84c987a10a5fb18c2d4", - "wx" : "00d533b789a4af890fa7a82a1fae58c404f9a62a50b49adafab349c513b4150874", - "wy" : "01b4171b803e76b34a9861e10f7bc289a066fd01bd29f84c987a10a5fb18c2d4" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04d533b789a4af890fa7a82a1fae58c404f9a62a50b49adafab349c513b415087401b4171b803e76b34a9861e10f7bc289a066fd01bd29f84c987a10a5fb18c2d4", + "wx": "00d533b789a4af890fa7a82a1fae58c404f9a62a50b49adafab349c513b4150874", + "wy": "01b4171b803e76b34a9861e10f7bc289a066fd01bd29f84c987a10a5fb18c2d4" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004d533b789a4af890fa7a82a1fae58c404f9a62a50b49adafab349c513b415087401b4171b803e76b34a9861e10f7bc289a066fd01bd29f84c987a10a5fb18c2d4", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE1TO3iaSviQ+nqCofrljEBPmmKlC0mtr6\ns0nFE7QVCHQBtBcbgD52s0qYYeEPe8KJoGb9Ab0p+EyYehCl+xjC1A==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004d533b789a4af890fa7a82a1fae58c404f9a62a50b49adafab349c513b415087401b4171b803e76b34a9861e10f7bc289a066fd01bd29f84c987a10a5fb18c2d4", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE1TO3iaSviQ+nqCofrljEBPmmKlC0mtr6\ns0nFE7QVCHQBtBcbgD52s0qYYeEPe8KJoGb9Ab0p+EyYehCl+xjC1A==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 386, - "comment" : "point at infinity during verify", - "flags" : [ + "tcId": 386, + "comment": "point at infinity during verify", + "flags": [ "PointDuplication", "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a0022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c0", - "result" : "invalid" + "msg": "313233343030", + "sig": "304402207fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a0022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c0", + "result": "invalid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "043a3150798c8af69d1e6e981f3a45402ba1d732f4be8330c5164f49e10ec555b4221bd842bc5e4d97eff37165f60e3998a424d72a450cf95ea477c78287d0343a", - "wx" : "3a3150798c8af69d1e6e981f3a45402ba1d732f4be8330c5164f49e10ec555b4", - "wy" : "221bd842bc5e4d97eff37165f60e3998a424d72a450cf95ea477c78287d0343a" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "043a3150798c8af69d1e6e981f3a45402ba1d732f4be8330c5164f49e10ec555b4221bd842bc5e4d97eff37165f60e3998a424d72a450cf95ea477c78287d0343a", + "wx": "3a3150798c8af69d1e6e981f3a45402ba1d732f4be8330c5164f49e10ec555b4", + "wy": "221bd842bc5e4d97eff37165f60e3998a424d72a450cf95ea477c78287d0343a" }, - "publicKeyDer" : 
"3056301006072a8648ce3d020106052b8104000a034200043a3150798c8af69d1e6e981f3a45402ba1d732f4be8330c5164f49e10ec555b4221bd842bc5e4d97eff37165f60e3998a424d72a450cf95ea477c78287d0343a", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEOjFQeYyK9p0ebpgfOkVAK6HXMvS+gzDF\nFk9J4Q7FVbQiG9hCvF5Nl+/zcWX2DjmYpCTXKkUM+V6kd8eCh9A0Og==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200043a3150798c8af69d1e6e981f3a45402ba1d732f4be8330c5164f49e10ec555b4221bd842bc5e4d97eff37165f60e3998a424d72a450cf95ea477c78287d0343a", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEOjFQeYyK9p0ebpgfOkVAK6HXMvS+gzDF\nFk9J4Q7FVbQiG9hCvF5Nl+/zcWX2DjmYpCTXKkUM+V6kd8eCh9A0Og==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 387, - "comment" : "edge case for signature malleability", - "flags" : [ + "tcId": 387, + "comment": "edge case for signature malleability", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a002207fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a0", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a002207fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a0", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "043b37df5fb347c69a0f17d85c0c7ca83736883a825e13143d0fcfc8101e851e800de3c090b6ca21ba543517330c04b12f948c6badf14a63abffdf4ef8c7537026", - "wx" : "3b37df5fb347c69a0f17d85c0c7ca83736883a825e13143d0fcfc8101e851e80", - "wy" : "0de3c090b6ca21ba543517330c04b12f948c6badf14a63abffdf4ef8c7537026" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "043b37df5fb347c69a0f17d85c0c7ca83736883a825e13143d0fcfc8101e851e800de3c090b6ca21ba543517330c04b12f948c6badf14a63abffdf4ef8c7537026", + "wx": "3b37df5fb347c69a0f17d85c0c7ca83736883a825e13143d0fcfc8101e851e80", + "wy": "0de3c090b6ca21ba543517330c04b12f948c6badf14a63abffdf4ef8c7537026" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200043b37df5fb347c69a0f17d85c0c7ca83736883a825e13143d0fcfc8101e851e800de3c090b6ca21ba543517330c04b12f948c6badf14a63abffdf4ef8c7537026", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEOzffX7NHxpoPF9hcDHyoNzaIOoJeExQ9\nD8/IEB6FHoAN48CQtsohulQ1FzMMBLEvlIxrrfFKY6v/3074x1NwJg==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200043b37df5fb347c69a0f17d85c0c7ca83736883a825e13143d0fcfc8101e851e800de3c090b6ca21ba543517330c04b12f948c6badf14a63abffdf4ef8c7537026", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEOzffX7NHxpoPF9hcDHyoNzaIOoJeExQ9\nD8/IEB6FHoAN48CQtsohulQ1FzMMBLEvlIxrrfFKY6v/3074x1NwJg==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 388, - "comment" : "edge case for signature malleability", - "flags" : [ + "tcId": 388, + "comment": "edge case for signature malleability", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a002207fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a1", - "result" : "invalid" + 
"msg": "313233343030", + "sig": "304402207fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a002207fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a1", + "result": "invalid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04feb5163b0ece30ff3e03c7d55c4380fa2fa81ee2c0354942ff6f08c99d0cd82ce87de05ee1bda089d3e4e248fa0f721102acfffdf50e654be281433999df897e", - "wx" : "00feb5163b0ece30ff3e03c7d55c4380fa2fa81ee2c0354942ff6f08c99d0cd82c", - "wy" : "00e87de05ee1bda089d3e4e248fa0f721102acfffdf50e654be281433999df897e" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04feb5163b0ece30ff3e03c7d55c4380fa2fa81ee2c0354942ff6f08c99d0cd82ce87de05ee1bda089d3e4e248fa0f721102acfffdf50e654be281433999df897e", + "wx": "00feb5163b0ece30ff3e03c7d55c4380fa2fa81ee2c0354942ff6f08c99d0cd82c", + "wy": "00e87de05ee1bda089d3e4e248fa0f721102acfffdf50e654be281433999df897e" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004feb5163b0ece30ff3e03c7d55c4380fa2fa81ee2c0354942ff6f08c99d0cd82ce87de05ee1bda089d3e4e248fa0f721102acfffdf50e654be281433999df897e", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE/rUWOw7OMP8+A8fVXEOA+i+oHuLANUlC\n/28IyZ0M2CzofeBe4b2gidPk4kj6D3IRAqz//fUOZUvigUM5md+Jfg==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004feb5163b0ece30ff3e03c7d55c4380fa2fa81ee2c0354942ff6f08c99d0cd82ce87de05ee1bda089d3e4e248fa0f721102acfffdf50e654be281433999df897e", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE/rUWOw7OMP8+A8fVXEOA+i+oHuLANUlC\n/28IyZ0M2CzofeBe4b2gidPk4kj6D3IRAqz//fUOZUvigUM5md+Jfg==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 389, - "comment" : "u1 == 1", - "flags" : [ + "tcId": 389, + "comment": "u1 == 1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215b8022044a5ad0bd0636d9e12bc9e0a6bdd5e1bba77f523842193b3b82e448e05d5f11e", - "result" : "valid" + "msg": "313233343030", + "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215b8022044a5ad0bd0636d9e12bc9e0a6bdd5e1bba77f523842193b3b82e448e05d5f11e", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04238ced001cf22b8853e02edc89cbeca5050ba7e042a7a77f9382cd414922897640683d3094643840f295890aa4c18aa39b41d77dd0fb3bb2700e4f9ec284ffc2", - "wx" : "238ced001cf22b8853e02edc89cbeca5050ba7e042a7a77f9382cd4149228976", - "wy" : "40683d3094643840f295890aa4c18aa39b41d77dd0fb3bb2700e4f9ec284ffc2" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04238ced001cf22b8853e02edc89cbeca5050ba7e042a7a77f9382cd414922897640683d3094643840f295890aa4c18aa39b41d77dd0fb3bb2700e4f9ec284ffc2", + "wx": "238ced001cf22b8853e02edc89cbeca5050ba7e042a7a77f9382cd4149228976", + "wy": "40683d3094643840f295890aa4c18aa39b41d77dd0fb3bb2700e4f9ec284ffc2" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004238ced001cf22b8853e02edc89cbeca5050ba7e042a7a77f9382cd414922897640683d3094643840f295890aa4c18aa39b41d77dd0fb3bb2700e4f9ec284ffc2", - "publicKeyPem" : "-----BEGIN PUBLIC 
KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEI4ztABzyK4hT4C7cicvspQULp+BCp6d/\nk4LNQUkiiXZAaD0wlGQ4QPKViQqkwYqjm0HXfdD7O7JwDk+ewoT/wg==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004238ced001cf22b8853e02edc89cbeca5050ba7e042a7a77f9382cd414922897640683d3094643840f295890aa4c18aa39b41d77dd0fb3bb2700e4f9ec284ffc2", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEI4ztABzyK4hT4C7cicvspQULp+BCp6d/\nk4LNQUkiiXZAaD0wlGQ4QPKViQqkwYqjm0HXfdD7O7JwDk+ewoT/wg==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 390, - "comment" : "u1 == n - 1", - "flags" : [ + "tcId": 390, + "comment": "u1 == n - 1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215b8022044a5ad0bd0636d9e12bc9e0a6bdd5e1bba77f523842193b3b82e448e05d5f11e", - "result" : "valid" + "msg": "313233343030", + "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215b8022044a5ad0bd0636d9e12bc9e0a6bdd5e1bba77f523842193b3b82e448e05d5f11e", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04961cf64817c06c0e51b3c2736c922fde18bd8c4906fcd7f5ef66c4678508f35ed2c5d18168cfbe70f2f123bd7419232bb92dd69113e2941061889481c5a027bf", - "wx" : "00961cf64817c06c0e51b3c2736c922fde18bd8c4906fcd7f5ef66c4678508f35e", - "wy" : "00d2c5d18168cfbe70f2f123bd7419232bb92dd69113e2941061889481c5a027bf" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04961cf64817c06c0e51b3c2736c922fde18bd8c4906fcd7f5ef66c4678508f35ed2c5d18168cfbe70f2f123bd7419232bb92dd69113e2941061889481c5a027bf", + "wx": "00961cf64817c06c0e51b3c2736c922fde18bd8c4906fcd7f5ef66c4678508f35e", + "wy": "00d2c5d18168cfbe70f2f123bd7419232bb92dd69113e2941061889481c5a027bf" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004961cf64817c06c0e51b3c2736c922fde18bd8c4906fcd7f5ef66c4678508f35ed2c5d18168cfbe70f2f123bd7419232bb92dd69113e2941061889481c5a027bf", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAElhz2SBfAbA5Rs8JzbJIv3hi9jEkG/Nf1\n72bEZ4UI817SxdGBaM++cPLxI710GSMruS3WkRPilBBhiJSBxaAnvw==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004961cf64817c06c0e51b3c2736c922fde18bd8c4906fcd7f5ef66c4678508f35ed2c5d18168cfbe70f2f123bd7419232bb92dd69113e2941061889481c5a027bf", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAElhz2SBfAbA5Rs8JzbJIv3hi9jEkG/Nf1\n72bEZ4UI817SxdGBaM++cPLxI710GSMruS3WkRPilBBhiJSBxaAnvw==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 391, - "comment" : "u2 == 1", - "flags" : [ + "tcId": 391, + "comment": "u2 == 1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215b8022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215b8", - "result" : "valid" + "msg": "313233343030", + "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215b8022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215b8", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 
256, - "uncompressed" : "0413681eae168cd4ea7cf2e2a45d052742d10a9f64e796867dbdcb829fe0b1028816528760d177376c09df79de39557c329cc1753517acffe8fa2ec298026b8384", - "wx" : "13681eae168cd4ea7cf2e2a45d052742d10a9f64e796867dbdcb829fe0b10288", - "wy" : "16528760d177376c09df79de39557c329cc1753517acffe8fa2ec298026b8384" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "0413681eae168cd4ea7cf2e2a45d052742d10a9f64e796867dbdcb829fe0b1028816528760d177376c09df79de39557c329cc1753517acffe8fa2ec298026b8384", + "wx": "13681eae168cd4ea7cf2e2a45d052742d10a9f64e796867dbdcb829fe0b10288", + "wy": "16528760d177376c09df79de39557c329cc1753517acffe8fa2ec298026b8384" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000413681eae168cd4ea7cf2e2a45d052742d10a9f64e796867dbdcb829fe0b1028816528760d177376c09df79de39557c329cc1753517acffe8fa2ec298026b8384", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEE2gerhaM1Op88uKkXQUnQtEKn2TnloZ9\nvcuCn+CxAogWUodg0Xc3bAnfed45VXwynMF1NRes/+j6LsKYAmuDhA==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000413681eae168cd4ea7cf2e2a45d052742d10a9f64e796867dbdcb829fe0b1028816528760d177376c09df79de39557c329cc1753517acffe8fa2ec298026b8384", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEE2gerhaM1Op88uKkXQUnQtEKn2TnloZ9\nvcuCn+CxAogWUodg0Xc3bAnfed45VXwynMF1NRes/+j6LsKYAmuDhA==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 392, - "comment" : "u2 == n - 1", - "flags" : [ + "tcId": 392, + "comment": "u2 == n - 1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215b8022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215b8", - "result" : "valid" + "msg": "313233343030", + "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215b8022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215b8", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "045aa7abfdb6b4086d543325e5d79c6e95ce42f866d2bb84909633a04bb1aa31c291c80088794905e1da33336d874e2f91ccf45cc59185bede5dd6f3f7acaae18b", - "wx" : "5aa7abfdb6b4086d543325e5d79c6e95ce42f866d2bb84909633a04bb1aa31c2", - "wy" : "0091c80088794905e1da33336d874e2f91ccf45cc59185bede5dd6f3f7acaae18b" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "045aa7abfdb6b4086d543325e5d79c6e95ce42f866d2bb84909633a04bb1aa31c291c80088794905e1da33336d874e2f91ccf45cc59185bede5dd6f3f7acaae18b", + "wx": "5aa7abfdb6b4086d543325e5d79c6e95ce42f866d2bb84909633a04bb1aa31c2", + "wy": "0091c80088794905e1da33336d874e2f91ccf45cc59185bede5dd6f3f7acaae18b" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200045aa7abfdb6b4086d543325e5d79c6e95ce42f866d2bb84909633a04bb1aa31c291c80088794905e1da33336d874e2f91ccf45cc59185bede5dd6f3f7acaae18b", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEWqer/ba0CG1UMyXl15xulc5C+GbSu4SQ\nljOgS7GqMcKRyACIeUkF4dozM22HTi+RzPRcxZGFvt5d1vP3rKrhiw==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": 
"3056301006072a8648ce3d020106052b8104000a034200045aa7abfdb6b4086d543325e5d79c6e95ce42f866d2bb84909633a04bb1aa31c291c80088794905e1da33336d874e2f91ccf45cc59185bede5dd6f3f7acaae18b", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEWqer/ba0CG1UMyXl15xulc5C+GbSu4SQ\nljOgS7GqMcKRyACIeUkF4dozM22HTi+RzPRcxZGFvt5d1vP3rKrhiw==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 393, - "comment" : "edge case for u1", - "flags" : [ + "tcId": 393, + "comment": "edge case for u1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022016e1e459457679df5b9434ae23f474b3e8d2a70bd6b5dbe692ba16da01f1fb0a", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022016e1e459457679df5b9434ae23f474b3e8d2a70bd6b5dbe692ba16da01f1fb0a", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "0400277791b305a45b2b39590b2f05d3392a6c8182cef4eb540120e0f5c206c3e464108233fb0b8c3ac892d79ef8e0fbf92ed133addb4554270132584dc52eef41", - "wx" : "277791b305a45b2b39590b2f05d3392a6c8182cef4eb540120e0f5c206c3e4", - "wy" : "64108233fb0b8c3ac892d79ef8e0fbf92ed133addb4554270132584dc52eef41" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "0400277791b305a45b2b39590b2f05d3392a6c8182cef4eb540120e0f5c206c3e464108233fb0b8c3ac892d79ef8e0fbf92ed133addb4554270132584dc52eef41", + "wx": "277791b305a45b2b39590b2f05d3392a6c8182cef4eb540120e0f5c206c3e4", + "wy": "64108233fb0b8c3ac892d79ef8e0fbf92ed133addb4554270132584dc52eef41" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000400277791b305a45b2b39590b2f05d3392a6c8182cef4eb540120e0f5c206c3e464108233fb0b8c3ac892d79ef8e0fbf92ed133addb4554270132584dc52eef41", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEACd3kbMFpFsrOVkLLwXTOSpsgYLO9OtU\nASDg9cIGw+RkEIIz+wuMOsiS15744Pv5LtEzrdtFVCcBMlhNxS7vQQ==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000400277791b305a45b2b39590b2f05d3392a6c8182cef4eb540120e0f5c206c3e464108233fb0b8c3ac892d79ef8e0fbf92ed133addb4554270132584dc52eef41", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEACd3kbMFpFsrOVkLLwXTOSpsgYLO9OtU\nASDg9cIGw+RkEIIz+wuMOsiS15744Pv5LtEzrdtFVCcBMlhNxS7vQQ==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 394, - "comment" : "edge case for u1", - "flags" : [ + "tcId": 394, + "comment": "edge case for u1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02201c940f313f92647be257eccd7ed08b0baef3f0478f25871b53635302c5f6314a", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02201c940f313f92647be257eccd7ed08b0baef3f0478f25871b53635302c5f6314a", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "046efa092b68de9460f0bcc919005a5f6e80e19de98968be3cd2c770a9949bfb1ac75e6e5087d6550d5f9beb1e79e5029307bc255235e2d5dc99241ac3ab886c49", - "wx" : 
"6efa092b68de9460f0bcc919005a5f6e80e19de98968be3cd2c770a9949bfb1a", - "wy" : "00c75e6e5087d6550d5f9beb1e79e5029307bc255235e2d5dc99241ac3ab886c49" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "046efa092b68de9460f0bcc919005a5f6e80e19de98968be3cd2c770a9949bfb1ac75e6e5087d6550d5f9beb1e79e5029307bc255235e2d5dc99241ac3ab886c49", + "wx": "6efa092b68de9460f0bcc919005a5f6e80e19de98968be3cd2c770a9949bfb1a", + "wy": "00c75e6e5087d6550d5f9beb1e79e5029307bc255235e2d5dc99241ac3ab886c49" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200046efa092b68de9460f0bcc919005a5f6e80e19de98968be3cd2c770a9949bfb1ac75e6e5087d6550d5f9beb1e79e5029307bc255235e2d5dc99241ac3ab886c49", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEbvoJK2jelGDwvMkZAFpfboDhnemJaL48\n0sdwqZSb+xrHXm5Qh9ZVDV+b6x555QKTB7wlUjXi1dyZJBrDq4hsSQ==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200046efa092b68de9460f0bcc919005a5f6e80e19de98968be3cd2c770a9949bfb1ac75e6e5087d6550d5f9beb1e79e5029307bc255235e2d5dc99241ac3ab886c49", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEbvoJK2jelGDwvMkZAFpfboDhnemJaL48\n0sdwqZSb+xrHXm5Qh9ZVDV+b6x555QKTB7wlUjXi1dyZJBrDq4hsSQ==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 395, - "comment" : "edge case for u1", - "flags" : [ + "tcId": 395, + "comment": "edge case for u1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022015d94a85077b493f91cb7101ec63e1b01be58b594e855f45050a8c14062d689b", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022015d94a85077b493f91cb7101ec63e1b01be58b594e855f45050a8c14062d689b", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "0472d4a19c4f9d2cf5848ea40445b70d4696b5f02d632c0c654cc7d7eeb0c6d058e8c4cd9943e459174c7ac01fa742198e47e6c19a6bdb0c4f6c237831c1b3f942", - "wx" : "72d4a19c4f9d2cf5848ea40445b70d4696b5f02d632c0c654cc7d7eeb0c6d058", - "wy" : "00e8c4cd9943e459174c7ac01fa742198e47e6c19a6bdb0c4f6c237831c1b3f942" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "0472d4a19c4f9d2cf5848ea40445b70d4696b5f02d632c0c654cc7d7eeb0c6d058e8c4cd9943e459174c7ac01fa742198e47e6c19a6bdb0c4f6c237831c1b3f942", + "wx": "72d4a19c4f9d2cf5848ea40445b70d4696b5f02d632c0c654cc7d7eeb0c6d058", + "wy": "00e8c4cd9943e459174c7ac01fa742198e47e6c19a6bdb0c4f6c237831c1b3f942" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000472d4a19c4f9d2cf5848ea40445b70d4696b5f02d632c0c654cc7d7eeb0c6d058e8c4cd9943e459174c7ac01fa742198e47e6c19a6bdb0c4f6c237831c1b3f942", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEctShnE+dLPWEjqQERbcNRpa18C1jLAxl\nTMfX7rDG0FjoxM2ZQ+RZF0x6wB+nQhmOR+bBmmvbDE9sI3gxwbP5Qg==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000472d4a19c4f9d2cf5848ea40445b70d4696b5f02d632c0c654cc7d7eeb0c6d058e8c4cd9943e459174c7ac01fa742198e47e6c19a6bdb0c4f6c237831c1b3f942", + "publicKeyPem": "-----BEGIN PUBLIC 
KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEctShnE+dLPWEjqQERbcNRpa18C1jLAxl\nTMfX7rDG0FjoxM2ZQ+RZF0x6wB+nQhmOR+bBmmvbDE9sI3gxwbP5Qg==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 396, - "comment" : "edge case for u1", - "flags" : [ + "tcId": 396, + "comment": "edge case for u1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02205b1d27a7694c146244a5ad0bd0636d9d9ef3b9fb58385418d9c982105077d1b7", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02205b1d27a7694c146244a5ad0bd0636d9d9ef3b9fb58385418d9c982105077d1b7", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "042a8ea2f50dcced0c217575bdfa7cd47d1c6f100041ec0e35512794c1be7e740258f8c17122ed303fda7143eb58bede70295b653266013b0b0ebd3f053137f6ec", - "wx" : "2a8ea2f50dcced0c217575bdfa7cd47d1c6f100041ec0e35512794c1be7e7402", - "wy" : "58f8c17122ed303fda7143eb58bede70295b653266013b0b0ebd3f053137f6ec" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "042a8ea2f50dcced0c217575bdfa7cd47d1c6f100041ec0e35512794c1be7e740258f8c17122ed303fda7143eb58bede70295b653266013b0b0ebd3f053137f6ec", + "wx": "2a8ea2f50dcced0c217575bdfa7cd47d1c6f100041ec0e35512794c1be7e7402", + "wy": "58f8c17122ed303fda7143eb58bede70295b653266013b0b0ebd3f053137f6ec" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200042a8ea2f50dcced0c217575bdfa7cd47d1c6f100041ec0e35512794c1be7e740258f8c17122ed303fda7143eb58bede70295b653266013b0b0ebd3f053137f6ec", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEKo6i9Q3M7QwhdXW9+nzUfRxvEABB7A41\nUSeUwb5+dAJY+MFxIu0wP9pxQ+tYvt5wKVtlMmYBOwsOvT8FMTf27A==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200042a8ea2f50dcced0c217575bdfa7cd47d1c6f100041ec0e35512794c1be7e740258f8c17122ed303fda7143eb58bede70295b653266013b0b0ebd3f053137f6ec", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEKo6i9Q3M7QwhdXW9+nzUfRxvEABB7A41\nUSeUwb5+dAJY+MFxIu0wP9pxQ+tYvt5wKVtlMmYBOwsOvT8FMTf27A==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 397, - "comment" : "edge case for u1", - "flags" : [ + "tcId": 397, + "comment": "edge case for u1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02202d85896b3eb9dbb5a52f42f9c9261ed3fc46644ec65f06ade3fd78f257e43432", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02202d85896b3eb9dbb5a52f42f9c9261ed3fc46644ec65f06ade3fd78f257e43432", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "0488de689ce9af1e94be6a2089c8a8b1253ffdbb6c8e9c86249ba220001a4ad3b80c4998e54842f413b9edb1825acbb6335e81e4d184b2b01c8bebdc85d1f28946", - "wx" : "0088de689ce9af1e94be6a2089c8a8b1253ffdbb6c8e9c86249ba220001a4ad3b8", - "wy" : "0c4998e54842f413b9edb1825acbb6335e81e4d184b2b01c8bebdc85d1f28946" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + 
"keySize": 256, + "uncompressed": "0488de689ce9af1e94be6a2089c8a8b1253ffdbb6c8e9c86249ba220001a4ad3b80c4998e54842f413b9edb1825acbb6335e81e4d184b2b01c8bebdc85d1f28946", + "wx": "0088de689ce9af1e94be6a2089c8a8b1253ffdbb6c8e9c86249ba220001a4ad3b8", + "wy": "0c4998e54842f413b9edb1825acbb6335e81e4d184b2b01c8bebdc85d1f28946" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000488de689ce9af1e94be6a2089c8a8b1253ffdbb6c8e9c86249ba220001a4ad3b80c4998e54842f413b9edb1825acbb6335e81e4d184b2b01c8bebdc85d1f28946", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEiN5onOmvHpS+aiCJyKixJT/9u2yOnIYk\nm6IgABpK07gMSZjlSEL0E7ntsYJay7YzXoHk0YSysByL69yF0fKJRg==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000488de689ce9af1e94be6a2089c8a8b1253ffdbb6c8e9c86249ba220001a4ad3b80c4998e54842f413b9edb1825acbb6335e81e4d184b2b01c8bebdc85d1f28946", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEiN5onOmvHpS+aiCJyKixJT/9u2yOnIYk\nm6IgABpK07gMSZjlSEL0E7ntsYJay7YzXoHk0YSysByL69yF0fKJRg==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 398, - "comment" : "edge case for u1", - "flags" : [ + "tcId": 398, + "comment": "edge case for u1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02205b0b12d67d73b76b4a5e85f3924c3da7f88cc89d8cbe0d5bc7faf1e4afc86864", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02205b0b12d67d73b76b4a5e85f3924c3da7f88cc89d8cbe0d5bc7faf1e4afc86864", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04fea2d31f70f90d5fb3e00e186ac42ab3c1615cee714e0b4e1131b3d4d8225bf7b037a18df2ac15343f30f74067ddf29e817d5f77f8dce05714da59c094f0cda9", - "wx" : "00fea2d31f70f90d5fb3e00e186ac42ab3c1615cee714e0b4e1131b3d4d8225bf7", - "wy" : "00b037a18df2ac15343f30f74067ddf29e817d5f77f8dce05714da59c094f0cda9" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04fea2d31f70f90d5fb3e00e186ac42ab3c1615cee714e0b4e1131b3d4d8225bf7b037a18df2ac15343f30f74067ddf29e817d5f77f8dce05714da59c094f0cda9", + "wx": "00fea2d31f70f90d5fb3e00e186ac42ab3c1615cee714e0b4e1131b3d4d8225bf7", + "wy": "00b037a18df2ac15343f30f74067ddf29e817d5f77f8dce05714da59c094f0cda9" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004fea2d31f70f90d5fb3e00e186ac42ab3c1615cee714e0b4e1131b3d4d8225bf7b037a18df2ac15343f30f74067ddf29e817d5f77f8dce05714da59c094f0cda9", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE/qLTH3D5DV+z4A4YasQqs8FhXO5xTgtO\nETGz1NgiW/ewN6GN8qwVND8w90Bn3fKegX1fd/jc4FcU2lnAlPDNqQ==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004fea2d31f70f90d5fb3e00e186ac42ab3c1615cee714e0b4e1131b3d4d8225bf7b037a18df2ac15343f30f74067ddf29e817d5f77f8dce05714da59c094f0cda9", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE/qLTH3D5DV+z4A4YasQqs8FhXO5xTgtO\nETGz1NgiW/ewN6GN8qwVND8w90Bn3fKegX1fd/jc4FcU2lnAlPDNqQ==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 399, - "comment" : "edge case for u1", - "flags" : [ + "tcId": 399, + 
"comment": "edge case for u1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc0220694c146244a5ad0bd0636d9e12bc9e09e60e68b90d0b5e6c5dddd0cb694d8799", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc0220694c146244a5ad0bd0636d9e12bc9e09e60e68b90d0b5e6c5dddd0cb694d8799", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "047258911e3d423349166479dbe0b8341af7fbd03d0a7e10edccb36b6ceea5a3db17ac2b8992791128fa3b96dc2fbd4ca3bfa782ef2832fc6656943db18e7346b0", - "wx" : "7258911e3d423349166479dbe0b8341af7fbd03d0a7e10edccb36b6ceea5a3db", - "wy" : "17ac2b8992791128fa3b96dc2fbd4ca3bfa782ef2832fc6656943db18e7346b0" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "047258911e3d423349166479dbe0b8341af7fbd03d0a7e10edccb36b6ceea5a3db17ac2b8992791128fa3b96dc2fbd4ca3bfa782ef2832fc6656943db18e7346b0", + "wx": "7258911e3d423349166479dbe0b8341af7fbd03d0a7e10edccb36b6ceea5a3db", + "wy": "17ac2b8992791128fa3b96dc2fbd4ca3bfa782ef2832fc6656943db18e7346b0" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200047258911e3d423349166479dbe0b8341af7fbd03d0a7e10edccb36b6ceea5a3db17ac2b8992791128fa3b96dc2fbd4ca3bfa782ef2832fc6656943db18e7346b0", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEcliRHj1CM0kWZHnb4Lg0Gvf70D0KfhDt\nzLNrbO6lo9sXrCuJknkRKPo7ltwvvUyjv6eC7ygy/GZWlD2xjnNGsA==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200047258911e3d423349166479dbe0b8341af7fbd03d0a7e10edccb36b6ceea5a3db17ac2b8992791128fa3b96dc2fbd4ca3bfa782ef2832fc6656943db18e7346b0", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEcliRHj1CM0kWZHnb4Lg0Gvf70D0KfhDt\nzLNrbO6lo9sXrCuJknkRKPo7ltwvvUyjv6eC7ygy/GZWlD2xjnNGsA==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 400, - "comment" : "edge case for u1", - "flags" : [ + "tcId": 400, + "comment": "edge case for u1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02203d7f487c07bfc5f30846938a3dcef696444707cf9677254a92b06c63ab867d22", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02203d7f487c07bfc5f30846938a3dcef696444707cf9677254a92b06c63ab867d22", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "044f28461dea64474d6bb34d1499c97d37b9e95633df1ceeeaacd45016c98b3914c8818810b8cc06ddb40e8a1261c528faa589455d5a6df93b77bc5e0e493c7470", - "wx" : "4f28461dea64474d6bb34d1499c97d37b9e95633df1ceeeaacd45016c98b3914", - "wy" : "00c8818810b8cc06ddb40e8a1261c528faa589455d5a6df93b77bc5e0e493c7470" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "044f28461dea64474d6bb34d1499c97d37b9e95633df1ceeeaacd45016c98b3914c8818810b8cc06ddb40e8a1261c528faa589455d5a6df93b77bc5e0e493c7470", + "wx": "4f28461dea64474d6bb34d1499c97d37b9e95633df1ceeeaacd45016c98b3914", + "wy": 
"00c8818810b8cc06ddb40e8a1261c528faa589455d5a6df93b77bc5e0e493c7470" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200044f28461dea64474d6bb34d1499c97d37b9e95633df1ceeeaacd45016c98b3914c8818810b8cc06ddb40e8a1261c528faa589455d5a6df93b77bc5e0e493c7470", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAETyhGHepkR01rs00Umcl9N7npVjPfHO7q\nrNRQFsmLORTIgYgQuMwG3bQOihJhxSj6pYlFXVpt+Tt3vF4OSTx0cA==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200044f28461dea64474d6bb34d1499c97d37b9e95633df1ceeeaacd45016c98b3914c8818810b8cc06ddb40e8a1261c528faa589455d5a6df93b77bc5e0e493c7470", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAETyhGHepkR01rs00Umcl9N7npVjPfHO7q\nrNRQFsmLORTIgYgQuMwG3bQOihJhxSj6pYlFXVpt+Tt3vF4OSTx0cA==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 401, - "comment" : "edge case for u1", - "flags" : [ + "tcId": 401, + "comment": "edge case for u1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02206c7648fc0fbf8a06adb8b839f97b4ff7a800f11b1e37c593b261394599792ba4", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02206c7648fc0fbf8a06adb8b839f97b4ff7a800f11b1e37c593b261394599792ba4", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "0474f2a814fb5d8eca91a69b5e60712732b3937de32829be974ed7b68c5c2f5d66eff0f07c56f987a657f42196205f588c0f1d96fd8a63a5f238b48f478788fe3b", - "wx" : "74f2a814fb5d8eca91a69b5e60712732b3937de32829be974ed7b68c5c2f5d66", - "wy" : "00eff0f07c56f987a657f42196205f588c0f1d96fd8a63a5f238b48f478788fe3b" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "0474f2a814fb5d8eca91a69b5e60712732b3937de32829be974ed7b68c5c2f5d66eff0f07c56f987a657f42196205f588c0f1d96fd8a63a5f238b48f478788fe3b", + "wx": "74f2a814fb5d8eca91a69b5e60712732b3937de32829be974ed7b68c5c2f5d66", + "wy": "00eff0f07c56f987a657f42196205f588c0f1d96fd8a63a5f238b48f478788fe3b" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000474f2a814fb5d8eca91a69b5e60712732b3937de32829be974ed7b68c5c2f5d66eff0f07c56f987a657f42196205f588c0f1d96fd8a63a5f238b48f478788fe3b", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEdPKoFPtdjsqRppteYHEnMrOTfeMoKb6X\nTte2jFwvXWbv8PB8VvmHplf0IZYgX1iMDx2W/YpjpfI4tI9Hh4j+Ow==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000474f2a814fb5d8eca91a69b5e60712732b3937de32829be974ed7b68c5c2f5d66eff0f07c56f987a657f42196205f588c0f1d96fd8a63a5f238b48f478788fe3b", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEdPKoFPtdjsqRppteYHEnMrOTfeMoKb6X\nTte2jFwvXWbv8PB8VvmHplf0IZYgX1iMDx2W/YpjpfI4tI9Hh4j+Ow==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 402, - "comment" : "edge case for u1", - "flags" : [ + "tcId": 402, + "comment": "edge case for u1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc0220641c9c5d790dc09cdd3dfabb62cdf453e69747a7e3d7aa1a714189ef53171a99", - "result" : 
"valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc0220641c9c5d790dc09cdd3dfabb62cdf453e69747a7e3d7aa1a714189ef53171a99", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04195b51a7cc4a21b8274a70a90de779814c3c8ca358328208c09a29f336b82d6ab2416b7c92fffdc29c3b1282dd2a77a4d04df7f7452047393d849989c5cee9ad", - "wx" : "195b51a7cc4a21b8274a70a90de779814c3c8ca358328208c09a29f336b82d6a", - "wy" : "00b2416b7c92fffdc29c3b1282dd2a77a4d04df7f7452047393d849989c5cee9ad" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04195b51a7cc4a21b8274a70a90de779814c3c8ca358328208c09a29f336b82d6ab2416b7c92fffdc29c3b1282dd2a77a4d04df7f7452047393d849989c5cee9ad", + "wx": "195b51a7cc4a21b8274a70a90de779814c3c8ca358328208c09a29f336b82d6a", + "wy": "00b2416b7c92fffdc29c3b1282dd2a77a4d04df7f7452047393d849989c5cee9ad" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004195b51a7cc4a21b8274a70a90de779814c3c8ca358328208c09a29f336b82d6ab2416b7c92fffdc29c3b1282dd2a77a4d04df7f7452047393d849989c5cee9ad", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEGVtRp8xKIbgnSnCpDed5gUw8jKNYMoII\nwJop8za4LWqyQWt8kv/9wpw7EoLdKnek0E3390UgRzk9hJmJxc7prQ==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004195b51a7cc4a21b8274a70a90de779814c3c8ca358328208c09a29f336b82d6ab2416b7c92fffdc29c3b1282dd2a77a4d04df7f7452047393d849989c5cee9ad", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEGVtRp8xKIbgnSnCpDed5gUw8jKNYMoII\nwJop8za4LWqyQWt8kv/9wpw7EoLdKnek0E3390UgRzk9hJmJxc7prQ==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 403, - "comment" : "edge case for u1", - "flags" : [ + "tcId": 403, + "comment": "edge case for u1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022029798c5c45bdf58b4a7b2fdc2c46ab4af1218c7eeb9f0f27a88f1267674de3b0", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022029798c5c45bdf58b4a7b2fdc2c46ab4af1218c7eeb9f0f27a88f1267674de3b0", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04622fc74732034bec2ddf3bc16d34b3d1f7a327dd2a8c19bab4bb4fe3a24b58aa736b2f2fae76f4dfaecc9096333b01328d51eb3fda9c9227e90d0b449983c4f0", - "wx" : "622fc74732034bec2ddf3bc16d34b3d1f7a327dd2a8c19bab4bb4fe3a24b58aa", - "wy" : "736b2f2fae76f4dfaecc9096333b01328d51eb3fda9c9227e90d0b449983c4f0" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04622fc74732034bec2ddf3bc16d34b3d1f7a327dd2a8c19bab4bb4fe3a24b58aa736b2f2fae76f4dfaecc9096333b01328d51eb3fda9c9227e90d0b449983c4f0", + "wx": "622fc74732034bec2ddf3bc16d34b3d1f7a327dd2a8c19bab4bb4fe3a24b58aa", + "wy": "736b2f2fae76f4dfaecc9096333b01328d51eb3fda9c9227e90d0b449983c4f0" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004622fc74732034bec2ddf3bc16d34b3d1f7a327dd2a8c19bab4bb4fe3a24b58aa736b2f2fae76f4dfaecc9096333b01328d51eb3fda9c9227e90d0b449983c4f0", - "publicKeyPem" 
: "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEYi/HRzIDS+wt3zvBbTSz0fejJ90qjBm6\ntLtP46JLWKpzay8vrnb0367MkJYzOwEyjVHrP9qckifpDQtEmYPE8A==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004622fc74732034bec2ddf3bc16d34b3d1f7a327dd2a8c19bab4bb4fe3a24b58aa736b2f2fae76f4dfaecc9096333b01328d51eb3fda9c9227e90d0b449983c4f0", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEYi/HRzIDS+wt3zvBbTSz0fejJ90qjBm6\ntLtP46JLWKpzay8vrnb0367MkJYzOwEyjVHrP9qckifpDQtEmYPE8A==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 404, - "comment" : "edge case for u1", - "flags" : [ + "tcId": 404, + "comment": "edge case for u1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02200b70f22ca2bb3cefadca1a5711fa3a59f4695385eb5aedf3495d0b6d00f8fd85", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02200b70f22ca2bb3cefadca1a5711fa3a59f4695385eb5aedf3495d0b6d00f8fd85", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "041f7f85caf2d7550e7af9b65023ebb4dce3450311692309db269969b834b611c70827f45b78020ecbbaf484fdd5bfaae6870f1184c21581baf6ef82bd7b530f93", - "wx" : "1f7f85caf2d7550e7af9b65023ebb4dce3450311692309db269969b834b611c7", - "wy" : "0827f45b78020ecbbaf484fdd5bfaae6870f1184c21581baf6ef82bd7b530f93" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "041f7f85caf2d7550e7af9b65023ebb4dce3450311692309db269969b834b611c70827f45b78020ecbbaf484fdd5bfaae6870f1184c21581baf6ef82bd7b530f93", + "wx": "1f7f85caf2d7550e7af9b65023ebb4dce3450311692309db269969b834b611c7", + "wy": "0827f45b78020ecbbaf484fdd5bfaae6870f1184c21581baf6ef82bd7b530f93" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200041f7f85caf2d7550e7af9b65023ebb4dce3450311692309db269969b834b611c70827f45b78020ecbbaf484fdd5bfaae6870f1184c21581baf6ef82bd7b530f93", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEH3+FyvLXVQ56+bZQI+u03ONFAxFpIwnb\nJplpuDS2EccIJ/RbeAIOy7r0hP3Vv6rmhw8RhMIVgbr274K9e1MPkw==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200041f7f85caf2d7550e7af9b65023ebb4dce3450311692309db269969b834b611c70827f45b78020ecbbaf484fdd5bfaae6870f1184c21581baf6ef82bd7b530f93", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEH3+FyvLXVQ56+bZQI+u03ONFAxFpIwnb\nJplpuDS2EccIJ/RbeAIOy7r0hP3Vv6rmhw8RhMIVgbr274K9e1MPkw==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 405, - "comment" : "edge case for u1", - "flags" : [ + "tcId": 405, + "comment": "edge case for u1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022016e1e459457679df5b9434ae23f474b3e8d2a70bd6b5dbe692ba16da01f1fb0a", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022016e1e459457679df5b9434ae23f474b3e8d2a70bd6b5dbe692ba16da01f1fb0a", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", 
- "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "0449c197dc80ad1da47a4342b93893e8e1fb0bb94fc33a83e783c00b24c781377aefc20da92bac762951f72474becc734d4cc22ba81b895e282fdac4df7af0f37d", - "wx" : "49c197dc80ad1da47a4342b93893e8e1fb0bb94fc33a83e783c00b24c781377a", - "wy" : "00efc20da92bac762951f72474becc734d4cc22ba81b895e282fdac4df7af0f37d" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "0449c197dc80ad1da47a4342b93893e8e1fb0bb94fc33a83e783c00b24c781377aefc20da92bac762951f72474becc734d4cc22ba81b895e282fdac4df7af0f37d", + "wx": "49c197dc80ad1da47a4342b93893e8e1fb0bb94fc33a83e783c00b24c781377a", + "wy": "00efc20da92bac762951f72474becc734d4cc22ba81b895e282fdac4df7af0f37d" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000449c197dc80ad1da47a4342b93893e8e1fb0bb94fc33a83e783c00b24c781377aefc20da92bac762951f72474becc734d4cc22ba81b895e282fdac4df7af0f37d", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEScGX3ICtHaR6Q0K5OJPo4fsLuU/DOoPn\ng8ALJMeBN3rvwg2pK6x2KVH3JHS+zHNNTMIrqBuJXigv2sTfevDzfQ==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000449c197dc80ad1da47a4342b93893e8e1fb0bb94fc33a83e783c00b24c781377aefc20da92bac762951f72474becc734d4cc22ba81b895e282fdac4df7af0f37d", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEScGX3ICtHaR6Q0K5OJPo4fsLuU/DOoPn\ng8ALJMeBN3rvwg2pK6x2KVH3JHS+zHNNTMIrqBuJXigv2sTfevDzfQ==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 406, - "comment" : "edge case for u1", - "flags" : [ + "tcId": 406, + "comment": "edge case for u1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02202252d685e831b6cf095e4f0535eeaf0ddd3bfa91c210c9d9dc17224702eaf88f", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02202252d685e831b6cf095e4f0535eeaf0ddd3bfa91c210c9d9dc17224702eaf88f", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04d8cb68517b616a56400aa3868635e54b6f699598a2f6167757654980baf6acbe7ec8cf449c849aa03461a30efada41453c57c6e6fbc93bbc6fa49ada6dc0555c", - "wx" : "00d8cb68517b616a56400aa3868635e54b6f699598a2f6167757654980baf6acbe", - "wy" : "7ec8cf449c849aa03461a30efada41453c57c6e6fbc93bbc6fa49ada6dc0555c" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04d8cb68517b616a56400aa3868635e54b6f699598a2f6167757654980baf6acbe7ec8cf449c849aa03461a30efada41453c57c6e6fbc93bbc6fa49ada6dc0555c", + "wx": "00d8cb68517b616a56400aa3868635e54b6f699598a2f6167757654980baf6acbe", + "wy": "7ec8cf449c849aa03461a30efada41453c57c6e6fbc93bbc6fa49ada6dc0555c" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004d8cb68517b616a56400aa3868635e54b6f699598a2f6167757654980baf6acbe7ec8cf449c849aa03461a30efada41453c57c6e6fbc93bbc6fa49ada6dc0555c", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE2MtoUXthalZACqOGhjXlS29plZii9hZ3\nV2VJgLr2rL5+yM9EnISaoDRhow762kFFPFfG5vvJO7xvpJrabcBVXA==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": 
"3056301006072a8648ce3d020106052b8104000a03420004d8cb68517b616a56400aa3868635e54b6f699598a2f6167757654980baf6acbe7ec8cf449c849aa03461a30efada41453c57c6e6fbc93bbc6fa49ada6dc0555c", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE2MtoUXthalZACqOGhjXlS29plZii9hZ3\nV2VJgLr2rL5+yM9EnISaoDRhow762kFFPFfG5vvJO7xvpJrabcBVXA==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 407, - "comment" : "edge case for u1", - "flags" : [ + "tcId": 407, + "comment": "edge case for u1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022075135abd7c425b60371a477f09ce0f274f64a8c6b061a07b5d63e93c65046c53", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022075135abd7c425b60371a477f09ce0f274f64a8c6b061a07b5d63e93c65046c53", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04030713fb63f2aa6fe2cadf1b20efc259c77445dafa87dac398b84065ca347df3b227818de1a39b589cb071d83e5317cccdc2338e51e312fe31d8dc34a4801750", - "wx" : "030713fb63f2aa6fe2cadf1b20efc259c77445dafa87dac398b84065ca347df3", - "wy" : "00b227818de1a39b589cb071d83e5317cccdc2338e51e312fe31d8dc34a4801750" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04030713fb63f2aa6fe2cadf1b20efc259c77445dafa87dac398b84065ca347df3b227818de1a39b589cb071d83e5317cccdc2338e51e312fe31d8dc34a4801750", + "wx": "030713fb63f2aa6fe2cadf1b20efc259c77445dafa87dac398b84065ca347df3", + "wy": "00b227818de1a39b589cb071d83e5317cccdc2338e51e312fe31d8dc34a4801750" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004030713fb63f2aa6fe2cadf1b20efc259c77445dafa87dac398b84065ca347df3b227818de1a39b589cb071d83e5317cccdc2338e51e312fe31d8dc34a4801750", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEAwcT+2Pyqm/iyt8bIO/CWcd0Rdr6h9rD\nmLhAZco0ffOyJ4GN4aObWJywcdg+UxfMzcIzjlHjEv4x2Nw0pIAXUA==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004030713fb63f2aa6fe2cadf1b20efc259c77445dafa87dac398b84065ca347df3b227818de1a39b589cb071d83e5317cccdc2338e51e312fe31d8dc34a4801750", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEAwcT+2Pyqm/iyt8bIO/CWcd0Rdr6h9rD\nmLhAZco0ffOyJ4GN4aObWJywcdg+UxfMzcIzjlHjEv4x2Nw0pIAXUA==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 408, - "comment" : "edge case for u2", - "flags" : [ + "tcId": 408, + "comment": "edge case for u2", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02202aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa3e3a49a23a6d8abe95461f8445676b17", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02202aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa3e3a49a23a6d8abe95461f8445676b17", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04babb3677b0955802d8e929a41355640eaf1ea1353f8a771331c4946e3480afa7252f196c87ed3d2a59d3b1b559137fed0013fecefc19fb5a92682b9bca51b950", - "wx" : 
"00babb3677b0955802d8e929a41355640eaf1ea1353f8a771331c4946e3480afa7", - "wy" : "252f196c87ed3d2a59d3b1b559137fed0013fecefc19fb5a92682b9bca51b950" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04babb3677b0955802d8e929a41355640eaf1ea1353f8a771331c4946e3480afa7252f196c87ed3d2a59d3b1b559137fed0013fecefc19fb5a92682b9bca51b950", + "wx": "00babb3677b0955802d8e929a41355640eaf1ea1353f8a771331c4946e3480afa7", + "wy": "252f196c87ed3d2a59d3b1b559137fed0013fecefc19fb5a92682b9bca51b950" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004babb3677b0955802d8e929a41355640eaf1ea1353f8a771331c4946e3480afa7252f196c87ed3d2a59d3b1b559137fed0013fecefc19fb5a92682b9bca51b950", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEurs2d7CVWALY6SmkE1VkDq8eoTU/incT\nMcSUbjSAr6clLxlsh+09KlnTsbVZE3/tABP+zvwZ+1qSaCubylG5UA==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004babb3677b0955802d8e929a41355640eaf1ea1353f8a771331c4946e3480afa7252f196c87ed3d2a59d3b1b559137fed0013fecefc19fb5a92682b9bca51b950", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEurs2d7CVWALY6SmkE1VkDq8eoTU/incT\nMcSUbjSAr6clLxlsh+09KlnTsbVZE3/tABP+zvwZ+1qSaCubylG5UA==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 409, - "comment" : "edge case for u2", - "flags" : [ + "tcId": 409, + "comment": "edge case for u2", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02203e888377ac6c71ac9dec3fdb9b56c9feaf0cfaca9f827fc5eb65fc3eac811210", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02203e888377ac6c71ac9dec3fdb9b56c9feaf0cfaca9f827fc5eb65fc3eac811210", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "041aab2018793471111a8a0e9b143fde02fc95920796d3a63de329b424396fba60bbe4130705174792441b318d3aa31dfe8577821e9b446ec573d272e036c4ebe9", - "wx" : "1aab2018793471111a8a0e9b143fde02fc95920796d3a63de329b424396fba60", - "wy" : "00bbe4130705174792441b318d3aa31dfe8577821e9b446ec573d272e036c4ebe9" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "041aab2018793471111a8a0e9b143fde02fc95920796d3a63de329b424396fba60bbe4130705174792441b318d3aa31dfe8577821e9b446ec573d272e036c4ebe9", + "wx": "1aab2018793471111a8a0e9b143fde02fc95920796d3a63de329b424396fba60", + "wy": "00bbe4130705174792441b318d3aa31dfe8577821e9b446ec573d272e036c4ebe9" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200041aab2018793471111a8a0e9b143fde02fc95920796d3a63de329b424396fba60bbe4130705174792441b318d3aa31dfe8577821e9b446ec573d272e036c4ebe9", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEGqsgGHk0cREaig6bFD/eAvyVkgeW06Y9\n4ym0JDlvumC75BMHBRdHkkQbMY06ox3+hXeCHptEbsVz0nLgNsTr6Q==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200041aab2018793471111a8a0e9b143fde02fc95920796d3a63de329b424396fba60bbe4130705174792441b318d3aa31dfe8577821e9b446ec573d272e036c4ebe9", + "publicKeyPem": "-----BEGIN PUBLIC 
KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEGqsgGHk0cREaig6bFD/eAvyVkgeW06Y9\n4ym0JDlvumC75BMHBRdHkkQbMY06ox3+hXeCHptEbsVz0nLgNsTr6Q==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 410, - "comment" : "edge case for u2", - "flags" : [ + "tcId": 410, + "comment": "edge case for u2", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022030bbb794db588363b40679f6c182a50d3ce9679acdd3ffbe36d7813dacbdc818", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022030bbb794db588363b40679f6c182a50d3ce9679acdd3ffbe36d7813dacbdc818", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "048cb0b909499c83ea806cd885b1dd467a0119f06a88a0276eb0cfda274535a8ff47b5428833bc3f2c8bf9d9041158cf33718a69961cd01729bc0011d1e586ab75", - "wx" : "008cb0b909499c83ea806cd885b1dd467a0119f06a88a0276eb0cfda274535a8ff", - "wy" : "47b5428833bc3f2c8bf9d9041158cf33718a69961cd01729bc0011d1e586ab75" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "048cb0b909499c83ea806cd885b1dd467a0119f06a88a0276eb0cfda274535a8ff47b5428833bc3f2c8bf9d9041158cf33718a69961cd01729bc0011d1e586ab75", + "wx": "008cb0b909499c83ea806cd885b1dd467a0119f06a88a0276eb0cfda274535a8ff", + "wy": "47b5428833bc3f2c8bf9d9041158cf33718a69961cd01729bc0011d1e586ab75" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200048cb0b909499c83ea806cd885b1dd467a0119f06a88a0276eb0cfda274535a8ff47b5428833bc3f2c8bf9d9041158cf33718a69961cd01729bc0011d1e586ab75", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEjLC5CUmcg+qAbNiFsd1GegEZ8GqIoCdu\nsM/aJ0U1qP9HtUKIM7w/LIv52QQRWM8zcYpplhzQFym8ABHR5YardQ==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200048cb0b909499c83ea806cd885b1dd467a0119f06a88a0276eb0cfda274535a8ff47b5428833bc3f2c8bf9d9041158cf33718a69961cd01729bc0011d1e586ab75", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEjLC5CUmcg+qAbNiFsd1GegEZ8GqIoCdu\nsM/aJ0U1qP9HtUKIM7w/LIv52QQRWM8zcYpplhzQFym8ABHR5YardQ==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 411, - "comment" : "edge case for u2", - "flags" : [ + "tcId": 411, + "comment": "edge case for u2", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02202c37fd995622c4fb7fffffffffffffffc7cee745110cb45ab558ed7c90c15a2f", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02202c37fd995622c4fb7fffffffffffffffc7cee745110cb45ab558ed7c90c15a2f", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "048f03cf1a42272bb1532723093f72e6feeac85e1700e9fbe9a6a2dd642d74bf5d3b89a7189dad8cf75fc22f6f158aa27f9c2ca00daca785be3358f2bda3862ca0", - "wx" : "008f03cf1a42272bb1532723093f72e6feeac85e1700e9fbe9a6a2dd642d74bf5d", - "wy" : "3b89a7189dad8cf75fc22f6f158aa27f9c2ca00daca785be3358f2bda3862ca0" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", 
+ "keySize": 256, + "uncompressed": "048f03cf1a42272bb1532723093f72e6feeac85e1700e9fbe9a6a2dd642d74bf5d3b89a7189dad8cf75fc22f6f158aa27f9c2ca00daca785be3358f2bda3862ca0", + "wx": "008f03cf1a42272bb1532723093f72e6feeac85e1700e9fbe9a6a2dd642d74bf5d", + "wy": "3b89a7189dad8cf75fc22f6f158aa27f9c2ca00daca785be3358f2bda3862ca0" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200048f03cf1a42272bb1532723093f72e6feeac85e1700e9fbe9a6a2dd642d74bf5d3b89a7189dad8cf75fc22f6f158aa27f9c2ca00daca785be3358f2bda3862ca0", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEjwPPGkInK7FTJyMJP3Lm/urIXhcA6fvp\npqLdZC10v107iacYna2M91/CL28ViqJ/nCygDaynhb4zWPK9o4YsoA==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200048f03cf1a42272bb1532723093f72e6feeac85e1700e9fbe9a6a2dd642d74bf5d3b89a7189dad8cf75fc22f6f158aa27f9c2ca00daca785be3358f2bda3862ca0", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEjwPPGkInK7FTJyMJP3Lm/urIXhcA6fvp\npqLdZC10v107iacYna2M91/CL28ViqJ/nCygDaynhb4zWPK9o4YsoA==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 412, - "comment" : "edge case for u2", - "flags" : [ + "tcId": 412, + "comment": "edge case for u2", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02207fd995622c4fb7ffffffffffffffffff5d883ffab5b32652ccdcaa290fccb97d", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02207fd995622c4fb7ffffffffffffffffff5d883ffab5b32652ccdcaa290fccb97d", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "0444de3b9c7a57a8c9e820952753421e7d987bb3d79f71f013805c897e018f8acea2460758c8f98d3fdce121a943659e372c326fff2e5fc2ae7fa3f79daae13c12", - "wx" : "44de3b9c7a57a8c9e820952753421e7d987bb3d79f71f013805c897e018f8ace", - "wy" : "00a2460758c8f98d3fdce121a943659e372c326fff2e5fc2ae7fa3f79daae13c12" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "0444de3b9c7a57a8c9e820952753421e7d987bb3d79f71f013805c897e018f8acea2460758c8f98d3fdce121a943659e372c326fff2e5fc2ae7fa3f79daae13c12", + "wx": "44de3b9c7a57a8c9e820952753421e7d987bb3d79f71f013805c897e018f8ace", + "wy": "00a2460758c8f98d3fdce121a943659e372c326fff2e5fc2ae7fa3f79daae13c12" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000444de3b9c7a57a8c9e820952753421e7d987bb3d79f71f013805c897e018f8acea2460758c8f98d3fdce121a943659e372c326fff2e5fc2ae7fa3f79daae13c12", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAERN47nHpXqMnoIJUnU0IefZh7s9efcfAT\ngFyJfgGPis6iRgdYyPmNP9zhIalDZZ43LDJv/y5fwq5/o/edquE8Eg==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000444de3b9c7a57a8c9e820952753421e7d987bb3d79f71f013805c897e018f8acea2460758c8f98d3fdce121a943659e372c326fff2e5fc2ae7fa3f79daae13c12", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAERN47nHpXqMnoIJUnU0IefZh7s9efcfAT\ngFyJfgGPis6iRgdYyPmNP9zhIalDZZ43LDJv/y5fwq5/o/edquE8Eg==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 413, - "comment" : "edge case for u2", - "flags" : [ + "tcId": 413, + 
"comment": "edge case for u2", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304302207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc021f4cd53ba7608fffffffffffffffffffff9e5cf143e2539626190a3ab09cce47", - "result" : "valid" + "msg": "313233343030", + "sig": "304302207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc021f4cd53ba7608fffffffffffffffffffff9e5cf143e2539626190a3ab09cce47", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "046fb8b2b48e33031268ad6a517484dc8839ea90f6669ea0c7ac3233e2ac31394a0ac8bbe7f73c2ff4df9978727ac1dfc2fd58647d20f31f99105316b64671f204", - "wx" : "6fb8b2b48e33031268ad6a517484dc8839ea90f6669ea0c7ac3233e2ac31394a", - "wy" : "0ac8bbe7f73c2ff4df9978727ac1dfc2fd58647d20f31f99105316b64671f204" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "046fb8b2b48e33031268ad6a517484dc8839ea90f6669ea0c7ac3233e2ac31394a0ac8bbe7f73c2ff4df9978727ac1dfc2fd58647d20f31f99105316b64671f204", + "wx": "6fb8b2b48e33031268ad6a517484dc8839ea90f6669ea0c7ac3233e2ac31394a", + "wy": "0ac8bbe7f73c2ff4df9978727ac1dfc2fd58647d20f31f99105316b64671f204" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200046fb8b2b48e33031268ad6a517484dc8839ea90f6669ea0c7ac3233e2ac31394a0ac8bbe7f73c2ff4df9978727ac1dfc2fd58647d20f31f99105316b64671f204", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEb7iytI4zAxJorWpRdITciDnqkPZmnqDH\nrDIz4qwxOUoKyLvn9zwv9N+ZeHJ6wd/C/VhkfSDzH5kQUxa2RnHyBA==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200046fb8b2b48e33031268ad6a517484dc8839ea90f6669ea0c7ac3233e2ac31394a0ac8bbe7f73c2ff4df9978727ac1dfc2fd58647d20f31f99105316b64671f204", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEb7iytI4zAxJorWpRdITciDnqkPZmnqDH\nrDIz4qwxOUoKyLvn9zwv9N+ZeHJ6wd/C/VhkfSDzH5kQUxa2RnHyBA==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 414, - "comment" : "edge case for u2", - "flags" : [ + "tcId": 414, + "comment": "edge case for u2", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02205622c4fb7fffffffffffffffffffffff928a8f1c7ac7bec1808b9f61c01ec327", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02205622c4fb7fffffffffffffffffffffff928a8f1c7ac7bec1808b9f61c01ec327", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04bea71122a048693e905ff602b3cf9dd18af69b9fc9d8431d2b1dd26b942c95e6f43c7b8b95eb62082c12db9dbda7fe38e45cbe4a4886907fb81bdb0c5ea9246c", - "wx" : "00bea71122a048693e905ff602b3cf9dd18af69b9fc9d8431d2b1dd26b942c95e6", - "wy" : "00f43c7b8b95eb62082c12db9dbda7fe38e45cbe4a4886907fb81bdb0c5ea9246c" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04bea71122a048693e905ff602b3cf9dd18af69b9fc9d8431d2b1dd26b942c95e6f43c7b8b95eb62082c12db9dbda7fe38e45cbe4a4886907fb81bdb0c5ea9246c", + "wx": "00bea71122a048693e905ff602b3cf9dd18af69b9fc9d8431d2b1dd26b942c95e6", + "wy": 
"00f43c7b8b95eb62082c12db9dbda7fe38e45cbe4a4886907fb81bdb0c5ea9246c" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004bea71122a048693e905ff602b3cf9dd18af69b9fc9d8431d2b1dd26b942c95e6f43c7b8b95eb62082c12db9dbda7fe38e45cbe4a4886907fb81bdb0c5ea9246c", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEvqcRIqBIaT6QX/YCs8+d0Yr2m5/J2EMd\nKx3Sa5Qsleb0PHuLletiCCwS2529p/445Fy+SkiGkH+4G9sMXqkkbA==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004bea71122a048693e905ff602b3cf9dd18af69b9fc9d8431d2b1dd26b942c95e6f43c7b8b95eb62082c12db9dbda7fe38e45cbe4a4886907fb81bdb0c5ea9246c", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEvqcRIqBIaT6QX/YCs8+d0Yr2m5/J2EMd\nKx3Sa5Qsleb0PHuLletiCCwS2529p/445Fy+SkiGkH+4G9sMXqkkbA==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 415, - "comment" : "edge case for u2", - "flags" : [ + "tcId": 415, + "comment": "edge case for u2", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022044104104104104104104104104104103b87853fd3b7d3f8e175125b4382f25ed", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022044104104104104104104104104104103b87853fd3b7d3f8e175125b4382f25ed", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04da918c731ba06a20cb94ef33b778e981a404a305f1941fe33666b45b03353156e2bb2694f575b45183be78e5c9b5210bf3bf488fd4c8294516d89572ca4f5391", - "wx" : "00da918c731ba06a20cb94ef33b778e981a404a305f1941fe33666b45b03353156", - "wy" : "00e2bb2694f575b45183be78e5c9b5210bf3bf488fd4c8294516d89572ca4f5391" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04da918c731ba06a20cb94ef33b778e981a404a305f1941fe33666b45b03353156e2bb2694f575b45183be78e5c9b5210bf3bf488fd4c8294516d89572ca4f5391", + "wx": "00da918c731ba06a20cb94ef33b778e981a404a305f1941fe33666b45b03353156", + "wy": "00e2bb2694f575b45183be78e5c9b5210bf3bf488fd4c8294516d89572ca4f5391" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004da918c731ba06a20cb94ef33b778e981a404a305f1941fe33666b45b03353156e2bb2694f575b45183be78e5c9b5210bf3bf488fd4c8294516d89572ca4f5391", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE2pGMcxugaiDLlO8zt3jpgaQEowXxlB/j\nNma0WwM1MVbiuyaU9XW0UYO+eOXJtSEL879Ij9TIKUUW2JVyyk9TkQ==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004da918c731ba06a20cb94ef33b778e981a404a305f1941fe33666b45b03353156e2bb2694f575b45183be78e5c9b5210bf3bf488fd4c8294516d89572ca4f5391", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE2pGMcxugaiDLlO8zt3jpgaQEowXxlB/j\nNma0WwM1MVbiuyaU9XW0UYO+eOXJtSEL879Ij9TIKUUW2JVyyk9TkQ==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 416, - "comment" : "edge case for u2", - "flags" : [ + "tcId": 416, + "comment": "edge case for u2", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02202739ce739ce739ce739ce739ce739ce705560298d1f2f08dc419ac273a5b54d9", - 
"result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02202739ce739ce739ce739ce739ce739ce705560298d1f2f08dc419ac273a5b54d9", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "043007e92c3937dade7964dfa35b0eff031f7eb02aed0a0314411106cdeb70fe3d5a7546fc0552997b20e3d6f413e75e2cb66e116322697114b79bac734bfc4dc5", - "wx" : "3007e92c3937dade7964dfa35b0eff031f7eb02aed0a0314411106cdeb70fe3d", - "wy" : "5a7546fc0552997b20e3d6f413e75e2cb66e116322697114b79bac734bfc4dc5" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "043007e92c3937dade7964dfa35b0eff031f7eb02aed0a0314411106cdeb70fe3d5a7546fc0552997b20e3d6f413e75e2cb66e116322697114b79bac734bfc4dc5", + "wx": "3007e92c3937dade7964dfa35b0eff031f7eb02aed0a0314411106cdeb70fe3d", + "wy": "5a7546fc0552997b20e3d6f413e75e2cb66e116322697114b79bac734bfc4dc5" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200043007e92c3937dade7964dfa35b0eff031f7eb02aed0a0314411106cdeb70fe3d5a7546fc0552997b20e3d6f413e75e2cb66e116322697114b79bac734bfc4dc5", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEMAfpLDk32t55ZN+jWw7/Ax9+sCrtCgMU\nQREGzetw/j1adUb8BVKZeyDj1vQT514stm4RYyJpcRS3m6xzS/xNxQ==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200043007e92c3937dade7964dfa35b0eff031f7eb02aed0a0314411106cdeb70fe3d5a7546fc0552997b20e3d6f413e75e2cb66e116322697114b79bac734bfc4dc5", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEMAfpLDk32t55ZN+jWw7/Ax9+sCrtCgMU\nQREGzetw/j1adUb8BVKZeyDj1vQT514stm4RYyJpcRS3m6xzS/xNxQ==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 417, - "comment" : "edge case for u2", - "flags" : [ + "tcId": 417, + "comment": "edge case for u2", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02204888888888888888888888888888888831c83ae82ebe0898776b4c69d11f88de", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02204888888888888888888888888888888831c83ae82ebe0898776b4c69d11f88de", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "0460e734ef5624d3cbf0ddd375011bd663d6d6aebc644eb599fdf98dbdcd18ce9bd2d90b3ac31f139af832cccf6ccbbb2c6ea11fa97370dc9906da474d7d8a7567", - "wx" : "60e734ef5624d3cbf0ddd375011bd663d6d6aebc644eb599fdf98dbdcd18ce9b", - "wy" : "00d2d90b3ac31f139af832cccf6ccbbb2c6ea11fa97370dc9906da474d7d8a7567" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "0460e734ef5624d3cbf0ddd375011bd663d6d6aebc644eb599fdf98dbdcd18ce9bd2d90b3ac31f139af832cccf6ccbbb2c6ea11fa97370dc9906da474d7d8a7567", + "wx": "60e734ef5624d3cbf0ddd375011bd663d6d6aebc644eb599fdf98dbdcd18ce9b", + "wy": "00d2d90b3ac31f139af832cccf6ccbbb2c6ea11fa97370dc9906da474d7d8a7567" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000460e734ef5624d3cbf0ddd375011bd663d6d6aebc644eb599fdf98dbdcd18ce9bd2d90b3ac31f139af832cccf6ccbbb2c6ea11fa97370dc9906da474d7d8a7567", - 
"publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEYOc071Yk08vw3dN1ARvWY9bWrrxkTrWZ\n/fmNvc0YzpvS2Qs6wx8TmvgyzM9sy7ssbqEfqXNw3JkG2kdNfYp1Zw==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000460e734ef5624d3cbf0ddd375011bd663d6d6aebc644eb599fdf98dbdcd18ce9bd2d90b3ac31f139af832cccf6ccbbb2c6ea11fa97370dc9906da474d7d8a7567", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEYOc071Yk08vw3dN1ARvWY9bWrrxkTrWZ\n/fmNvc0YzpvS2Qs6wx8TmvgyzM9sy7ssbqEfqXNw3JkG2kdNfYp1Zw==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 418, - "comment" : "edge case for u2", - "flags" : [ + "tcId": 418, + "comment": "edge case for u2", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02206492492492492492492492492492492406dd3a19b8d5fb875235963c593bd2d3", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02206492492492492492492492492492492406dd3a19b8d5fb875235963c593bd2d3", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "0485a900e97858f693c0b7dfa261e380dad6ea046d1f65ddeeedd5f7d8af0ba33769744d15add4f6c0bc3b0da2aec93b34cb8c65f9340ddf74e7b0009eeeccce3c", - "wx" : "0085a900e97858f693c0b7dfa261e380dad6ea046d1f65ddeeedd5f7d8af0ba337", - "wy" : "69744d15add4f6c0bc3b0da2aec93b34cb8c65f9340ddf74e7b0009eeeccce3c" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "0485a900e97858f693c0b7dfa261e380dad6ea046d1f65ddeeedd5f7d8af0ba33769744d15add4f6c0bc3b0da2aec93b34cb8c65f9340ddf74e7b0009eeeccce3c", + "wx": "0085a900e97858f693c0b7dfa261e380dad6ea046d1f65ddeeedd5f7d8af0ba337", + "wy": "69744d15add4f6c0bc3b0da2aec93b34cb8c65f9340ddf74e7b0009eeeccce3c" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000485a900e97858f693c0b7dfa261e380dad6ea046d1f65ddeeedd5f7d8af0ba33769744d15add4f6c0bc3b0da2aec93b34cb8c65f9340ddf74e7b0009eeeccce3c", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEhakA6XhY9pPAt9+iYeOA2tbqBG0fZd3u\n7dX32K8LozdpdE0VrdT2wLw7DaKuyTs0y4xl+TQN33TnsACe7szOPA==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000485a900e97858f693c0b7dfa261e380dad6ea046d1f65ddeeedd5f7d8af0ba33769744d15add4f6c0bc3b0da2aec93b34cb8c65f9340ddf74e7b0009eeeccce3c", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEhakA6XhY9pPAt9+iYeOA2tbqBG0fZd3u\n7dX32K8LozdpdE0VrdT2wLw7DaKuyTs0y4xl+TQN33TnsACe7szOPA==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 419, - "comment" : "edge case for u2", - "flags" : [ + "tcId": 419, + "comment": "edge case for u2", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02206aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa3e3a49a23a6d8abe95461f8445676b15", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02206aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa3e3a49a23a6d8abe95461f8445676b15", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - 
"type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "0438066f75d88efc4c93de36f49e037b234cc18b1de5608750a62cab0345401046a3e84bed8cfcb819ef4d550444f2ce4b651766b69e2e2901f88836ff90034fed", - "wx" : "38066f75d88efc4c93de36f49e037b234cc18b1de5608750a62cab0345401046", - "wy" : "00a3e84bed8cfcb819ef4d550444f2ce4b651766b69e2e2901f88836ff90034fed" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "0438066f75d88efc4c93de36f49e037b234cc18b1de5608750a62cab0345401046a3e84bed8cfcb819ef4d550444f2ce4b651766b69e2e2901f88836ff90034fed", + "wx": "38066f75d88efc4c93de36f49e037b234cc18b1de5608750a62cab0345401046", + "wy": "00a3e84bed8cfcb819ef4d550444f2ce4b651766b69e2e2901f88836ff90034fed" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000438066f75d88efc4c93de36f49e037b234cc18b1de5608750a62cab0345401046a3e84bed8cfcb819ef4d550444f2ce4b651766b69e2e2901f88836ff90034fed", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEOAZvddiO/EyT3jb0ngN7I0zBix3lYIdQ\npiyrA0VAEEaj6EvtjPy4Ge9NVQRE8s5LZRdmtp4uKQH4iDb/kANP7Q==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000438066f75d88efc4c93de36f49e037b234cc18b1de5608750a62cab0345401046a3e84bed8cfcb819ef4d550444f2ce4b651766b69e2e2901f88836ff90034fed", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEOAZvddiO/EyT3jb0ngN7I0zBix3lYIdQ\npiyrA0VAEEaj6EvtjPy4Ge9NVQRE8s5LZRdmtp4uKQH4iDb/kANP7Q==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 420, - "comment" : "edge case for u2", - "flags" : [ + "tcId": 420, + "comment": "edge case for u2", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02202aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa3e3a49a23a6d8abe95461f8445676b17", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02202aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa3e3a49a23a6d8abe95461f8445676b17", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "0498f68177dc95c1b4cbfa5245488ca523a7d5629470d035d621a443c72f39aabfa33d29546fa1c648f2c7d5ccf70cf1ce4ab79b5db1ac059dbecd068dbdff1b89", - "wx" : "0098f68177dc95c1b4cbfa5245488ca523a7d5629470d035d621a443c72f39aabf", - "wy" : "00a33d29546fa1c648f2c7d5ccf70cf1ce4ab79b5db1ac059dbecd068dbdff1b89" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "0498f68177dc95c1b4cbfa5245488ca523a7d5629470d035d621a443c72f39aabfa33d29546fa1c648f2c7d5ccf70cf1ce4ab79b5db1ac059dbecd068dbdff1b89", + "wx": "0098f68177dc95c1b4cbfa5245488ca523a7d5629470d035d621a443c72f39aabf", + "wy": "00a33d29546fa1c648f2c7d5ccf70cf1ce4ab79b5db1ac059dbecd068dbdff1b89" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000498f68177dc95c1b4cbfa5245488ca523a7d5629470d035d621a443c72f39aabfa33d29546fa1c648f2c7d5ccf70cf1ce4ab79b5db1ac059dbecd068dbdff1b89", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEmPaBd9yVwbTL+lJFSIylI6fVYpRw0DXW\nIaRDxy85qr+jPSlUb6HGSPLH1cz3DPHOSrebXbGsBZ2+zQaNvf8biQ==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": 
"3056301006072a8648ce3d020106052b8104000a0342000498f68177dc95c1b4cbfa5245488ca523a7d5629470d035d621a443c72f39aabfa33d29546fa1c648f2c7d5ccf70cf1ce4ab79b5db1ac059dbecd068dbdff1b89", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEmPaBd9yVwbTL+lJFSIylI6fVYpRw0DXW\nIaRDxy85qr+jPSlUb6HGSPLH1cz3DPHOSrebXbGsBZ2+zQaNvf8biQ==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 421, - "comment" : "edge case for u2", - "flags" : [ + "tcId": 421, + "comment": "edge case for u2", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02203ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02203ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "045c2bbfa23c9b9ad07f038aa89b4930bf267d9401e4255de9e8da0a5078ec8277e3e882a31d5e6a379e0793983ccded39b95c4353ab2ff01ea5369ba47b0c3191", - "wx" : "5c2bbfa23c9b9ad07f038aa89b4930bf267d9401e4255de9e8da0a5078ec8277", - "wy" : "00e3e882a31d5e6a379e0793983ccded39b95c4353ab2ff01ea5369ba47b0c3191" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "045c2bbfa23c9b9ad07f038aa89b4930bf267d9401e4255de9e8da0a5078ec8277e3e882a31d5e6a379e0793983ccded39b95c4353ab2ff01ea5369ba47b0c3191", + "wx": "5c2bbfa23c9b9ad07f038aa89b4930bf267d9401e4255de9e8da0a5078ec8277", + "wy": "00e3e882a31d5e6a379e0793983ccded39b95c4353ab2ff01ea5369ba47b0c3191" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200045c2bbfa23c9b9ad07f038aa89b4930bf267d9401e4255de9e8da0a5078ec8277e3e882a31d5e6a379e0793983ccded39b95c4353ab2ff01ea5369ba47b0c3191", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEXCu/ojybmtB/A4qom0kwvyZ9lAHkJV3p\n6NoKUHjsgnfj6IKjHV5qN54Hk5g8ze05uVxDU6sv8B6lNpukewwxkQ==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200045c2bbfa23c9b9ad07f038aa89b4930bf267d9401e4255de9e8da0a5078ec8277e3e882a31d5e6a379e0793983ccded39b95c4353ab2ff01ea5369ba47b0c3191", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEXCu/ojybmtB/A4qom0kwvyZ9lAHkJV3p\n6NoKUHjsgnfj6IKjHV5qN54Hk5g8ze05uVxDU6sv8B6lNpukewwxkQ==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 422, - "comment" : "edge case for u2", - "flags" : [ + "tcId": 422, + "comment": "edge case for u2", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc0220185ddbca6dac41b1da033cfb60c152869e74b3cd66e9ffdf1b6bc09ed65ee40c", - "result" : "valid" + "msg": "313233343030", + "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc0220185ddbca6dac41b1da033cfb60c152869e74b3cd66e9ffdf1b6bc09ed65ee40c", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "042ea7133432339c69d27f9b267281bd2ddd5f19d6338d400a05cd3647b157a3853547808298448edb5e701ade84cd5fb1ac9567ba5e8fb68a6b933ec4b5cc84cc", - "wx" : 
"2ea7133432339c69d27f9b267281bd2ddd5f19d6338d400a05cd3647b157a385", - "wy" : "3547808298448edb5e701ade84cd5fb1ac9567ba5e8fb68a6b933ec4b5cc84cc" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "042ea7133432339c69d27f9b267281bd2ddd5f19d6338d400a05cd3647b157a3853547808298448edb5e701ade84cd5fb1ac9567ba5e8fb68a6b933ec4b5cc84cc", + "wx": "2ea7133432339c69d27f9b267281bd2ddd5f19d6338d400a05cd3647b157a385", + "wy": "3547808298448edb5e701ade84cd5fb1ac9567ba5e8fb68a6b933ec4b5cc84cc" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200042ea7133432339c69d27f9b267281bd2ddd5f19d6338d400a05cd3647b157a3853547808298448edb5e701ade84cd5fb1ac9567ba5e8fb68a6b933ec4b5cc84cc", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAELqcTNDIznGnSf5smcoG9Ld1fGdYzjUAK\nBc02R7FXo4U1R4CCmESO215wGt6EzV+xrJVnul6Ptoprkz7EtcyEzA==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200042ea7133432339c69d27f9b267281bd2ddd5f19d6338d400a05cd3647b157a3853547808298448edb5e701ade84cd5fb1ac9567ba5e8fb68a6b933ec4b5cc84cc", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAELqcTNDIznGnSf5smcoG9Ld1fGdYzjUAK\nBc02R7FXo4U1R4CCmESO215wGt6EzV+xrJVnul6Ptoprkz7EtcyEzA==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 423, - "comment" : "point duplication during verification", - "flags" : [ + "tcId": 423, + "comment": "point duplication during verification", + "flags": [ "PointDuplication" ], - "msg" : "313233343030", - "sig" : "3044022032b0d10d8d0e04bc8d4d064d270699e87cffc9b49c5c20730e1c26f6105ddcda022029ed3d67b3d505be95580d77d5b792b436881179b2b6b2e04c5fe592d38d82d9", - "result" : "valid" + "msg": "313233343030", + "sig": "3044022032b0d10d8d0e04bc8d4d064d270699e87cffc9b49c5c20730e1c26f6105ddcda022029ed3d67b3d505be95580d77d5b792b436881179b2b6b2e04c5fe592d38d82d9", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "042ea7133432339c69d27f9b267281bd2ddd5f19d6338d400a05cd3647b157a385cab87f7d67bb7124a18fe5217b32a04e536a9845a1704975946cc13a4a337763", - "wx" : "2ea7133432339c69d27f9b267281bd2ddd5f19d6338d400a05cd3647b157a385", - "wy" : "00cab87f7d67bb7124a18fe5217b32a04e536a9845a1704975946cc13a4a337763" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "042ea7133432339c69d27f9b267281bd2ddd5f19d6338d400a05cd3647b157a385cab87f7d67bb7124a18fe5217b32a04e536a9845a1704975946cc13a4a337763", + "wx": "2ea7133432339c69d27f9b267281bd2ddd5f19d6338d400a05cd3647b157a385", + "wy": "00cab87f7d67bb7124a18fe5217b32a04e536a9845a1704975946cc13a4a337763" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200042ea7133432339c69d27f9b267281bd2ddd5f19d6338d400a05cd3647b157a385cab87f7d67bb7124a18fe5217b32a04e536a9845a1704975946cc13a4a337763", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAELqcTNDIznGnSf5smcoG9Ld1fGdYzjUAK\nBc02R7FXo4XKuH99Z7txJKGP5SF7MqBOU2qYRaFwSXWUbME6SjN3Yw==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200042ea7133432339c69d27f9b267281bd2ddd5f19d6338d400a05cd3647b157a385cab87f7d67bb7124a18fe5217b32a04e536a9845a1704975946cc13a4a337763", + "publicKeyPem": "-----BEGIN PUBLIC 
KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAELqcTNDIznGnSf5smcoG9Ld1fGdYzjUAK\nBc02R7FXo4XKuH99Z7txJKGP5SF7MqBOU2qYRaFwSXWUbME6SjN3Yw==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 424, - "comment" : "duplication bug", - "flags" : [ + "tcId": 424, + "comment": "duplication bug", + "flags": [ "PointDuplication" ], - "msg" : "313233343030", - "sig" : "3044022032b0d10d8d0e04bc8d4d064d270699e87cffc9b49c5c20730e1c26f6105ddcda022029ed3d67b3d505be95580d77d5b792b436881179b2b6b2e04c5fe592d38d82d9", - "result" : "invalid" + "msg": "313233343030", + "sig": "3044022032b0d10d8d0e04bc8d4d064d270699e87cffc9b49c5c20730e1c26f6105ddcda022029ed3d67b3d505be95580d77d5b792b436881179b2b6b2e04c5fe592d38d82d9", + "result": "invalid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "048aa2c64fa9c6437563abfbcbd00b2048d48c18c152a2a6f49036de7647ebe82e1ce64387995c68a060fa3bc0399b05cc06eec7d598f75041a4917e692b7f51ff", - "wx" : "008aa2c64fa9c6437563abfbcbd00b2048d48c18c152a2a6f49036de7647ebe82e", - "wy" : "1ce64387995c68a060fa3bc0399b05cc06eec7d598f75041a4917e692b7f51ff" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "048aa2c64fa9c6437563abfbcbd00b2048d48c18c152a2a6f49036de7647ebe82e1ce64387995c68a060fa3bc0399b05cc06eec7d598f75041a4917e692b7f51ff", + "wx": "008aa2c64fa9c6437563abfbcbd00b2048d48c18c152a2a6f49036de7647ebe82e", + "wy": "1ce64387995c68a060fa3bc0399b05cc06eec7d598f75041a4917e692b7f51ff" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200048aa2c64fa9c6437563abfbcbd00b2048d48c18c152a2a6f49036de7647ebe82e1ce64387995c68a060fa3bc0399b05cc06eec7d598f75041a4917e692b7f51ff", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEiqLGT6nGQ3Vjq/vL0AsgSNSMGMFSoqb0\nkDbedkfr6C4c5kOHmVxooGD6O8A5mwXMBu7H1Zj3UEGkkX5pK39R/w==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200048aa2c64fa9c6437563abfbcbd00b2048d48c18c152a2a6f49036de7647ebe82e1ce64387995c68a060fa3bc0399b05cc06eec7d598f75041a4917e692b7f51ff", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEiqLGT6nGQ3Vjq/vL0AsgSNSMGMFSoqb0\nkDbedkfr6C4c5kOHmVxooGD6O8A5mwXMBu7H1Zj3UEGkkX5pK39R/w==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 425, - "comment" : "comparison with point at infinity ", - "flags" : [ + "tcId": 425, + "comment": "comparison with point at infinity ", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c0022033333333333333333333333333333332f222f8faefdb533f265d461c29a47373", - "result" : "invalid" + "msg": "313233343030", + "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c0022033333333333333333333333333333332f222f8faefdb533f265d461c29a47373", + "result": "invalid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04391427ff7ee78013c14aec7d96a8a062209298a783835e94fd6549d502fff71fdd6624ec343ad9fcf4d9872181e59f842f9ba4cccae09a6c0972fb6ac6b4c6bd", - "wx" : "391427ff7ee78013c14aec7d96a8a062209298a783835e94fd6549d502fff71f", - "wy" : "00dd6624ec343ad9fcf4d9872181e59f842f9ba4cccae09a6c0972fb6ac6b4c6bd" + "type": "EcdsaBitcoinVerify", + "publicKey": { + 
"type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04391427ff7ee78013c14aec7d96a8a062209298a783835e94fd6549d502fff71fdd6624ec343ad9fcf4d9872181e59f842f9ba4cccae09a6c0972fb6ac6b4c6bd", + "wx": "391427ff7ee78013c14aec7d96a8a062209298a783835e94fd6549d502fff71f", + "wy": "00dd6624ec343ad9fcf4d9872181e59f842f9ba4cccae09a6c0972fb6ac6b4c6bd" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004391427ff7ee78013c14aec7d96a8a062209298a783835e94fd6549d502fff71fdd6624ec343ad9fcf4d9872181e59f842f9ba4cccae09a6c0972fb6ac6b4c6bd", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEORQn/37ngBPBSux9lqigYiCSmKeDg16U\n/WVJ1QL/9x/dZiTsNDrZ/PTZhyGB5Z+EL5ukzMrgmmwJcvtqxrTGvQ==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004391427ff7ee78013c14aec7d96a8a062209298a783835e94fd6549d502fff71fdd6624ec343ad9fcf4d9872181e59f842f9ba4cccae09a6c0972fb6ac6b4c6bd", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEORQn/37ngBPBSux9lqigYiCSmKeDg16U\n/WVJ1QL/9x/dZiTsNDrZ/PTZhyGB5Z+EL5ukzMrgmmwJcvtqxrTGvQ==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 426, - "comment" : "extreme value for k and edgecase s", - "flags" : [ + "tcId": 426, + "comment": "extreme value for k and edgecase s", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3045022100c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c0", - "result" : "valid" + "msg": "313233343030", + "sig": "3045022100c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c0", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04e762b8a219b4f180219cc7a9059245e4961bd191c03899789c7a34b89e8c138ec1533ef0419bb7376e0bfde9319d10a06968791d9ea0eed9c1ce6345aed9759e", - "wx" : "00e762b8a219b4f180219cc7a9059245e4961bd191c03899789c7a34b89e8c138e", - "wy" : "00c1533ef0419bb7376e0bfde9319d10a06968791d9ea0eed9c1ce6345aed9759e" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04e762b8a219b4f180219cc7a9059245e4961bd191c03899789c7a34b89e8c138ec1533ef0419bb7376e0bfde9319d10a06968791d9ea0eed9c1ce6345aed9759e", + "wx": "00e762b8a219b4f180219cc7a9059245e4961bd191c03899789c7a34b89e8c138e", + "wy": "00c1533ef0419bb7376e0bfde9319d10a06968791d9ea0eed9c1ce6345aed9759e" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004e762b8a219b4f180219cc7a9059245e4961bd191c03899789c7a34b89e8c138ec1533ef0419bb7376e0bfde9319d10a06968791d9ea0eed9c1ce6345aed9759e", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE52K4ohm08YAhnMepBZJF5JYb0ZHAOJl4\nnHo0uJ6ME47BUz7wQZu3N24L/ekxnRCgaWh5HZ6g7tnBzmNFrtl1ng==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004e762b8a219b4f180219cc7a9059245e4961bd191c03899789c7a34b89e8c138ec1533ef0419bb7376e0bfde9319d10a06968791d9ea0eed9c1ce6345aed9759e", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE52K4ohm08YAhnMepBZJF5JYb0ZHAOJl4\nnHo0uJ6ME47BUz7wQZu3N24L/ekxnRCgaWh5HZ6g7tnBzmNFrtl1ng==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + 
"tests": [ { - "tcId" : 427, - "comment" : "extreme value for k and s^-1", - "flags" : [ + "tcId": 427, + "comment": "extreme value for k and s^-1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3045022100c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5022049249249249249249249249249249248c79facd43214c011123c1b03a93412a5", - "result" : "valid" + "msg": "313233343030", + "sig": "3045022100c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5022049249249249249249249249249249248c79facd43214c011123c1b03a93412a5", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "049aedb0d281db164e130000c5697fae0f305ef848be6fffb43ac593fbb950e952fa6f633359bdcd82b56b0b9f965b037789d46b9a8141b791b2aefa713f96c175", - "wx" : "009aedb0d281db164e130000c5697fae0f305ef848be6fffb43ac593fbb950e952", - "wy" : "00fa6f633359bdcd82b56b0b9f965b037789d46b9a8141b791b2aefa713f96c175" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "049aedb0d281db164e130000c5697fae0f305ef848be6fffb43ac593fbb950e952fa6f633359bdcd82b56b0b9f965b037789d46b9a8141b791b2aefa713f96c175", + "wx": "009aedb0d281db164e130000c5697fae0f305ef848be6fffb43ac593fbb950e952", + "wy": "00fa6f633359bdcd82b56b0b9f965b037789d46b9a8141b791b2aefa713f96c175" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200049aedb0d281db164e130000c5697fae0f305ef848be6fffb43ac593fbb950e952fa6f633359bdcd82b56b0b9f965b037789d46b9a8141b791b2aefa713f96c175", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEmu2w0oHbFk4TAADFaX+uDzBe+Ei+b/+0\nOsWT+7lQ6VL6b2MzWb3NgrVrC5+WWwN3idRrmoFBt5GyrvpxP5bBdQ==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200049aedb0d281db164e130000c5697fae0f305ef848be6fffb43ac593fbb950e952fa6f633359bdcd82b56b0b9f965b037789d46b9a8141b791b2aefa713f96c175", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEmu2w0oHbFk4TAADFaX+uDzBe+Ei+b/+0\nOsWT+7lQ6VL6b2MzWb3NgrVrC5+WWwN3idRrmoFBt5GyrvpxP5bBdQ==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 428, - "comment" : "extreme value for k and s^-1", - "flags" : [ + "tcId": 428, + "comment": "extreme value for k and s^-1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3045022100c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5022066666666666666666666666666666665e445f1f5dfb6a67e4cba8c385348e6e7", - "result" : "valid" + "msg": "313233343030", + "sig": "3045022100c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5022066666666666666666666666666666665e445f1f5dfb6a67e4cba8c385348e6e7", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "048ad445db62816260e4e687fd1884e48b9fc0636d031547d63315e792e19bfaee1de64f99d5f1cd8b6ec9cb0f787a654ae86993ba3db1008ef43cff0684cb22bd", - "wx" : "008ad445db62816260e4e687fd1884e48b9fc0636d031547d63315e792e19bfaee", - "wy" : "1de64f99d5f1cd8b6ec9cb0f787a654ae86993ba3db1008ef43cff0684cb22bd" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": 
"048ad445db62816260e4e687fd1884e48b9fc0636d031547d63315e792e19bfaee1de64f99d5f1cd8b6ec9cb0f787a654ae86993ba3db1008ef43cff0684cb22bd", + "wx": "008ad445db62816260e4e687fd1884e48b9fc0636d031547d63315e792e19bfaee", + "wy": "1de64f99d5f1cd8b6ec9cb0f787a654ae86993ba3db1008ef43cff0684cb22bd" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200048ad445db62816260e4e687fd1884e48b9fc0636d031547d63315e792e19bfaee1de64f99d5f1cd8b6ec9cb0f787a654ae86993ba3db1008ef43cff0684cb22bd", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEitRF22KBYmDk5of9GITki5/AY20DFUfW\nMxXnkuGb+u4d5k+Z1fHNi27Jyw94emVK6GmTuj2xAI70PP8GhMsivQ==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200048ad445db62816260e4e687fd1884e48b9fc0636d031547d63315e792e19bfaee1de64f99d5f1cd8b6ec9cb0f787a654ae86993ba3db1008ef43cff0684cb22bd", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEitRF22KBYmDk5of9GITki5/AY20DFUfW\nMxXnkuGb+u4d5k+Z1fHNi27Jyw94emVK6GmTuj2xAI70PP8GhMsivQ==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 429, - "comment" : "extreme value for k and s^-1", - "flags" : [ + "tcId": 429, + "comment": "extreme value for k and s^-1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3045022100c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5022066666666666666666666666666666665e445f1f5dfb6a67e4cba8c385348e6e7", - "result" : "valid" + "msg": "313233343030", + "sig": "3045022100c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5022066666666666666666666666666666665e445f1f5dfb6a67e4cba8c385348e6e7", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "041f5799c95be89063b24f26e40cb928c1a868a76fb0094607e8043db409c91c32e75724e813a4191e3a839007f08e2e897388b06d4a00de6de60e536d91fab566", - "wx" : "1f5799c95be89063b24f26e40cb928c1a868a76fb0094607e8043db409c91c32", - "wy" : "00e75724e813a4191e3a839007f08e2e897388b06d4a00de6de60e536d91fab566" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "041f5799c95be89063b24f26e40cb928c1a868a76fb0094607e8043db409c91c32e75724e813a4191e3a839007f08e2e897388b06d4a00de6de60e536d91fab566", + "wx": "1f5799c95be89063b24f26e40cb928c1a868a76fb0094607e8043db409c91c32", + "wy": "00e75724e813a4191e3a839007f08e2e897388b06d4a00de6de60e536d91fab566" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200041f5799c95be89063b24f26e40cb928c1a868a76fb0094607e8043db409c91c32e75724e813a4191e3a839007f08e2e897388b06d4a00de6de60e536d91fab566", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEH1eZyVvokGOyTybkDLkowahop2+wCUYH\n6AQ9tAnJHDLnVyToE6QZHjqDkAfwji6Jc4iwbUoA3m3mDlNtkfq1Zg==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200041f5799c95be89063b24f26e40cb928c1a868a76fb0094607e8043db409c91c32e75724e813a4191e3a839007f08e2e897388b06d4a00de6de60e536d91fab566", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEH1eZyVvokGOyTybkDLkowahop2+wCUYH\n6AQ9tAnJHDLnVyToE6QZHjqDkAfwji6Jc4iwbUoA3m3mDlNtkfq1Zg==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 430, - "comment" : "extreme value for k and s^-1", - "flags" : [ + "tcId": 430, + 
"comment": "extreme value for k and s^-1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3045022100c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5022049249249249249249249249249249248c79facd43214c011123c1b03a93412a5", - "result" : "valid" + "msg": "313233343030", + "sig": "3045022100c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5022049249249249249249249249249249248c79facd43214c011123c1b03a93412a5", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04a3331a4e1b4223ec2c027edd482c928a14ed358d93f1d4217d39abf69fcb5ccc28d684d2aaabcd6383775caa6239de26d4c6937bb603ecb4196082f4cffd509d", - "wx" : "00a3331a4e1b4223ec2c027edd482c928a14ed358d93f1d4217d39abf69fcb5ccc", - "wy" : "28d684d2aaabcd6383775caa6239de26d4c6937bb603ecb4196082f4cffd509d" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04a3331a4e1b4223ec2c027edd482c928a14ed358d93f1d4217d39abf69fcb5ccc28d684d2aaabcd6383775caa6239de26d4c6937bb603ecb4196082f4cffd509d", + "wx": "00a3331a4e1b4223ec2c027edd482c928a14ed358d93f1d4217d39abf69fcb5ccc", + "wy": "28d684d2aaabcd6383775caa6239de26d4c6937bb603ecb4196082f4cffd509d" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004a3331a4e1b4223ec2c027edd482c928a14ed358d93f1d4217d39abf69fcb5ccc28d684d2aaabcd6383775caa6239de26d4c6937bb603ecb4196082f4cffd509d", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEozMaThtCI+wsAn7dSCySihTtNY2T8dQh\nfTmr9p/LXMwo1oTSqqvNY4N3XKpiOd4m1MaTe7YD7LQZYIL0z/1QnQ==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004a3331a4e1b4223ec2c027edd482c928a14ed358d93f1d4217d39abf69fcb5ccc28d684d2aaabcd6383775caa6239de26d4c6937bb603ecb4196082f4cffd509d", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEozMaThtCI+wsAn7dSCySihTtNY2T8dQh\nfTmr9p/LXMwo1oTSqqvNY4N3XKpiOd4m1MaTe7YD7LQZYIL0z/1QnQ==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 431, - "comment" : "extreme value for k", - "flags" : [ + "tcId": 431, + "comment": "extreme value for k", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3045022100c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee502200eb10e5ab95f2f275348d82ad2e4d7949c8193800d8c9c75df58e343f0ebba7b", - "result" : "valid" + "msg": "313233343030", + "sig": "3045022100c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee502200eb10e5ab95f2f275348d82ad2e4d7949c8193800d8c9c75df58e343f0ebba7b", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "043f3952199774c7cf39b38b66cb1042a6260d8680803845e4d433adba3bb248185ea495b68cbc7ed4173ee63c9042dc502625c7eb7e21fb02ca9a9114e0a3a18d", - "wx" : "3f3952199774c7cf39b38b66cb1042a6260d8680803845e4d433adba3bb24818", - "wy" : "5ea495b68cbc7ed4173ee63c9042dc502625c7eb7e21fb02ca9a9114e0a3a18d" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "043f3952199774c7cf39b38b66cb1042a6260d8680803845e4d433adba3bb248185ea495b68cbc7ed4173ee63c9042dc502625c7eb7e21fb02ca9a9114e0a3a18d", + "wx": "3f3952199774c7cf39b38b66cb1042a6260d8680803845e4d433adba3bb24818", + "wy": 
"5ea495b68cbc7ed4173ee63c9042dc502625c7eb7e21fb02ca9a9114e0a3a18d" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200043f3952199774c7cf39b38b66cb1042a6260d8680803845e4d433adba3bb248185ea495b68cbc7ed4173ee63c9042dc502625c7eb7e21fb02ca9a9114e0a3a18d", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEPzlSGZd0x885s4tmyxBCpiYNhoCAOEXk\n1DOtujuySBhepJW2jLx+1Bc+5jyQQtxQJiXH634h+wLKmpEU4KOhjQ==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200043f3952199774c7cf39b38b66cb1042a6260d8680803845e4d433adba3bb248185ea495b68cbc7ed4173ee63c9042dc502625c7eb7e21fb02ca9a9114e0a3a18d", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEPzlSGZd0x885s4tmyxBCpiYNhoCAOEXk\n1DOtujuySBhepJW2jLx+1Bc+5jyQQtxQJiXH634h+wLKmpEU4KOhjQ==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 432, - "comment" : "extreme value for k and edgecase s", - "flags" : [ + "tcId": 432, + "comment": "extreme value for k and edgecase s", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3044022079be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c0", - "result" : "valid" + "msg": "313233343030", + "sig": "3044022079be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c0", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04cdfb8c0f422e144e137c2412c86c171f5fe3fa3f5bbb544e9076288f3ced786e054fd0721b77c11c79beacb3c94211b0a19bda08652efeaf92513a3b0a163698", - "wx" : "00cdfb8c0f422e144e137c2412c86c171f5fe3fa3f5bbb544e9076288f3ced786e", - "wy" : "054fd0721b77c11c79beacb3c94211b0a19bda08652efeaf92513a3b0a163698" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04cdfb8c0f422e144e137c2412c86c171f5fe3fa3f5bbb544e9076288f3ced786e054fd0721b77c11c79beacb3c94211b0a19bda08652efeaf92513a3b0a163698", + "wx": "00cdfb8c0f422e144e137c2412c86c171f5fe3fa3f5bbb544e9076288f3ced786e", + "wy": "054fd0721b77c11c79beacb3c94211b0a19bda08652efeaf92513a3b0a163698" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004cdfb8c0f422e144e137c2412c86c171f5fe3fa3f5bbb544e9076288f3ced786e054fd0721b77c11c79beacb3c94211b0a19bda08652efeaf92513a3b0a163698", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEzfuMD0IuFE4TfCQSyGwXH1/j+j9bu1RO\nkHYojzzteG4FT9ByG3fBHHm+rLPJQhGwoZvaCGUu/q+SUTo7ChY2mA==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004cdfb8c0f422e144e137c2412c86c171f5fe3fa3f5bbb544e9076288f3ced786e054fd0721b77c11c79beacb3c94211b0a19bda08652efeaf92513a3b0a163698", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEzfuMD0IuFE4TfCQSyGwXH1/j+j9bu1RO\nkHYojzzteG4FT9ByG3fBHHm+rLPJQhGwoZvaCGUu/q+SUTo7ChY2mA==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 433, - "comment" : "extreme value for k and s^-1", - "flags" : [ + "tcId": 433, + "comment": "extreme value for k and s^-1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : 
"3044022079be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798022049249249249249249249249249249248c79facd43214c011123c1b03a93412a5", - "result" : "valid" + "msg": "313233343030", + "sig": "3044022079be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798022049249249249249249249249249249248c79facd43214c011123c1b03a93412a5", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "0473598a6a1c68278fa6bfd0ce4064e68235bc1c0f6b20a928108be336730f87e3cbae612519b5032ecc85aed811271a95fe7939d5d3460140ba318f4d14aba31d", - "wx" : "73598a6a1c68278fa6bfd0ce4064e68235bc1c0f6b20a928108be336730f87e3", - "wy" : "00cbae612519b5032ecc85aed811271a95fe7939d5d3460140ba318f4d14aba31d" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "0473598a6a1c68278fa6bfd0ce4064e68235bc1c0f6b20a928108be336730f87e3cbae612519b5032ecc85aed811271a95fe7939d5d3460140ba318f4d14aba31d", + "wx": "73598a6a1c68278fa6bfd0ce4064e68235bc1c0f6b20a928108be336730f87e3", + "wy": "00cbae612519b5032ecc85aed811271a95fe7939d5d3460140ba318f4d14aba31d" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000473598a6a1c68278fa6bfd0ce4064e68235bc1c0f6b20a928108be336730f87e3cbae612519b5032ecc85aed811271a95fe7939d5d3460140ba318f4d14aba31d", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEc1mKahxoJ4+mv9DOQGTmgjW8HA9rIKko\nEIvjNnMPh+PLrmElGbUDLsyFrtgRJxqV/nk51dNGAUC6MY9NFKujHQ==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000473598a6a1c68278fa6bfd0ce4064e68235bc1c0f6b20a928108be336730f87e3cbae612519b5032ecc85aed811271a95fe7939d5d3460140ba318f4d14aba31d", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEc1mKahxoJ4+mv9DOQGTmgjW8HA9rIKko\nEIvjNnMPh+PLrmElGbUDLsyFrtgRJxqV/nk51dNGAUC6MY9NFKujHQ==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 434, - "comment" : "extreme value for k and s^-1", - "flags" : [ + "tcId": 434, + "comment": "extreme value for k and s^-1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3044022079be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798022066666666666666666666666666666665e445f1f5dfb6a67e4cba8c385348e6e7", - "result" : "valid" + "msg": "313233343030", + "sig": "3044022079be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798022066666666666666666666666666666665e445f1f5dfb6a67e4cba8c385348e6e7", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "0458debd9a7ee2c9d59132478a5440ae4d5d7ed437308369f92ea86c82183f10a16773e76f5edbf4da0e4f1bdffac0f57257e1dfa465842931309a24245fda6a5d", - "wx" : "58debd9a7ee2c9d59132478a5440ae4d5d7ed437308369f92ea86c82183f10a1", - "wy" : "6773e76f5edbf4da0e4f1bdffac0f57257e1dfa465842931309a24245fda6a5d" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "0458debd9a7ee2c9d59132478a5440ae4d5d7ed437308369f92ea86c82183f10a16773e76f5edbf4da0e4f1bdffac0f57257e1dfa465842931309a24245fda6a5d", + "wx": "58debd9a7ee2c9d59132478a5440ae4d5d7ed437308369f92ea86c82183f10a1", + "wy": "6773e76f5edbf4da0e4f1bdffac0f57257e1dfa465842931309a24245fda6a5d" }, - "publicKeyDer" : 
"3056301006072a8648ce3d020106052b8104000a0342000458debd9a7ee2c9d59132478a5440ae4d5d7ed437308369f92ea86c82183f10a16773e76f5edbf4da0e4f1bdffac0f57257e1dfa465842931309a24245fda6a5d", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEWN69mn7iydWRMkeKVECuTV1+1Dcwg2n5\nLqhsghg/EKFnc+dvXtv02g5PG9/6wPVyV+HfpGWEKTEwmiQkX9pqXQ==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000458debd9a7ee2c9d59132478a5440ae4d5d7ed437308369f92ea86c82183f10a16773e76f5edbf4da0e4f1bdffac0f57257e1dfa465842931309a24245fda6a5d", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEWN69mn7iydWRMkeKVECuTV1+1Dcwg2n5\nLqhsghg/EKFnc+dvXtv02g5PG9/6wPVyV+HfpGWEKTEwmiQkX9pqXQ==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 435, - "comment" : "extreme value for k and s^-1", - "flags" : [ + "tcId": 435, + "comment": "extreme value for k and s^-1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3044022079be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798022066666666666666666666666666666665e445f1f5dfb6a67e4cba8c385348e6e7", - "result" : "valid" + "msg": "313233343030", + "sig": "3044022079be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798022066666666666666666666666666666665e445f1f5dfb6a67e4cba8c385348e6e7", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "048b904de47967340c5f8c3572a720924ef7578637feab1949acb241a5a6ac3f5b950904496f9824b1d63f3313bae21b89fae89afdfc811b5ece03fd5aa301864f", - "wx" : "008b904de47967340c5f8c3572a720924ef7578637feab1949acb241a5a6ac3f5b", - "wy" : "00950904496f9824b1d63f3313bae21b89fae89afdfc811b5ece03fd5aa301864f" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "048b904de47967340c5f8c3572a720924ef7578637feab1949acb241a5a6ac3f5b950904496f9824b1d63f3313bae21b89fae89afdfc811b5ece03fd5aa301864f", + "wx": "008b904de47967340c5f8c3572a720924ef7578637feab1949acb241a5a6ac3f5b", + "wy": "00950904496f9824b1d63f3313bae21b89fae89afdfc811b5ece03fd5aa301864f" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200048b904de47967340c5f8c3572a720924ef7578637feab1949acb241a5a6ac3f5b950904496f9824b1d63f3313bae21b89fae89afdfc811b5ece03fd5aa301864f", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEi5BN5HlnNAxfjDVypyCSTvdXhjf+qxlJ\nrLJBpaasP1uVCQRJb5gksdY/MxO64huJ+uia/fyBG17OA/1aowGGTw==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200048b904de47967340c5f8c3572a720924ef7578637feab1949acb241a5a6ac3f5b950904496f9824b1d63f3313bae21b89fae89afdfc811b5ece03fd5aa301864f", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEi5BN5HlnNAxfjDVypyCSTvdXhjf+qxlJ\nrLJBpaasP1uVCQRJb5gksdY/MxO64huJ+uia/fyBG17OA/1aowGGTw==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 436, - "comment" : "extreme value for k and s^-1", - "flags" : [ + "tcId": 436, + "comment": "extreme value for k and s^-1", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3044022079be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798022049249249249249249249249249249248c79facd43214c011123c1b03a93412a5", - "result" : "valid" + "msg": "313233343030", + 
"sig": "3044022079be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798022049249249249249249249249249249248c79facd43214c011123c1b03a93412a5", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04f4892b6d525c771e035f2a252708f3784e48238604b4f94dc56eaa1e546d941a346b1aa0bce68b1c50e5b52f509fb5522e5c25e028bc8f863402edb7bcad8b1b", - "wx" : "00f4892b6d525c771e035f2a252708f3784e48238604b4f94dc56eaa1e546d941a", - "wy" : "346b1aa0bce68b1c50e5b52f509fb5522e5c25e028bc8f863402edb7bcad8b1b" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04f4892b6d525c771e035f2a252708f3784e48238604b4f94dc56eaa1e546d941a346b1aa0bce68b1c50e5b52f509fb5522e5c25e028bc8f863402edb7bcad8b1b", + "wx": "00f4892b6d525c771e035f2a252708f3784e48238604b4f94dc56eaa1e546d941a", + "wy": "346b1aa0bce68b1c50e5b52f509fb5522e5c25e028bc8f863402edb7bcad8b1b" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004f4892b6d525c771e035f2a252708f3784e48238604b4f94dc56eaa1e546d941a346b1aa0bce68b1c50e5b52f509fb5522e5c25e028bc8f863402edb7bcad8b1b", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE9IkrbVJcdx4DXyolJwjzeE5II4YEtPlN\nxW6qHlRtlBo0axqgvOaLHFDltS9Qn7VSLlwl4Ci8j4Y0Au23vK2LGw==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004f4892b6d525c771e035f2a252708f3784e48238604b4f94dc56eaa1e546d941a346b1aa0bce68b1c50e5b52f509fb5522e5c25e028bc8f863402edb7bcad8b1b", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE9IkrbVJcdx4DXyolJwjzeE5II4YEtPlN\nxW6qHlRtlBo0axqgvOaLHFDltS9Qn7VSLlwl4Ci8j4Y0Au23vK2LGw==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 437, - "comment" : "extreme value for k", - "flags" : [ + "tcId": 437, + "comment": "extreme value for k", + "flags": [ "ArithmeticError" ], - "msg" : "313233343030", - "sig" : "3044022079be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f8179802200eb10e5ab95f2f275348d82ad2e4d7949c8193800d8c9c75df58e343f0ebba7b", - "result" : "valid" + "msg": "313233343030", + "sig": "3044022079be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f8179802200eb10e5ab95f2f275348d82ad2e4d7949c8193800d8c9c75df58e343f0ebba7b", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "0479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8", - "wx" : "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798", - "wy" : "483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "0479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8", + "wx": "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798", + "wy": "483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8", - "publicKeyPem" : "-----BEGIN PUBLIC 
KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEeb5mfvncu6xVoGKVzocLBwKb/NstzijZ\nWfKBWxb4F5hIOtp3JqPEZV2k+/wOEQio/Re0SKaFVBmcR9CP+xDUuA==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEeb5mfvncu6xVoGKVzocLBwKb/NstzijZ\nWfKBWxb4F5hIOtp3JqPEZV2k+/wOEQio/Re0SKaFVBmcR9CP+xDUuA==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 438, - "comment" : "public key shares x-coordinate with generator", - "flags" : [ + "tcId": 438, + "comment": "public key shares x-coordinate with generator", + "flags": [ "PointDuplication" ], - "msg" : "313233343030", - "sig" : "3045022100bb5a52f42f9c9261ed4361f59422a1e30036e7c32b270c8807a419feca60502302202492492492492492492492492492492463cfd66a190a6008891e0d81d49a0952", - "result" : "invalid" + "msg": "313233343030", + "sig": "3045022100bb5a52f42f9c9261ed4361f59422a1e30036e7c32b270c8807a419feca60502302202492492492492492492492492492492463cfd66a190a6008891e0d81d49a0952", + "result": "invalid" }, { - "tcId" : 439, - "comment" : "public key shares x-coordinate with generator", - "flags" : [ + "tcId": 439, + "comment": "public key shares x-coordinate with generator", + "flags": [ "PointDuplication" ], - "msg" : "313233343030", - "sig" : "3044022044a5ad0bd0636d9e12bc9e0a6bdd5e1bba77f523842193b3b82e448e05d5f11e02202492492492492492492492492492492463cfd66a190a6008891e0d81d49a0952", - "result" : "invalid" + "msg": "313233343030", + "sig": "3044022044a5ad0bd0636d9e12bc9e0a6bdd5e1bba77f523842193b3b82e448e05d5f11e02202492492492492492492492492492492463cfd66a190a6008891e0d81d49a0952", + "result": "invalid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "0479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798b7c52588d95c3b9aa25b0403f1eef75702e84bb7597aabe663b82f6f04ef2777", - "wx" : "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798", - "wy" : "00b7c52588d95c3b9aa25b0403f1eef75702e84bb7597aabe663b82f6f04ef2777" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "0479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798b7c52588d95c3b9aa25b0403f1eef75702e84bb7597aabe663b82f6f04ef2777", + "wx": "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798", + "wy": "00b7c52588d95c3b9aa25b0403f1eef75702e84bb7597aabe663b82f6f04ef2777" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798b7c52588d95c3b9aa25b0403f1eef75702e84bb7597aabe663b82f6f04ef2777", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEeb5mfvncu6xVoGKVzocLBwKb/NstzijZ\nWfKBWxb4F5i3xSWI2Vw7mqJbBAPx7vdXAuhLt1l6q+ZjuC9vBO8ndw==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798b7c52588d95c3b9aa25b0403f1eef75702e84bb7597aabe663b82f6f04ef2777", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEeb5mfvncu6xVoGKVzocLBwKb/NstzijZ\nWfKBWxb4F5i3xSWI2Vw7mqJbBAPx7vdXAuhLt1l6q+ZjuC9vBO8ndw==\n-----END PUBLIC KEY-----\n", + 
"sha": "SHA-256", + "tests": [ { - "tcId" : 440, - "comment" : "public key shares x-coordinate with generator", - "flags" : [ + "tcId": 440, + "comment": "public key shares x-coordinate with generator", + "flags": [ "PointDuplication" ], - "msg" : "313233343030", - "sig" : "3045022100bb5a52f42f9c9261ed4361f59422a1e30036e7c32b270c8807a419feca60502302202492492492492492492492492492492463cfd66a190a6008891e0d81d49a0952", - "result" : "invalid" + "msg": "313233343030", + "sig": "3045022100bb5a52f42f9c9261ed4361f59422a1e30036e7c32b270c8807a419feca60502302202492492492492492492492492492492463cfd66a190a6008891e0d81d49a0952", + "result": "invalid" }, { - "tcId" : 441, - "comment" : "public key shares x-coordinate with generator", - "flags" : [ + "tcId": 441, + "comment": "public key shares x-coordinate with generator", + "flags": [ "PointDuplication" ], - "msg" : "313233343030", - "sig" : "3044022044a5ad0bd0636d9e12bc9e0a6bdd5e1bba77f523842193b3b82e448e05d5f11e02202492492492492492492492492492492463cfd66a190a6008891e0d81d49a0952", - "result" : "invalid" + "msg": "313233343030", + "sig": "3044022044a5ad0bd0636d9e12bc9e0a6bdd5e1bba77f523842193b3b82e448e05d5f11e02202492492492492492492492492492492463cfd66a190a6008891e0d81d49a0952", + "result": "invalid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04782c8ed17e3b2a783b5464f33b09652a71c678e05ec51e84e2bcfc663a3de963af9acb4280b8c7f7c42f4ef9aba6245ec1ec1712fd38a0fa96418d8cd6aa6152", - "wx" : "782c8ed17e3b2a783b5464f33b09652a71c678e05ec51e84e2bcfc663a3de963", - "wy" : "00af9acb4280b8c7f7c42f4ef9aba6245ec1ec1712fd38a0fa96418d8cd6aa6152" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04782c8ed17e3b2a783b5464f33b09652a71c678e05ec51e84e2bcfc663a3de963af9acb4280b8c7f7c42f4ef9aba6245ec1ec1712fd38a0fa96418d8cd6aa6152", + "wx": "782c8ed17e3b2a783b5464f33b09652a71c678e05ec51e84e2bcfc663a3de963", + "wy": "00af9acb4280b8c7f7c42f4ef9aba6245ec1ec1712fd38a0fa96418d8cd6aa6152" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004782c8ed17e3b2a783b5464f33b09652a71c678e05ec51e84e2bcfc663a3de963af9acb4280b8c7f7c42f4ef9aba6245ec1ec1712fd38a0fa96418d8cd6aa6152", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEeCyO0X47Kng7VGTzOwllKnHGeOBexR6E\n4rz8Zjo96WOvmstCgLjH98QvTvmrpiRewewXEv04oPqWQY2M1qphUg==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004782c8ed17e3b2a783b5464f33b09652a71c678e05ec51e84e2bcfc663a3de963af9acb4280b8c7f7c42f4ef9aba6245ec1ec1712fd38a0fa96418d8cd6aa6152", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEeCyO0X47Kng7VGTzOwllKnHGeOBexR6E\n4rz8Zjo96WOvmstCgLjH98QvTvmrpiRewewXEv04oPqWQY2M1qphUg==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 442, - "comment" : "pseudorandom signature", - "flags" : [ + "tcId": 442, + "comment": "pseudorandom signature", + "flags": [ "ValidSignature" ], - "msg" : "", - "sig" : "3045022100f80ae4f96cdbc9d853f83d47aae225bf407d51c56b7776cd67d0dc195d99a9dc02204cfc1d941e08cb9aceadde0f4ccead76b30d332fc442115d50e673e28686b70b", - "result" : "valid" + "msg": "", + "sig": "3045022100f80ae4f96cdbc9d853f83d47aae225bf407d51c56b7776cd67d0dc195d99a9dc02204cfc1d941e08cb9aceadde0f4ccead76b30d332fc442115d50e673e28686b70b", + "result": "valid" }, { - "tcId" : 443, - 
"comment" : "pseudorandom signature", - "flags" : [ + "tcId": 443, + "comment": "pseudorandom signature", + "flags": [ "ValidSignature" ], - "msg" : "4d7367", - "sig" : "30440220109cd8ae0374358984a8249c0a843628f2835ffad1df1a9a69aa2fe72355545c02205390ff250ac4274e1cb25cd6ca6491f6b91281e32f5b264d87977aed4a94e77b", - "result" : "valid" + "msg": "4d7367", + "sig": "30440220109cd8ae0374358984a8249c0a843628f2835ffad1df1a9a69aa2fe72355545c02205390ff250ac4274e1cb25cd6ca6491f6b91281e32f5b264d87977aed4a94e77b", + "result": "valid" }, { - "tcId" : 444, - "comment" : "pseudorandom signature", - "flags" : [ + "tcId": 444, + "comment": "pseudorandom signature", + "flags": [ "ValidSignature" ], - "msg" : "313233343030", - "sig" : "3045022100d035ee1f17fdb0b2681b163e33c359932659990af77dca632012b30b27a057b302201939d9f3b2858bc13e3474cb50e6a82be44faa71940f876c1cba4c3e989202b6", - "result" : "valid" + "msg": "313233343030", + "sig": "3045022100d035ee1f17fdb0b2681b163e33c359932659990af77dca632012b30b27a057b302201939d9f3b2858bc13e3474cb50e6a82be44faa71940f876c1cba4c3e989202b6", + "result": "valid" }, { - "tcId" : 445, - "comment" : "pseudorandom signature", - "flags" : [ + "tcId": 445, + "comment": "pseudorandom signature", + "flags": [ "ValidSignature" ], - "msg" : "0000000000000000000000000000000000000000", - "sig" : "304402204f053f563ad34b74fd8c9934ce59e79c2eb8e6eca0fef5b323ca67d5ac7ed23802204d4b05daa0719e773d8617dce5631c5fd6f59c9bdc748e4b55c970040af01be5", - "result" : "valid" + "msg": "0000000000000000000000000000000000000000", + "sig": "304402204f053f563ad34b74fd8c9934ce59e79c2eb8e6eca0fef5b323ca67d5ac7ed23802204d4b05daa0719e773d8617dce5631c5fd6f59c9bdc748e4b55c970040af01be5", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "046e823555452914099182c6b2c1d6f0b5d28d50ccd005af2ce1bba541aa40caff00000001060492d5a5673e0f25d8d50fb7e58c49d86d46d4216955e0aa3d40e1", - "wx" : "6e823555452914099182c6b2c1d6f0b5d28d50ccd005af2ce1bba541aa40caff", - "wy" : "01060492d5a5673e0f25d8d50fb7e58c49d86d46d4216955e0aa3d40e1" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "046e823555452914099182c6b2c1d6f0b5d28d50ccd005af2ce1bba541aa40caff00000001060492d5a5673e0f25d8d50fb7e58c49d86d46d4216955e0aa3d40e1", + "wx": "6e823555452914099182c6b2c1d6f0b5d28d50ccd005af2ce1bba541aa40caff", + "wy": "01060492d5a5673e0f25d8d50fb7e58c49d86d46d4216955e0aa3d40e1" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200046e823555452914099182c6b2c1d6f0b5d28d50ccd005af2ce1bba541aa40caff00000001060492d5a5673e0f25d8d50fb7e58c49d86d46d4216955e0aa3d40e1", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEboI1VUUpFAmRgsaywdbwtdKNUMzQBa8s\n4bulQapAyv8AAAABBgSS1aVnPg8l2NUPt+WMSdhtRtQhaVXgqj1A4Q==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200046e823555452914099182c6b2c1d6f0b5d28d50ccd005af2ce1bba541aa40caff00000001060492d5a5673e0f25d8d50fb7e58c49d86d46d4216955e0aa3d40e1", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEboI1VUUpFAmRgsaywdbwtdKNUMzQBa8s\n4bulQapAyv8AAAABBgSS1aVnPg8l2NUPt+WMSdhtRtQhaVXgqj1A4Q==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 446, - "comment" : "y-coordinate of the public key is small", - "flags" : [ + "tcId": 446, + "comment": "y-coordinate of 
the public key is small", + "flags": [ "EdgeCasePublicKey" ], - "msg" : "4d657373616765", - "sig" : "304402206d6a4f556ccce154e7fb9f19e76c3deca13d59cc2aeb4ecad968aab2ded45965022053b9fa74803ede0fc4441bf683d56c564d3e274e09ccf47390badd1471c05fb7", - "result" : "valid" + "msg": "4d657373616765", + "sig": "304402206d6a4f556ccce154e7fb9f19e76c3deca13d59cc2aeb4ecad968aab2ded45965022053b9fa74803ede0fc4441bf683d56c564d3e274e09ccf47390badd1471c05fb7", + "result": "valid" }, { - "tcId" : 447, - "comment" : "y-coordinate of the public key is small", - "flags" : [ + "tcId": 447, + "comment": "y-coordinate of the public key is small", + "flags": [ "EdgeCasePublicKey" ], - "msg" : "4d657373616765", - "sig" : "3044022100aad503de9b9fd66b948e9acf596f0a0e65e700b28b26ec56e6e45e846489b3c4021f0ddc3a2f89abb817bb85c062ce02f823c63fc26b269e0bc9b84d81a5aa123d", - "result" : "valid" + "msg": "4d657373616765", + "sig": "3044022100aad503de9b9fd66b948e9acf596f0a0e65e700b28b26ec56e6e45e846489b3c4021f0ddc3a2f89abb817bb85c062ce02f823c63fc26b269e0bc9b84d81a5aa123d", + "result": "valid" }, { - "tcId" : 448, - "comment" : "y-coordinate of the public key is small", - "flags" : [ + "tcId": 448, + "comment": "y-coordinate of the public key is small", + "flags": [ "EdgeCasePublicKey" ], - "msg" : "4d657373616765", - "sig" : "30450221009182cebd3bb8ab572e167174397209ef4b1d439af3b200cdf003620089e43225022054477c982ea019d2e1000497fc25fcee1bccae55f2ac27530ae53b29c4b356a4", - "result" : "valid" + "msg": "4d657373616765", + "sig": "30450221009182cebd3bb8ab572e167174397209ef4b1d439af3b200cdf003620089e43225022054477c982ea019d2e1000497fc25fcee1bccae55f2ac27530ae53b29c4b356a4", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "046e823555452914099182c6b2c1d6f0b5d28d50ccd005af2ce1bba541aa40cafffffffffef9fb6d2a5a98c1f0da272af0481a73b62792b92bde96aa1e55c2bb4e", - "wx" : "6e823555452914099182c6b2c1d6f0b5d28d50ccd005af2ce1bba541aa40caff", - "wy" : "00fffffffef9fb6d2a5a98c1f0da272af0481a73b62792b92bde96aa1e55c2bb4e" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "046e823555452914099182c6b2c1d6f0b5d28d50ccd005af2ce1bba541aa40cafffffffffef9fb6d2a5a98c1f0da272af0481a73b62792b92bde96aa1e55c2bb4e", + "wx": "6e823555452914099182c6b2c1d6f0b5d28d50ccd005af2ce1bba541aa40caff", + "wy": "00fffffffef9fb6d2a5a98c1f0da272af0481a73b62792b92bde96aa1e55c2bb4e" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200046e823555452914099182c6b2c1d6f0b5d28d50ccd005af2ce1bba541aa40cafffffffffef9fb6d2a5a98c1f0da272af0481a73b62792b92bde96aa1e55c2bb4e", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEboI1VUUpFAmRgsaywdbwtdKNUMzQBa8s\n4bulQapAyv/////++fttKlqYwfDaJyrwSBpztieSuSvelqoeVcK7Tg==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200046e823555452914099182c6b2c1d6f0b5d28d50ccd005af2ce1bba541aa40cafffffffffef9fb6d2a5a98c1f0da272af0481a73b62792b92bde96aa1e55c2bb4e", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEboI1VUUpFAmRgsaywdbwtdKNUMzQBa8s\n4bulQapAyv/////++fttKlqYwfDaJyrwSBpztieSuSvelqoeVcK7Tg==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 449, - "comment" : "y-coordinate of the public key is large", - "flags" : [ + "tcId": 449, + "comment": "y-coordinate of the public key is 
large", + "flags": [ "EdgeCasePublicKey" ], - "msg" : "4d657373616765", - "sig" : "304402203854a3998aebdf2dbc28adac4181462ccac7873907ab7f212c42db0e69b56ed802203ed3f6b8a388d02f3e4df9f2ae9c1bd2c3916a686460dffcd42909cd7f82058e", - "result" : "valid" + "msg": "4d657373616765", + "sig": "304402203854a3998aebdf2dbc28adac4181462ccac7873907ab7f212c42db0e69b56ed802203ed3f6b8a388d02f3e4df9f2ae9c1bd2c3916a686460dffcd42909cd7f82058e", + "result": "valid" }, { - "tcId" : 450, - "comment" : "y-coordinate of the public key is large", - "flags" : [ + "tcId": 450, + "comment": "y-coordinate of the public key is large", + "flags": [ "EdgeCasePublicKey" ], - "msg" : "4d657373616765", - "sig" : "3045022100e94dbdc38795fe5c904d8f16d969d3b587f0a25d2de90b6d8c5c53ff887e360702207a947369c164972521bb8af406813b2d9f94d2aeaa53d4c215aaa0a2578a2c5d", - "result" : "valid" + "msg": "4d657373616765", + "sig": "3045022100e94dbdc38795fe5c904d8f16d969d3b587f0a25d2de90b6d8c5c53ff887e360702207a947369c164972521bb8af406813b2d9f94d2aeaa53d4c215aaa0a2578a2c5d", + "result": "valid" }, { - "tcId" : 451, - "comment" : "y-coordinate of the public key is large", - "flags" : [ + "tcId": 451, + "comment": "y-coordinate of the public key is large", + "flags": [ "EdgeCasePublicKey" ], - "msg" : "4d657373616765", - "sig" : "3044022049fc102a08ca47b60e0858cd0284d22cddd7233f94aaffbb2db1dd2cf08425e102205b16fca5a12cdb39701697ad8e39ffd6bdec0024298afaa2326aea09200b14d6", - "result" : "valid" + "msg": "4d657373616765", + "sig": "3044022049fc102a08ca47b60e0858cd0284d22cddd7233f94aaffbb2db1dd2cf08425e102205b16fca5a12cdb39701697ad8e39ffd6bdec0024298afaa2326aea09200b14d6", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04000000013fd22248d64d95f73c29b48ab48631850be503fd00f8468b5f0f70e0f6ee7aa43bc2c6fd25b1d8269241cbdd9dbb0dac96dc96231f430705f838717d", - "wx" : "013fd22248d64d95f73c29b48ab48631850be503fd00f8468b5f0f70e0", - "wy" : "00f6ee7aa43bc2c6fd25b1d8269241cbdd9dbb0dac96dc96231f430705f838717d" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04000000013fd22248d64d95f73c29b48ab48631850be503fd00f8468b5f0f70e0f6ee7aa43bc2c6fd25b1d8269241cbdd9dbb0dac96dc96231f430705f838717d", + "wx": "013fd22248d64d95f73c29b48ab48631850be503fd00f8468b5f0f70e0", + "wy": "00f6ee7aa43bc2c6fd25b1d8269241cbdd9dbb0dac96dc96231f430705f838717d" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004000000013fd22248d64d95f73c29b48ab48631850be503fd00f8468b5f0f70e0f6ee7aa43bc2c6fd25b1d8269241cbdd9dbb0dac96dc96231f430705f838717d", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEAAAAAT/SIkjWTZX3PCm0irSGMYUL5QP9\nAPhGi18PcOD27nqkO8LG/SWx2CaSQcvdnbsNrJbcliMfQwcF+DhxfQ==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004000000013fd22248d64d95f73c29b48ab48631850be503fd00f8468b5f0f70e0f6ee7aa43bc2c6fd25b1d8269241cbdd9dbb0dac96dc96231f430705f838717d", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEAAAAAT/SIkjWTZX3PCm0irSGMYUL5QP9\nAPhGi18PcOD27nqkO8LG/SWx2CaSQcvdnbsNrJbcliMfQwcF+DhxfQ==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 452, - "comment" : "x-coordinate of the public key is small", - "flags" : [ + "tcId": 452, + "comment": "x-coordinate of the public key is small", + "flags": [ 
"EdgeCasePublicKey" ], - "msg" : "4d657373616765", - "sig" : "3044022041efa7d3f05a0010675fcb918a45c693da4b348df21a59d6f9cd73e0d831d67a02204454ada693e5e26b7bd693236d340f80545c834577b6f73d378c7bcc534244da", - "result" : "valid" + "msg": "4d657373616765", + "sig": "3044022041efa7d3f05a0010675fcb918a45c693da4b348df21a59d6f9cd73e0d831d67a02204454ada693e5e26b7bd693236d340f80545c834577b6f73d378c7bcc534244da", + "result": "valid" }, { - "tcId" : 453, - "comment" : "x-coordinate of the public key is small", - "flags" : [ + "tcId": 453, + "comment": "x-coordinate of the public key is small", + "flags": [ "EdgeCasePublicKey" ], - "msg" : "4d657373616765", - "sig" : "3045022100b615698c358b35920dd883eca625a6c5f7563970cdfc378f8fe0cee17092144c022025f47b326b5be1fb610b885153ea84d41eb4716be66a994e8779989df1c863d4", - "result" : "valid" + "msg": "4d657373616765", + "sig": "3045022100b615698c358b35920dd883eca625a6c5f7563970cdfc378f8fe0cee17092144c022025f47b326b5be1fb610b885153ea84d41eb4716be66a994e8779989df1c863d4", + "result": "valid" }, { - "tcId" : 454, - "comment" : "x-coordinate of the public key is small", - "flags" : [ + "tcId": 454, + "comment": "x-coordinate of the public key is small", + "flags": [ "EdgeCasePublicKey" ], - "msg" : "4d657373616765", - "sig" : "304502210087cf8c0eb82d44f69c60a2ff5457d3aaa322e7ec61ae5aecfd678ae1c1932b0e02203add3b115815047d6eb340a3e008989eaa0f8708d1794814729094d08d2460d3", - "result" : "valid" + "msg": "4d657373616765", + "sig": "304502210087cf8c0eb82d44f69c60a2ff5457d3aaa322e7ec61ae5aecfd678ae1c1932b0e02203add3b115815047d6eb340a3e008989eaa0f8708d1794814729094d08d2460d3", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "0425afd689acabaed67c1f296de59406f8c550f57146a0b4ec2c97876dfffffffffa46a76e520322dfbc491ec4f0cc197420fc4ea5883d8f6dd53c354bc4f67c35", - "wx" : "25afd689acabaed67c1f296de59406f8c550f57146a0b4ec2c97876dffffffff", - "wy" : "00fa46a76e520322dfbc491ec4f0cc197420fc4ea5883d8f6dd53c354bc4f67c35" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "0425afd689acabaed67c1f296de59406f8c550f57146a0b4ec2c97876dfffffffffa46a76e520322dfbc491ec4f0cc197420fc4ea5883d8f6dd53c354bc4f67c35", + "wx": "25afd689acabaed67c1f296de59406f8c550f57146a0b4ec2c97876dffffffff", + "wy": "00fa46a76e520322dfbc491ec4f0cc197420fc4ea5883d8f6dd53c354bc4f67c35" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000425afd689acabaed67c1f296de59406f8c550f57146a0b4ec2c97876dfffffffffa46a76e520322dfbc491ec4f0cc197420fc4ea5883d8f6dd53c354bc4f67c35", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEJa/WiayrrtZ8Hylt5ZQG+MVQ9XFGoLTs\nLJeHbf/////6RqduUgMi37xJHsTwzBl0IPxOpYg9j23VPDVLxPZ8NQ==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000425afd689acabaed67c1f296de59406f8c550f57146a0b4ec2c97876dfffffffffa46a76e520322dfbc491ec4f0cc197420fc4ea5883d8f6dd53c354bc4f67c35", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEJa/WiayrrtZ8Hylt5ZQG+MVQ9XFGoLTs\nLJeHbf/////6RqduUgMi37xJHsTwzBl0IPxOpYg9j23VPDVLxPZ8NQ==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 455, - "comment" : "x-coordinate of the public key has many trailing 1's", - "flags" : [ + "tcId": 455, + "comment": "x-coordinate of the public key has many trailing 1's", + 
"flags": [ "EdgeCasePublicKey" ], - "msg" : "4d657373616765", - "sig" : "3044022062f48ef71ace27bf5a01834de1f7e3f948b9dce1ca1e911d5e13d3b104471d8202205ea8f33f0c778972c4582080deda9b341857dd64514f0849a05f6964c2e34022", - "result" : "valid" + "msg": "4d657373616765", + "sig": "3044022062f48ef71ace27bf5a01834de1f7e3f948b9dce1ca1e911d5e13d3b104471d8202205ea8f33f0c778972c4582080deda9b341857dd64514f0849a05f6964c2e34022", + "result": "valid" }, { - "tcId" : 456, - "comment" : "x-coordinate of the public key has many trailing 1's", - "flags" : [ + "tcId": 456, + "comment": "x-coordinate of the public key has many trailing 1's", + "flags": [ "EdgeCasePublicKey" ], - "msg" : "4d657373616765", - "sig" : "3045022100f6b0e2f6fe020cf7c0c20137434344ed7add6c4be51861e2d14cbda472a6ffb402206416c8dd3e5c5282b306e8dc8ff34ab64cc99549232d678d714402eb6ca7aa0f", - "result" : "valid" + "msg": "4d657373616765", + "sig": "3045022100f6b0e2f6fe020cf7c0c20137434344ed7add6c4be51861e2d14cbda472a6ffb402206416c8dd3e5c5282b306e8dc8ff34ab64cc99549232d678d714402eb6ca7aa0f", + "result": "valid" }, { - "tcId" : 457, - "comment" : "x-coordinate of the public key has many trailing 1's", - "flags" : [ + "tcId": 457, + "comment": "x-coordinate of the public key has many trailing 1's", + "flags": [ "EdgeCasePublicKey" ], - "msg" : "4d657373616765", - "sig" : "3045022100db09d8460f05eff23bc7e436b67da563fa4b4edb58ac24ce201fa8a358125057022046da116754602940c8999c8d665f786c50f5772c0a3cdbda075e77eabc64df16", - "result" : "valid" + "msg": "4d657373616765", + "sig": "3045022100db09d8460f05eff23bc7e436b67da563fa4b4edb58ac24ce201fa8a358125057022046da116754602940c8999c8d665f786c50f5772c0a3cdbda075e77eabc64df16", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "04d12e6c66b67734c3c84d2601cf5d35dc097e27637f0aca4a4fdb74b6aadd3bb93f5bdff88bd5736df898e699006ed750f11cf07c5866cd7ad70c7121ffffffff", - "wx" : "00d12e6c66b67734c3c84d2601cf5d35dc097e27637f0aca4a4fdb74b6aadd3bb9", - "wy" : "3f5bdff88bd5736df898e699006ed750f11cf07c5866cd7ad70c7121ffffffff" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "04d12e6c66b67734c3c84d2601cf5d35dc097e27637f0aca4a4fdb74b6aadd3bb93f5bdff88bd5736df898e699006ed750f11cf07c5866cd7ad70c7121ffffffff", + "wx": "00d12e6c66b67734c3c84d2601cf5d35dc097e27637f0aca4a4fdb74b6aadd3bb9", + "wy": "3f5bdff88bd5736df898e699006ed750f11cf07c5866cd7ad70c7121ffffffff" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004d12e6c66b67734c3c84d2601cf5d35dc097e27637f0aca4a4fdb74b6aadd3bb93f5bdff88bd5736df898e699006ed750f11cf07c5866cd7ad70c7121ffffffff", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE0S5sZrZ3NMPITSYBz1013Al+J2N/CspK\nT9t0tqrdO7k/W9/4i9VzbfiY5pkAbtdQ8RzwfFhmzXrXDHEh/////w==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004d12e6c66b67734c3c84d2601cf5d35dc097e27637f0aca4a4fdb74b6aadd3bb93f5bdff88bd5736df898e699006ed750f11cf07c5866cd7ad70c7121ffffffff", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE0S5sZrZ3NMPITSYBz1013Al+J2N/CspK\nT9t0tqrdO7k/W9/4i9VzbfiY5pkAbtdQ8RzwfFhmzXrXDHEh/////w==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 458, - "comment" : "y-coordinate of the public key has many trailing 1's", - "flags" : [ + "tcId": 458, + 
"comment": "y-coordinate of the public key has many trailing 1's", + "flags": [ "EdgeCasePublicKey" ], - "msg" : "4d657373616765", - "sig" : "30440220592c41e16517f12fcabd98267674f974b588e9f35d35406c1a7bb2ed1d19b7b802203e65a06bd9f83caaeb7b00f2368d7e0dece6b12221269a9b5b765198f840a3a1", - "result" : "valid" + "msg": "4d657373616765", + "sig": "30440220592c41e16517f12fcabd98267674f974b588e9f35d35406c1a7bb2ed1d19b7b802203e65a06bd9f83caaeb7b00f2368d7e0dece6b12221269a9b5b765198f840a3a1", + "result": "valid" }, { - "tcId" : 459, - "comment" : "y-coordinate of the public key has many trailing 1's", - "flags" : [ + "tcId": 459, + "comment": "y-coordinate of the public key has many trailing 1's", + "flags": [ "EdgeCasePublicKey" ], - "msg" : "4d657373616765", - "sig" : "3045022100be0d70887d5e40821a61b68047de4ea03debfdf51cdf4d4b195558b959a032b202207d994b2d8f1dbbeb13534eb3f6e5dccd85f5c4133c27d9e64271b1826ce1f67d", - "result" : "valid" + "msg": "4d657373616765", + "sig": "3045022100be0d70887d5e40821a61b68047de4ea03debfdf51cdf4d4b195558b959a032b202207d994b2d8f1dbbeb13534eb3f6e5dccd85f5c4133c27d9e64271b1826ce1f67d", + "result": "valid" }, { - "tcId" : 460, - "comment" : "y-coordinate of the public key has many trailing 1's", - "flags" : [ + "tcId": 460, + "comment": "y-coordinate of the public key has many trailing 1's", + "flags": [ "EdgeCasePublicKey" ], - "msg" : "4d657373616765", - "sig" : "3045022100fae92dfcb2ee392d270af3a5739faa26d4f97bfd39ed3cbee4d29e26af3b206a02206c9ba37f9faa6a1fd3f65f23b4e853d4692a7274240a12db7ba3884830630d16", - "result" : "valid" + "msg": "4d657373616765", + "sig": "3045022100fae92dfcb2ee392d270af3a5739faa26d4f97bfd39ed3cbee4d29e26af3b206a02206c9ba37f9faa6a1fd3f65f23b4e853d4692a7274240a12db7ba3884830630d16", + "result": "valid" } ] }, { - "type" : "EcdsaBitcoinVerify", - "publicKey" : { - "type" : "EcPublicKey", - "curve" : "secp256k1", - "keySize" : 256, - "uncompressed" : "046d4a7f60d4774a4f0aa8bbdedb953c7eea7909407e3164755664bc2800000000e659d34e4df38d9e8c9eaadfba36612c769195be86c77aac3f36e78b538680fb", - "wx" : "6d4a7f60d4774a4f0aa8bbdedb953c7eea7909407e3164755664bc2800000000", - "wy" : "00e659d34e4df38d9e8c9eaadfba36612c769195be86c77aac3f36e78b538680fb" + "type": "EcdsaBitcoinVerify", + "publicKey": { + "type": "EcPublicKey", + "curve": "secp256k1", + "keySize": 256, + "uncompressed": "046d4a7f60d4774a4f0aa8bbdedb953c7eea7909407e3164755664bc2800000000e659d34e4df38d9e8c9eaadfba36612c769195be86c77aac3f36e78b538680fb", + "wx": "6d4a7f60d4774a4f0aa8bbdedb953c7eea7909407e3164755664bc2800000000", + "wy": "00e659d34e4df38d9e8c9eaadfba36612c769195be86c77aac3f36e78b538680fb" }, - "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200046d4a7f60d4774a4f0aa8bbdedb953c7eea7909407e3164755664bc2800000000e659d34e4df38d9e8c9eaadfba36612c769195be86c77aac3f36e78b538680fb", - "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEbUp/YNR3Sk8KqLve25U8fup5CUB+MWR1\nVmS8KAAAAADmWdNOTfONnoyeqt+6NmEsdpGVvobHeqw/NueLU4aA+w==\n-----END PUBLIC KEY-----\n", - "sha" : "SHA-256", - "tests" : [ + "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200046d4a7f60d4774a4f0aa8bbdedb953c7eea7909407e3164755664bc2800000000e659d34e4df38d9e8c9eaadfba36612c769195be86c77aac3f36e78b538680fb", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEbUp/YNR3Sk8KqLve25U8fup5CUB+MWR1\nVmS8KAAAAADmWdNOTfONnoyeqt+6NmEsdpGVvobHeqw/NueLU4aA+w==\n-----END PUBLIC KEY-----\n", + "sha": "SHA-256", + "tests": [ { - "tcId" : 461, - "comment" : "x-coordinate of the 
public key has many trailing 0's", - "flags" : [ + "tcId": 461, + "comment": "x-coordinate of the public key has many trailing 0's", + "flags": [ "EdgeCasePublicKey" ], - "msg" : "4d657373616765", - "sig" : "30440220176a2557566ffa518b11226694eb9802ed2098bfe278e5570fe1d5d7af18a94302201291df6a0ed5fc0d15098e70bcf13a009284dfd0689d3bb4be6ceeb9be1487c4", - "result" : "valid" + "msg": "4d657373616765", + "sig": "30440220176a2557566ffa518b11226694eb9802ed2098bfe278e5570fe1d5d7af18a94302201291df6a0ed5fc0d15098e70bcf13a009284dfd0689d3bb4be6ceeb9be1487c4", + "result": "valid" }, { - "tcId" : 462, - "comment" : "x-coordinate of the public key has many trailing 0's", - "flags" : [ + "tcId": 462, + "comment": "x-coordinate of the public key has many trailing 0's", + "flags": [ "EdgeCasePublicKey" ], - "msg" : "4d657373616765", - "sig" : "3044022060be20c3dbc162dd34d26780621c104bbe5dace630171b2daef0d826409ee5c20220427f7e4d889d549170bda6a9409fb1cb8b0e763d13eea7bd97f64cf41dc6e497", - "result" : "valid" + "msg": "4d657373616765", + "sig": "3044022060be20c3dbc162dd34d26780621c104bbe5dace630171b2daef0d826409ee5c20220427f7e4d889d549170bda6a9409fb1cb8b0e763d13eea7bd97f64cf41dc6e497", + "result": "valid" }, { - "tcId" : 463, - "comment" : "x-coordinate of the public key has many trailing 0's", - "flags" : [ + "tcId": 463, + "comment": "x-coordinate of the public key has many trailing 0's", + "flags": [ "EdgeCasePublicKey" ], - "msg" : "4d657373616765", - "sig" : "3045022100edf03cf63f658883289a1a593d1007895b9f236d27c9c1f1313089aaed6b16ae02201a4dd6fc0814dc523d1fefa81c64fbf5e618e651e7096fccadbb94cd48e5e0cd", - "result" : "valid" + "msg": "4d657373616765", + "sig": "3045022100edf03cf63f658883289a1a593d1007895b9f236d27c9c1f1313089aaed6b16ae02201a4dd6fc0814dc523d1fefa81c64fbf5e618e651e7096fccadbb94cd48e5e0cd", + "result": "valid" } ] } diff --git a/include/xrpl/basics/README.md b/include/xrpl/basics/README.md index 544bc8aece..290bb7ad0c 100644 --- a/include/xrpl/basics/README.md +++ b/include/xrpl/basics/README.md @@ -4,37 +4,34 @@ Utility functions and classes. ripple/basic should contain no dependencies on other modules. +# Choosing a rippled container. -Choosing a rippled container. -============================= +- `std::vector` + - For ordered containers with most insertions or erases at the end. -* `std::vector` - * For ordered containers with most insertions or erases at the end. +- `std::deque` + - For ordered containers with most insertions or erases at the start or end. -* `std::deque` - * For ordered containers with most insertions or erases at the start or end. - -* `std::list` - * For ordered containers with inserts and erases to the middle. - * For containers with iterators stable over insert and erase. - * Generally slower and bigger than `std::vector` or `std::deque` except for +- `std::list` + - For ordered containers with inserts and erases to the middle. + - For containers with iterators stable over insert and erase. + - Generally slower and bigger than `std::vector` or `std::deque` except for those cases. -* `std::set` - * For sorted containers. +- `std::set` + - For sorted containers. -* `ripple::hash_set` - * Where inserts and contains need to be O(1). - * For "small" sets, `std::set` might be faster and smaller. +- `ripple::hash_set` + - Where inserts and contains need to be O(1). + - For "small" sets, `std::set` might be faster and smaller. 
-* `ripple::hardened_hash_set` - * For data sets where the key could be manipulated by an attacker - in an attempt to mount an algorithmic complexity attack: see +- `ripple::hardened_hash_set` + - For data sets where the key could be manipulated by an attacker + in an attempt to mount an algorithmic complexity attack: see http://en.wikipedia.org/wiki/Algorithmic_complexity_attack - The following container is deprecated -* `std::unordered_set` - * Use `ripple::hash_set` instead, which uses a better hashing algorithm. - * Or use `ripple::hardened_hash_set` to prevent algorithmic complexity attacks. +- `std::unordered_set` +- Use `ripple::hash_set` instead, which uses a better hashing algorithm. +- Or use `ripple::hardened_hash_set` to prevent algorithmic complexity attacks. diff --git a/include/xrpl/proto/org/xrpl/rpc/v1/README.md b/include/xrpl/proto/org/xrpl/rpc/v1/README.md index 9268439847..e9b9b55841 100644 --- a/include/xrpl/proto/org/xrpl/rpc/v1/README.md +++ b/include/xrpl/proto/org/xrpl/rpc/v1/README.md @@ -79,4 +79,3 @@ mirror the JSON test as much as possible. Refer to the Protocol Buffers [language guide](https://developers.google.com/protocol-buffers/docs/proto3) for more detailed information about Protocol Buffers. - diff --git a/include/xrpl/protocol/README.md b/include/xrpl/protocol/README.md index 9864b700ef..a6e8d24982 100644 --- a/include/xrpl/protocol/README.md +++ b/include/xrpl/protocol/README.md @@ -23,9 +23,9 @@ optional fields easier to read: if it exists, or nothing if it doesn't." This usage of the tilde/bitwise NOT operator is not standard outside of the `rippled` codebase. - - As a consequence of this, `x[~sfFoo] = y[~sfFoo]` - assigns the value of Foo from y to x, including omitting - Foo from x if it doesn't exist in y. + - As a consequence of this, `x[~sfFoo] = y[~sfFoo]` + assigns the value of Foo from y to x, including omitting + Foo from x if it doesn't exist in y. Typically, for things that are guaranteed to exist, you use `x[sfFoo]` and avoid having to deal with a container that may diff --git a/include/xrpl/resource/README.md b/include/xrpl/resource/README.md index 8fed985bfd..253e3c7625 100644 --- a/include/xrpl/resource/README.md +++ b/include/xrpl/resource/README.md @@ -1,4 +1,4 @@ -# Resource::Manager # +# Resource::Manager The ResourceManager module has these responsibilities: @@ -7,7 +7,7 @@ The ResourceManager module has these responsibilities: - Provide an interface to share load information in a cluster. - Warn and/or disconnect endpoints for imposing load. -## Description ## +## Description To prevent monopolization of server resources or attacks on servers, resource consumption is monitored at each endpoint. When consumption @@ -33,44 +33,44 @@ Although RPC connections consume resources, they are transient and cannot be rate limited. It is advised not to expose RPC interfaces to the general public. -## Consumer Types ## +## Consumer Types Consumers are placed into three classifications (as identified by the Resource::Kind enumeration): - - InBound, - - OutBound, and - - Admin +- InBound, +- OutBound, and +- Admin - Each caller determines for itself the classification of the Consumer it is - creating. +Each caller determines for itself the classification of the Consumer it is +creating. -## Resource Loading ## +## Resource Loading It is expected that a client will impose a higher load on the server when it first connects: the client may need to catch up on transactions -it has missed, or get trust lines, or transfer fees. 
The Manager must +it has missed, or get trust lines, or transfer fees. The Manager must expect this initial peak load, but not allow that high load to continue because over the long term that would unduly stress the server. If a client places a sustained high load on the server, that client -is initially given a warning message. If that high load continues +is initially given a warning message. If that high load continues the Manager may tell the heavily loaded server to drop the connection entirely and not allow re-connection for some amount of time. Each load is monitored by capturing peaks and then decaying those peak values over time: this is implemented by the DecayingSample class. -## Gossip ## +## Gossip Each server in a cluster creates a list of IP addresses of end points -that are imposing a significant load. This list is called Gossip, which -is passed to other nodes in that cluster. Gossip helps individual +that are imposing a significant load. This list is called Gossip, which +is passed to other nodes in that cluster. Gossip helps individual servers in the cluster identify IP addreses that might be unduly loading -the entire cluster. Again the recourse of the individual servers is to +the entire cluster. Again the recourse of the individual servers is to drop connections to those IP addresses that occur commonly in the gossip. -## Access ## +## Access In rippled, the Application holds a unique instance of Resource::Manager, which may be retrieved by calling the method diff --git a/src/test/README.md b/src/test/README.md index 7d342f24bf..b012607f58 100644 --- a/src/test/README.md +++ b/src/test/README.md @@ -1,4 +1,3 @@ - # Unit Tests ## Running Tests @@ -12,13 +11,13 @@ just `NoRippleCheckLimits`). More than one suite or group of suites can be specified as a comma separated list via the argument. For example, `--unittest=beast,OversizeMeta` will run all suites in the `beast` library (root identifier) as well as the test suite -named `OversizeMeta`). All name matches are case sensitive. +named `OversizeMeta`). All name matches are case sensitive. Tests can be executed in parallel using several child processes by specifying the `--unittest-jobs=N` parameter. The default behavior is to execute serially using a single process. -The order that suites are executed is determined by the suite priority that +The order that suites are executed is determined by the suite priority that is optionally specified when the suite is declared in the code with one of the `BEAST_DEFINE_TESTSUITE` macros. By default, suites have a priority of 0, and other suites can choose to declare an integer priority value to make themselves diff --git a/src/test/csf/README.md b/src/test/csf/README.md index a4b69abab5..30d5abb042 100644 --- a/src/test/csf/README.md +++ b/src/test/csf/README.md @@ -26,7 +26,7 @@ collect when running the simulation. The specification includes: - A collection of [`Peer`s](./Peer.h) that represent the participants in the network, with each independently running the consensus algorithm. - The `Peer` trust relationships as a `TrustGraph`. This is a directed graph - whose edges define what other `Peer`s a given `Peer` trusts. In other words, + whose edges define what other `Peer`s a given `Peer` trusts. In other words, the set of out edges for a `Peer` in the graph correspond to the UNL of that `Peer`. - The network communication layer as a `BasicNetwork`. This models the overlay @@ -45,6 +45,7 @@ eventually fully validating the consensus history of accepted transactions. 
Each the registered `Collector`s. ## Example Simulation + Below is a basic simulation we can walk through to get an understanding of the framework. This simulation is for a set of 5 validators that aren't directly connected but rely on a single hub node for communication. @@ -98,12 +99,12 @@ center[0]->runAsValidator = false; The simulation code starts by creating a single instance of the [`Sim` class](./Sim.h). This class is used to manage the overall simulation and internally owns most other components, including the `Peer`s, `Scheduler`, -`BasicNetwork` and `TrustGraph`. The next two lines create two differ +`BasicNetwork` and `TrustGraph`. The next two lines create two differ `PeerGroup`s of size 5 and 1 . A [`PeerGroup`](./PeerGroup.h) is a convenient way for configuring a set of related peers together and internally has a vector of pointers to the `Peer`s which are owned by the `Sim`. `PeerGroup`s can be combined using `+/-` operators to configure more complex relationships of nodes -as shown by `PeerGroup network`. Note that each call to `createGroup` adds that +as shown by `PeerGroup network`. Note that each call to `createGroup` adds that many new `Peer`s to the simulation, but does not specify any trust or network relationships for the new `Peer`s. @@ -125,14 +126,14 @@ validators.connect(center, delay); Although the `sim` object has accessible instances of [TrustGraph](./TrustGraph.h) and [BasicNetwork](./BasicNetwork.h), it is more -convenient to manage the graphs via the `PeerGroup`s. The first two lines -create a trust topology in which all `Peer`s trust the 5 validating `Peer`s. Or +convenient to manage the graphs via the `PeerGroup`s. The first two lines +create a trust topology in which all `Peer`s trust the 5 validating `Peer`s. Or in the UNL perspective, all `Peer`s are configured with the same UNL listing the 5 validating `Peer`s. The two lines could've been rewritten as `network.trust(validators)`. The next lines create the network communication topology. Each of the validating -`Peer`s connects to the central hub `Peer` with a fixed delay of 200ms. Note +`Peer`s connects to the central hub `Peer` with a fixed delay of 200ms. Note that the network connections are really undirected, but are represented internally in a directed graph using edge pairs of inbound and outbound connections. @@ -143,11 +144,11 @@ SimDurationCollector simDur; sim.collectors.add(simDur); ``` -The next lines add a single collector to the simulation. The +The next lines add a single collector to the simulation. The `SimDurationCollector` is a simple example collector which tracks the total -duration of the simulation. More generally, a collector is any class that +duration of the simulation. More generally, a collector is any class that implements `void on(NodeID, SimTime, Event)` for all [Events](./events.h) -emitted by a Peer. Events are arbitrary types used to indicate some action or +emitted by a Peer. Events are arbitrary types used to indicate some action or change of state of a `Peer`. Other [existing collectors](./collectors.h) measure latencies of transaction submission to validation or the rate of ledger closing and monitor any jumps in ledger history. @@ -176,9 +177,9 @@ to send transactions in at fixed or random intervals to fixed or random `Peer`s. ## Run -The example has two calls to `sim.run(1)`. This call runs the simulation until -each `Peer` has closed one additional ledger. After closing the additional -ledger, the `Peer` stops participating in consensus. 
The first call is used to +The example has two calls to `sim.run(1)`. This call runs the simulation until +each `Peer` has closed one additional ledger. After closing the additional +ledger, the `Peer` stops participating in consensus. The first call is used to ensure a more useful prior state of all `Peer`s. After the transaction submission, the second call to `run` results in one additional ledger that accepts those transactions. @@ -188,4 +189,4 @@ Alternatively, you can specify a duration to run the simulation, e.g. scheduler has elapsed 10 additional seconds. The `sim.scheduler.in` or `sim.scheduler.at` methods can schedule arbitrary code to execute at a later time in the simulation, for example removing a network connection or modifying -the trust graph. \ No newline at end of file +the trust graph. diff --git a/src/tests/README.md b/src/tests/README.md index 8065316580..7c4cc5edf8 100644 --- a/src/tests/README.md +++ b/src/tests/README.md @@ -1,4 +1,5 @@ # Unit tests + This directory contains unit tests for the project. The difference from existing `src/test` folder is that we switch to 3rd party testing framework (doctest). We intend to gradually move existing tests from our own framework to doctest and such tests will be moved to this new folder. diff --git a/src/xrpld/app/consensus/README.md b/src/xrpld/app/consensus/README.md index 0050273138..bdf5afe87c 100644 --- a/src/xrpld/app/consensus/README.md +++ b/src/xrpld/app/consensus/README.md @@ -1,13 +1,12 @@ -# RCL Consensus +# RCL Consensus This directory holds the types and classes needed to connect the generic consensus algorithm to the rippled-specific instance of consensus. - * `RCLCxTx` adapts a `SHAMapItem` transaction. - * `RCLCxTxSet` adapts a `SHAMap` to represent a set of transactions. - * `RCLCxLedger` adapts a `Ledger`. - * `RCLConsensus` is implements the requirements of the generic - `Consensus` class by connecting to the rest of the `rippled` - application. - +- `RCLCxTx` adapts a `SHAMapItem` transaction. +- `RCLCxTxSet` adapts a `SHAMap` to represent a set of transactions. +- `RCLCxLedger` adapts a `Ledger`. +- `RCLConsensus` is implements the requirements of the generic + `Consensus` class by connecting to the rest of the `rippled` + application. diff --git a/src/xrpld/app/ledger/README.md b/src/xrpld/app/ledger/README.md index cf7844856b..d2afe01e71 100644 --- a/src/xrpld/app/ledger/README.md +++ b/src/xrpld/app/ledger/README.md @@ -1,9 +1,8 @@ +# Ledger Process -# Ledger Process # +## Introduction -## Introduction ## - -## Life Cycle ## +## Life Cycle Every server always has an open ledger. All received new transactions are applied to the open ledger. The open ledger can't close until we reach @@ -37,10 +36,11 @@ round. This is a "rebase": now that we know the real history, the current open ledger is rebased against the last closed ledger. The purpose of the open ledger is as follows: + - Forms the basis of the initial proposal during consensus - Used to decide if we can reject the transaction without relaying it -## Byzantine Failures ## +## Byzantine Failures Byzantine failures are resolved as follows. If there is a supermajority ledger, then a minority of validators will discover that the consensus round is @@ -52,167 +52,169 @@ If there is no majority ledger, then starting on the next consensus round there will not be a consensus on the last closed ledger. Another avalanche process is started. 
-## Validators ## +## Validators The only meaningful difference between a validator and a 'regular' server is that the validator sends its proposals and validations to the network. --- -# The Ledger Stream # +# The Ledger Stream -## Ledger Priorities ## +## Ledger Priorities There are two ledgers that are the most important for a rippled server to have: - - The consensus ledger and - - The last validated ledger. +- The consensus ledger and +- The last validated ledger. If we need either of those two ledgers they are fetched with the highest -priority. Also, when they arrive, they replace their earlier counterparts +priority. Also, when they arrive, they replace their earlier counterparts (if they exist). The `LedgerMaster` object tracks - - the last published ledger, - - the last validated ledger, and - - ledger history. -So the `LedgerMaster` is at the center of fetching historical ledger data. + +- the last published ledger, +- the last validated ledger, and +- ledger history. + So the `LedgerMaster` is at the center of fetching historical ledger data. In specific, the `LedgerMaster::doAdvance()` method triggers the code that fetches historical data and controls the state machine for ledger acquisition. The server tries to publish an on-going stream of consecutive ledgers to its -clients. After the server has started and caught up with network +clients. After the server has started and caught up with network activity, say when ledger 500 is being settled, then the server puts its best effort into publishing validated ledger 500 followed by validated ledger 501 -and then 502. This effort continues until the server is shut down. +and then 502. This effort continues until the server is shut down. But loading or network connectivity may sometimes interfere with that ledger -stream. So suppose the server publishes validated ledger 600 and then -receives validated ledger 603. Then the server wants to back fill its ledger +stream. So suppose the server publishes validated ledger 600 and then +receives validated ledger 603. Then the server wants to back fill its ledger history with ledgers 601 and 602. -The server prioritizes keeping up with current ledgers. But if it is caught +The server prioritizes keeping up with current ledgers. But if it is caught up on the current ledger, and there are no higher priority demands on the -server, then it will attempt to back fill its historical ledgers. It fills +server, then it will attempt to back fill its historical ledgers. It fills in the historical ledger data first by attempting to retrieve it from the -local database. If the local database does not have all of the necessary data +local database. If the local database does not have all of the necessary data then the server requests the remaining information from network peers. -Suppose the server is missing multiple historical ledgers. Take the previous -example where we have ledgers 603 and 600, but we're missing 601 and 602. In +Suppose the server is missing multiple historical ledgers. Take the previous +example where we have ledgers 603 and 600, but we're missing 601 and 602. In that case the server requests information for ledger 602 first, before -back-filling ledger 601. We want to expand the contiguous range of -most-recent ledgers that the server has locally. There's also a limit to -how much historical ledger data is useful. So if we're on ledger 603, but +back-filling ledger 601. We want to expand the contiguous range of +most-recent ledgers that the server has locally. 
There's also a limit to +how much historical ledger data is useful. So if we're on ledger 603, but we're missing ledger 4 we may not bother asking for ledger 4. -## Assembling a Ledger ## +## Assembling a Ledger When data for a ledger arrives from a peer, it may take a while before the -server can apply that data. So when ledger data arrives we schedule a job -thread to apply that data. If more data arrives before the job starts we add -that data to the job. We defer requesting more ledger data until all of the -data we have for that ledger has been processed. Once all of that data is +server can apply that data. So when ledger data arrives we schedule a job +thread to apply that data. If more data arrives before the job starts we add +that data to the job. We defer requesting more ledger data until all of the +data we have for that ledger has been processed. Once all of that data is processed we can intelligently request only the additional data that we need -to fill in the ledger. This reduces network traffic and minimizes the load +to fill in the ledger. This reduces network traffic and minimizes the load on peers supplying the data. If we receive data for a ledger that is not currently under construction, -we don't just throw the data away. In particular the AccountStateNodes -may be useful, since they can be re-used across ledgers. This data is +we don't just throw the data away. In particular the AccountStateNodes +may be useful, since they can be re-used across ledgers. This data is stashed in memory (not the database) where the acquire process can find it. Peers deliver ledger data in the order in which the data can be validated. Data arrives in the following order: - 1. The hash of the ledger header - 2. The ledger header - 3. The root nodes of the transaction tree and state tree - 4. The lower (non-root) nodes of the state tree - 5. The lower (non-root) nodes of the transaction tree +1. The hash of the ledger header +2. The ledger header +3. The root nodes of the transaction tree and state tree +4. The lower (non-root) nodes of the state tree +5. The lower (non-root) nodes of the transaction tree -Inner-most nodes are supplied before outer nodes. This allows the +Inner-most nodes are supplied before outer nodes. This allows the requesting server to hook things up (and validate) in the order in which data arrives. If this process fails, then a server can also ask for ledger data by hash, -rather than by asking for specific nodes in a ledger. Asking for information +rather than by asking for specific nodes in a ledger. Asking for information by hash is less efficient, but it allows a peer to return the information -even if the information is not assembled into a tree. All the peer needs is +even if the information is not assembled into a tree. All the peer needs is the raw data. -## Which Peer To Ask ## +## Which Peer To Ask Peers go though state transitions as the network goes through its state -transitions. Peer's provide their state to their directly connected peers. +transitions. Peer's provide their state to their directly connected peers. By monitoring the state of each connected peer a server can tell which of its peers has the information that it needs. Therefore if a server suffers a byzantine failure the server can tell which -of its peers did not suffer that same failure. So the server knows which +of its peers did not suffer that same failure. So the server knows which peer(s) to ask for the missing information. -Peers also report their contiguous range of ledgers. 
This is another way that +Peers also report their contiguous range of ledgers. This is another way that a server can determine which peer to ask for a particular ledger or piece of a ledger. -There are also indirect peer queries. If there have been timeouts while -acquiring ledger data then a server may issue indirect queries. In that +There are also indirect peer queries. If there have been timeouts while +acquiring ledger data then a server may issue indirect queries. In that case the server receiving the indirect query passes the query along to any -of its peers that may have the requested data. This is important if the -network has a byzantine failure. It also helps protect the validation -network. A validator may need to get a peer set from one of the other +of its peers that may have the requested data. This is important if the +network has a byzantine failure. It also helps protect the validation +network. A validator may need to get a peer set from one of the other validators, and indirect queries improve the likelihood of success with that. -## Kinds of Fetch Packs ## +## Kinds of Fetch Packs A FetchPack is the way that peers send partial ledger data to other peers so the receiving peer can reconstruct a ledger. -A 'normal' FetchPack is a bucket of nodes indexed by hash. The server +A 'normal' FetchPack is a bucket of nodes indexed by hash. The server building the FetchPack puts information into the FetchPack that the -destination server is likely to need. Normally they contain all of the +destination server is likely to need. Normally they contain all of the missing nodes needed to fill in a ledger. A 'compact' FetchPack, on the other hand, contains only leaf nodes, no -inner nodes. Because there are no inner nodes, the ledger information that -it contains cannot be validated as the ledger is assembled. We have to, +inner nodes. Because there are no inner nodes, the ledger information that +it contains cannot be validated as the ledger is assembled. We have to, initially, take the accuracy of the FetchPack for granted and assemble the -ledger. Once the entire ledger is assembled the entire ledger can be -validated. But if the ledger does not validate then there's nothing to be +ledger. Once the entire ledger is assembled the entire ledger can be +validated. But if the ledger does not validate then there's nothing to be done but throw the entire FetchPack away; there's no way to save a portion of the FetchPack. -The FetchPacks just described could be termed 'reverse FetchPacks.' They -only provide historical data. There may be a use for what could be called a -'forward FetchPack.' A forward FetchPack would contain the information that +The FetchPacks just described could be termed 'reverse FetchPacks.' They +only provide historical data. There may be a use for what could be called a +'forward FetchPack.' A forward FetchPack would contain the information that is needed to build a new ledger out of the preceding ledger. A forward compact FetchPack would need to contain: - - The header for the new ledger, - - The leaf nodes of the transaction tree (if there is one), - - The index of deleted nodes in the state tree, - - The index and data for new nodes in the state tree, and - - The index and new data of modified nodes in the state tree. 
+ +- The header for the new ledger, +- The leaf nodes of the transaction tree (if there is one), +- The index of deleted nodes in the state tree, +- The index and data for new nodes in the state tree, and +- The index and new data of modified nodes in the state tree. --- -# Definitions # +# Definitions -## Open Ledger ## +## Open Ledger The open ledger is the ledger that the server applies all new incoming transactions to. -## Last Validated Ledger ## +## Last Validated Ledger The most recent ledger that the server is certain will always remain part of the permanent, public history. -## Last Closed Ledger ## +## Last Closed Ledger The most recent ledger that the server believes the network reached consensus on. Different servers can arrive at a different conclusion about the last @@ -220,29 +222,29 @@ closed ledger. This is a consequence of Byzantanine failure. The purpose of validations is to resolve the differences between servers and come to a common conclusion about which last closed ledger is authoritative. -## Consensus ## +## Consensus A distributed agreement protocol. Ripple uses the consensus process to solve the problem of double-spending. -## Validation ## +## Validation A signed statement indicating that it built a particular ledger as a result of the consensus process. -## Proposal ## +## Proposal A signed statement of which transactions it believes should be included in the next consensus ledger. -## Ledger Header ## +## Ledger Header The "ledger header" is the chunk of data that hashes to the ledger's hash. It contains the sequence number, parent hash, hash of the previous ledger, hash of the root node of the state tree, and so on. -## Ledger Base ## +## Ledger Base The term "ledger base" refers to a particular type of query and response used in the ledger fetch process that includes @@ -251,9 +253,9 @@ such as the root node of the state tree. --- -# Ledger Structures # +# Ledger Structures -## Account Root ## +## Account Root **Account:** A 160-bit account ID. @@ -264,8 +266,8 @@ such as the root node of the state tree. **LedgerEntryType:** "AccountRoot" **OwnerCount:** The number of items the account owns that are charged to the -account. Offers are charged to the account. Trust lines may be charged to -the account (but not necessarily). The OwnerCount determines the reserve on +account. Offers are charged to the account. Trust lines may be charged to +the account (but not necessarily). The OwnerCount determines the reserve on the account. **PreviousTxnID:** 256-bit index of the previous transaction on this account. @@ -274,43 +276,45 @@ the account. transaction on this account. **Sequence:** Must be a value of 1 for the account to process a valid -transaction. The value initially matches the sequence number of the state -tree of the account that signed the transaction. The process of executing -the transaction increments the sequence number. This is how ripple prevents +transaction. The value initially matches the sequence number of the state +tree of the account that signed the transaction. The process of executing +the transaction increments the sequence number. This is how ripple prevents a transaction from executing more than once. **index:** 256-bit hash of this AccountRoot. - -## Trust Line ## +## Trust Line The trust line acts as an edge connecting two accounts: the accounts -represented by the HighNode and the LowNode. Which account is "high" and -"low" is determined by the values of the two 160-bit account IDs. 
The -account with the smaller 160-bit ID is always the low account. This +represented by the HighNode and the LowNode. Which account is "high" and +"low" is determined by the values of the two 160-bit account IDs. The +account with the smaller 160-bit ID is always the low account. This ordering makes the hash of a trust line between accounts A and B have the same value as a trust line between accounts B and A. **Balance:** - - **currency:** String identifying a valid currency, e.g., "BTC". - - **issuer:** There is no issuer, really, this entry is "NoAccount". - - **value:** + +- **currency:** String identifying a valid currency, e.g., "BTC". +- **issuer:** There is no issuer, really, this entry is "NoAccount". +- **value:** **Flags:** ??? **HighLimit:** - - **currency:** Same as for Balance. - - **issuer:** A 160-bit account ID. - - **value:** The largest amount this issuer will accept of the currency. + +- **currency:** Same as for Balance. +- **issuer:** A 160-bit account ID. +- **value:** The largest amount this issuer will accept of the currency. **HighNode:** A deletion hint. **LedgerEntryType:** "RippleState". **LowLimit:** - - **currency:** Same as for Balance. - - **issuer:** A 160-bit account ID. - - **value:** The largest amount of the currency this issuer will accept. + +- **currency:** Same as for Balance. +- **issuer:** A 160-bit account ID. +- **value:** The largest amount of the currency this issuer will accept. **LowNode:** A deletion hint @@ -321,8 +325,7 @@ transaction on this account. **index:** 256-bit hash of this RippleState. - -## Ledger Hashes ## +## Ledger Hashes **Flags:** ??? @@ -334,8 +337,7 @@ transaction on this account. **index:** 256-bit hash of this LedgerHashes. - -## Owner Directory ## +## Owner Directory Lists all of the offers and trust lines that are associated with an account. @@ -351,8 +353,7 @@ Lists all of the offers and trust lines that are associated with an account. **index:** A hash of the owner account. - -## Book Directory ## +## Book Directory Lists one or more offers that have the same quality. @@ -360,18 +361,18 @@ If a pair of Currency and Issuer fields are all zeros, then that pair is dealing in XRP. The code, at the moment, does not recognize that the Currency and Issuer -fields are currencies and issuers. So those values are presented in hex, -rather than as accounts and currencies. That's a bug and should be fixed +fields are currencies and issuers. So those values are presented in hex, +rather than as accounts and currencies. That's a bug and should be fixed at some point. -**ExchangeRate:** A 64-bit value. The first 8-bits is the exponent and the -remaining bits are the mantissa. The format is such that a bigger 64-bit +**ExchangeRate:** A 64-bit value. The first 8-bits is the exponent and the +remaining bits are the mantissa. The format is such that a bigger 64-bit value always represents a higher exchange rate. -Each type can compute its own hash. The hash of a book directory contains, -as its lowest 64 bits, the exchange rate. This means that if there are -multiple *almost* identical book directories, but with different exchange -rates, then these book directories will sit together in the ledger. The best +Each type can compute its own hash. The hash of a book directory contains, +as its lowest 64 bits, the exchange rate. This means that if there are +multiple _almost_ identical book directories, but with different exchange +rates, then these book directories will sit together in the ledger. 
The best exchange rate will be the first in the sequence of Book Directories. **Flags:** ??? @@ -392,14 +393,14 @@ currencies described by this BookDirectory. **TakerPaysIssuer:** Issuer of the PaysCurrency. **index:** A 256-bit hash computed using the TakerGetsCurrency, TakerGetsIssuer, -TakerPaysCurrency, and TakerPaysIssuer in the top 192 bits. The lower 64-bits +TakerPaysCurrency, and TakerPaysIssuer in the top 192 bits. The lower 64-bits are occupied by the exchange rate. --- -# Ledger Publication # +# Ledger Publication -## Overview ## +## Overview The Ripple server permits clients to subscribe to a continuous stream of fully-validated ledgers. The publication code maintains this stream. @@ -408,7 +409,7 @@ The server attempts to maintain this continuous stream unless it falls too far behind, in which case it jumps to the current fully-validated ledger and then attempts to resume a continuous stream. -## Implementation ## +## Implementation `LedgerMaster::doAdvance` is invoked when work may need to be done to publish ledgers to clients. This code loops until it cannot make further @@ -430,17 +431,17 @@ the list of resident ledgers. --- -# The Ledger Cleaner # +# The Ledger Cleaner -## Overview ## +## Overview The ledger cleaner checks and, if necessary, repairs the SQLite ledger and -transaction databases. It can also check for pieces of a ledger that should -be in the node back end but are missing. If it detects this case, it -triggers a fetch of the ledger. The ledger cleaner only operates by manual +transaction databases. It can also check for pieces of a ledger that should +be in the node back end but are missing. If it detects this case, it +triggers a fetch of the ledger. The ledger cleaner only operates by manual request. It is never started automatically. -## Operations ## +## Operations The ledger cleaner can operate on a single ledger or a range of ledgers. It always validates the ledger chain itself, ensuring that the SQLite database @@ -448,7 +449,7 @@ contains a consistent chain of ledgers from the last validated ledger as far back as the database goes. If requested, it can additionally repair the SQLite entries for transactions -in each checked ledger. This was primarily intended to repair incorrect +in each checked ledger. This was primarily intended to repair incorrect entries created by a bug (since fixed) that could cause transasctions from a ledger other than the fully-validated ledger to appear in the SQLite databases in addition to the transactions from the correct ledger. @@ -460,7 +461,7 @@ To prevent the ledger cleaner from saturating the available I/O bandwidth and excessively polluting caches with ancient information, the ledger cleaner paces itself and does not attempt to get its work done quickly. -## Commands ## +## Commands The ledger cleaner can be controlled and monitored with the **ledger_cleaner** RPC command. With no parameters, this command reports on the status of the @@ -486,4 +487,4 @@ ledger(s) for missing nodes in the back end node store --- -# References # +# References diff --git a/src/xrpld/app/misc/FeeEscalation.md b/src/xrpld/app/misc/FeeEscalation.md index b86f8dab94..468ab2b528 100644 --- a/src/xrpld/app/misc/FeeEscalation.md +++ b/src/xrpld/app/misc/FeeEscalation.md @@ -17,15 +17,16 @@ transactions into the open ledger, even during unfavorable conditions. How fees escalate: 1. There is a base [fee level](#fee-level) of 256, -which is the minimum that a typical transaction -is required to pay. 
For a [reference -transaction](#reference-transaction), that corresponds to the -network base fee, which is currently 10 drops. + which is the minimum that a typical transaction + is required to pay. For a [reference + transaction](#reference-transaction), that corresponds to the + network base fee, which is currently 10 drops. 2. However, there is a limit on the number of transactions that -can get into an open ledger for that base fee level. The limit -will vary based on the [health](#consensus-health) of the -consensus process, but will be at least [5](#other-constants). - * If consensus stays [healthy](#consensus-health), the limit will + can get into an open ledger for that base fee level. The limit + will vary based on the [health](#consensus-health) of the + consensus process, but will be at least [5](#other-constants). + +- If consensus stays [healthy](#consensus-health), the limit will be the max of the number of transactions in the validated ledger plus [20%](#other-constants) or the current limit until it gets to [50](#other-constants), at which point, the limit will be the @@ -35,50 +36,56 @@ consensus process, but will be at least [5](#other-constants). decreases (i.e. a large ledger is no longer recent), the limit will decrease to the new largest value by 10% each time the ledger has more than 50 transactions. - * If consensus does not stay [healthy](#consensus-health), +- If consensus does not stay [healthy](#consensus-health), the limit will clamp down to the smaller of the number of transactions in the validated ledger minus [50%](#other-constants) or the previous limit minus [50%](#other-constants). - * The intended effect of these mechanisms is to allow as many base fee +- The intended effect of these mechanisms is to allow as many base fee level transactions to get into the ledger as possible while the network is [healthy](#consensus-health), but to respond quickly to any condition that makes it [unhealthy](#consensus-health), including, but not limited to, malicious attacks. + 3. Once there are more transactions in the open ledger than indicated -by the limit, the required fee level jumps drastically. - * The formula is `( lastLedgerMedianFeeLevel * - TransactionsInOpenLedger^2 / limit^2 )`, + by the limit, the required fee level jumps drastically. + +- The formula is `( lastLedgerMedianFeeLevel * +TransactionsInOpenLedger^2 / limit^2 )`, and returns a [fee level](#fee-level). + 4. That may still be pretty small, but as more transactions get -into the ledger, the fee level increases exponentially. - * For example, if the limit is 6, and the median fee is minimal, + into the ledger, the fee level increases exponentially. + +- For example, if the limit is 6, and the median fee is minimal, and assuming all [reference transactions](#reference-transaction), the 8th transaction only requires a [level](#fee-level) of about 174,000 or about 6800 drops, but the 20th transaction requires a [level](#fee-level) of about 1,283,000 or about 50,000 drops. + 5. Finally, as each ledger closes, the median fee level of that ledger is -computed and used as `lastLedgerMedianFeeLevel` (with a -[minimum value of 128,000](#other-constants)) -in the fee escalation formula for the next open ledger. - * Continuing the example above, if ledger consensus completes with + computed and used as `lastLedgerMedianFeeLevel` (with a + [minimum value of 128,000](#other-constants)) + in the fee escalation formula for the next open ledger. 
+ +- Continuing the example above, if ledger consensus completes with only those 20 transactions, and all of those transactions paid the minimum required fee at each step, the limit will be adjusted from 6 to 24, and the `lastLedgerMedianFeeLevel` will be about 322,000, which is 12,600 drops for a [reference transaction](#reference-transaction). - * This will only require 10 drops for the first 25 transactions, +- This will only require 10 drops for the first 25 transactions, but the 26th transaction will require a level of about 349,150 or about 13,649 drops. -* This example assumes a cold-start scenario, with a single, possibly -malicious, user willing to pay arbitrary amounts to get transactions -into the open ledger. It ignores the effects of the [Transaction -Queue](#transaction-queue). Any lower fee level transactions submitted -by other users at the same time as this user's transactions will go into -the transaction queue, and will have the first opportunity to be applied -to the _next_ open ledger. The next section describes how that works in -more detail. +- This example assumes a cold-start scenario, with a single, possibly + malicious, user willing to pay arbitrary amounts to get transactions + into the open ledger. It ignores the effects of the [Transaction + Queue](#transaction-queue). Any lower fee level transactions submitted + by other users at the same time as this user's transactions will go into + the transaction queue, and will have the first opportunity to be applied + to the _next_ open ledger. The next section describes how that works in + more detail. ## Transaction Queue @@ -92,33 +99,34 @@ traffic periods, and give those transactions a much better chance to succeed. 1. If an incoming transaction meets both the base [fee -level](#fee-level) and the [load fee](#load-fee) minimum, but does not have a high -enough [fee level](#fee-level) to immediately go into the open ledger, -it is instead put into the queue and broadcast to peers. Each peer will -then make an independent decision about whether to put the transaction -into its open ledger or the queue. In principle, peers with identical -open ledgers will come to identical decisions. Any discrepancies will be -resolved as usual during consensus. + level](#fee-level) and the [load fee](#load-fee) minimum, but does not have a high + enough [fee level](#fee-level) to immediately go into the open ledger, + it is instead put into the queue and broadcast to peers. Each peer will + then make an independent decision about whether to put the transaction + into its open ledger or the queue. In principle, peers with identical + open ledgers will come to identical decisions. Any discrepancies will be + resolved as usual during consensus. 2. When consensus completes, the open ledger limit is adjusted, and -the required [fee level](#fee-level) drops back to the base -[fee level](#fee-level). Before the ledger is made available to -external transactions, transactions are applied from the queue to the -ledger from highest [fee level](#fee-level) to lowest. These transactions -count against the open ledger limit, so the required [fee level](#fee-level) -may start rising during this process. + the required [fee level](#fee-level) drops back to the base + [fee level](#fee-level). Before the ledger is made available to + external transactions, transactions are applied from the queue to the + ledger from highest [fee level](#fee-level) to lowest. 
These transactions + count against the open ledger limit, so the required [fee level](#fee-level) + may start rising during this process. 3. Once the queue is empty, or the required [fee level](#fee-level) -rises too high for the remaining transactions in the queue, the ledger -is opened up for normal transaction processing. + rises too high for the remaining transactions in the queue, the ledger + is opened up for normal transaction processing. 4. A transaction in the queue can stay there indefinitely in principle, -but in practice, either - * it will eventually get applied to the ledger, - * it will attempt to apply to the ledger and fail, - * it will attempt to apply to the ledger and retry [10 + but in practice, either + +- it will eventually get applied to the ledger, +- it will attempt to apply to the ledger and fail, +- it will attempt to apply to the ledger and retry [10 times](#other-constants), - * its last ledger sequence number will expire, - * the user will replace it by submitting another transaction with the same +- its last ledger sequence number will expire, +- the user will replace it by submitting another transaction with the same sequence number and at least a [25% higher fee](#other-constants), or - * it will get dropped when the queue fills up with more valuable transactions. +- it will get dropped when the queue fills up with more valuable transactions. The size limit is computed dynamically, and can hold transactions for the next [20 ledgers](#other-constants) (restricted to a minimum of [2000 transactions](#other-constants)). The lower the transaction's @@ -128,14 +136,15 @@ If a transaction is submitted for an account with one or more transactions already in the queue, and a sequence number that is sequential with the other transactions in the queue for that account, it will be considered for the queue if it meets these additional criteria: - * the account has fewer than [10](#other-constants) transactions + +- the account has fewer than [10](#other-constants) transactions already in the queue. - * all other queued transactions for that account, in the case where +- all other queued transactions for that account, in the case where they spend the maximum possible XRP, leave enough XRP balance to pay the fee, - * the total fees for the other queued transactions are less than both +- the total fees for the other queued transactions are less than both the network's minimum reserve and the account's XRP balance, and - * none of the prior queued transactions affect the ability of subsequent +- none of the prior queued transactions affect the ability of subsequent transactions to claim a fee. Currently, there is an additional restriction that the queue cannot work with @@ -148,7 +157,7 @@ development will make the queue aware of `sfAccountTxnID` mechanisms. ### Fee Level "Fee level" is used to allow the cost of different types of transactions -to be compared directly. For a [reference +to be compared directly. For a [reference transaction](#reference-transaction), the base fee level is 256. If a transaction is submitted with a higher `Fee` field, the fee level is scaled appropriately. @@ -157,16 +166,16 @@ Examples, assuming a [reference transaction](#reference-transaction) base fee of 10 drops: 1. A single-signed [reference transaction](#reference-transaction) -with `Fee=20` will have a fee level of -`20 drop fee * 256 fee level / 10 drop base fee = 512 fee level`. 
+ with `Fee=20` will have a fee level of + `20 drop fee * 256 fee level / 10 drop base fee = 512 fee level`. 2. A multi-signed [reference transaction](#reference-transaction) with -3 signatures (base fee = 40 drops) and `Fee=60` will have a fee level of -`60 drop fee * 256 fee level / ((1tx + 3sigs) * 10 drop base fee) = 384 + 3 signatures (base fee = 40 drops) and `Fee=60` will have a fee level of + `60 drop fee * 256 fee level / ((1tx + 3sigs) * 10 drop base fee) = 384 fee level`. 3. A hypothetical future non-reference transaction with a base -fee of 15 drops multi-signed with 5 signatures and `Fee=90` will -have a fee level of -`90 drop fee * 256 fee level / ((1tx + 5sigs) * 15 drop base fee) = 256 + fee of 15 drops multi-signed with 5 signatures and `Fee=90` will + have a fee level of + `90 drop fee * 256 fee level / ((1tx + 5sigs) * 15 drop base fee) = 256 fee level`. This demonstrates that a simpler transaction paying less XRP can be more @@ -194,7 +203,7 @@ For consensus to be considered healthy, the peers on the network should largely remain in sync with one another. It is particularly important for the validators to remain in sync, because that is required for participation in consensus. However, the network tolerates some -validators being out of sync. Fundamentally, network health is a +validators being out of sync. Fundamentally, network health is a function of validators reaching consensus on sets of recently submitted transactions. @@ -214,61 +223,61 @@ often coincides with new ledgers with zero transactions. ### Other Constants -* *Base fee transaction limit per ledger*. The minimum value of 5 was -chosen to ensure the limit never gets so small that the ledger becomes -unusable. The "target" value of 50 was chosen so the limit never gets large -enough to invite abuse, but keeps up if the network stays healthy and -active. These exact values were chosen experimentally, and can easily -change in the future. -* *Expected ledger size growth and reduction percentages*. The growth -value of 20% was chosen to allow the limit to grow quickly as load -increases, but not so quickly as to allow bad actors to run unrestricted. -The reduction value of 50% was chosen to cause the limit to drop -significantly, but not so drastically that the limit cannot quickly -recover if the problem is temporary. These exact values were chosen -experimentally, and can easily change in the future. -* *Minimum `lastLedgerMedianFeeLevel`*. The value of 500 was chosen to -ensure that the first escalated fee was more significant and noticable -than what the default would allow. This exact value was chosen -experimentally, and can easily change in the future. -* *Transaction queue size limit*. The limit is computed based on the -base fee transaction limit per ledger, so that the queue can grow -automatically as the network's performance improves, allowing -more transactions per second, and thus more transactions per ledger -to process successfully. The limit of 20 ledgers was used to provide -a balance between resource (specifically memory) usage, and giving -transactions a realistic chance to be processed. The minimum size of -2000 transactions was chosen to allow a decent functional backlog during -network congestion conditions. These exact values were -chosen experimentally, and can easily change in the future. -* *Maximum retries*. 
A transaction in the queue can attempt to apply -to the open ledger, but get a retry (`ter`) code up to 10 times, at -which point, it will be removed from the queue and dropped. The -value was chosen to be large enough to allow temporary failures to clear -up, but small enough that the queue doesn't fill up with stale -transactions which prevent lower fee level, but more likely to succeed, -transactions from queuing. -* *Maximum transactions per account*. A single account can have up to 10 -transactions in the queue at any given time. This is primarily to -mitigate the lost cost of broadcasting multiple transactions if one of -the earlier ones fails or is otherwise removed from the queue without -being applied to the open ledger. The value was chosen arbitrarily, and -can easily change in the future. -* *Minimum last ledger sequence buffer*. If a transaction has a -`LastLedgerSequence` value, and cannot be processed into the open -ledger, that `LastLedgerSequence` must be at least 2 more than the -sequence number of the open ledger to be considered for the queue. The -value was chosen to provide a balance between letting the user control -the lifespan of the transaction, and giving a queued transaction a -chance to get processed out of the queue before getting discarded, -particularly since it may have dependent transactions also in the queue, -which will never succeed if this one is discarded. -* *Replaced transaction fee increase*. Any transaction in the queue can be -replaced by another transaction with the same sequence number and at -least a 25% higher fee level. The 25% increase is intended to cover the -resource cost incurred by broadcasting the original transaction to the -network. This value was chosen experimentally, and can easily change in -the future. +- _Base fee transaction limit per ledger_. The minimum value of 5 was + chosen to ensure the limit never gets so small that the ledger becomes + unusable. The "target" value of 50 was chosen so the limit never gets large + enough to invite abuse, but keeps up if the network stays healthy and + active. These exact values were chosen experimentally, and can easily + change in the future. +- _Expected ledger size growth and reduction percentages_. The growth + value of 20% was chosen to allow the limit to grow quickly as load + increases, but not so quickly as to allow bad actors to run unrestricted. + The reduction value of 50% was chosen to cause the limit to drop + significantly, but not so drastically that the limit cannot quickly + recover if the problem is temporary. These exact values were chosen + experimentally, and can easily change in the future. +- _Minimum `lastLedgerMedianFeeLevel`_. The value of 500 was chosen to + ensure that the first escalated fee was more significant and noticable + than what the default would allow. This exact value was chosen + experimentally, and can easily change in the future. +- _Transaction queue size limit_. The limit is computed based on the + base fee transaction limit per ledger, so that the queue can grow + automatically as the network's performance improves, allowing + more transactions per second, and thus more transactions per ledger + to process successfully. The limit of 20 ledgers was used to provide + a balance between resource (specifically memory) usage, and giving + transactions a realistic chance to be processed. The minimum size of + 2000 transactions was chosen to allow a decent functional backlog during + network congestion conditions. 
These exact values were + chosen experimentally, and can easily change in the future. +- _Maximum retries_. A transaction in the queue can attempt to apply + to the open ledger, but get a retry (`ter`) code up to 10 times, at + which point, it will be removed from the queue and dropped. The + value was chosen to be large enough to allow temporary failures to clear + up, but small enough that the queue doesn't fill up with stale + transactions which prevent lower fee level, but more likely to succeed, + transactions from queuing. +- _Maximum transactions per account_. A single account can have up to 10 + transactions in the queue at any given time. This is primarily to + mitigate the lost cost of broadcasting multiple transactions if one of + the earlier ones fails or is otherwise removed from the queue without + being applied to the open ledger. The value was chosen arbitrarily, and + can easily change in the future. +- _Minimum last ledger sequence buffer_. If a transaction has a + `LastLedgerSequence` value, and cannot be processed into the open + ledger, that `LastLedgerSequence` must be at least 2 more than the + sequence number of the open ledger to be considered for the queue. The + value was chosen to provide a balance between letting the user control + the lifespan of the transaction, and giving a queued transaction a + chance to get processed out of the queue before getting discarded, + particularly since it may have dependent transactions also in the queue, + which will never succeed if this one is discarded. +- _Replaced transaction fee increase_. Any transaction in the queue can be + replaced by another transaction with the same sequence number and at + least a 25% higher fee level. The 25% increase is intended to cover the + resource cost incurred by broadcasting the original transaction to the + network. This value was chosen experimentally, and can easily change in + the future. ### `fee` command @@ -287,6 +296,7 @@ ledger. It includes the sequence number of the current open ledger, but may not make sense if rippled is not synced to the network. Result format: + ``` { "result" : { @@ -319,13 +329,13 @@ without warning.** Up to two fields in `server_info` output are related to fee escalation. 1. `load_factor_fee_escalation`: The factor on base transaction cost -that a transaction must pay to get into the open ledger. This value can -change quickly as transactions are processed from the network and -ledgers are closed. If not escalated, the value is 1, so will not be -returned. + that a transaction must pay to get into the open ledger. This value can + change quickly as transactions are processed from the network and + ledgers are closed. If not escalated, the value is 1, so will not be + returned. 2. `load_factor_fee_queue`: If the queue is full, this is the factor on -base transaction cost that a transaction must pay to get into the queue. -If not full, the value is 1, so will not be returned. + base transaction cost that a transaction must pay to get into the queue. + If not full, the value is 1, so will not be returned. In all cases, the transaction fee must be high enough to overcome both `load_factor_fee_queue` and `load_factor` to be considered. It does not @@ -341,22 +351,21 @@ without warning.** Three fields in `server_state` output are related to fee escalation. 1. `load_factor_fee_escalation`: The factor on base transaction cost -that a transaction must pay to get into the open ledger. 
This value can -change quickly as transactions are processed from the network and -ledgers are closed. The ratio between this value and -`load_factor_fee_reference` determines the multiplier for transaction -fees to get into the current open ledger. + that a transaction must pay to get into the open ledger. This value can + change quickly as transactions are processed from the network and + ledgers are closed. The ratio between this value and + `load_factor_fee_reference` determines the multiplier for transaction + fees to get into the current open ledger. 2. `load_factor_fee_queue`: This is the factor on base transaction cost -that a transaction must pay to get into the queue. The ratio between -this value and `load_factor_fee_reference` determines the multiplier for -transaction fees to get into the transaction queue to be considered for -a later ledger. + that a transaction must pay to get into the queue. The ratio between + this value and `load_factor_fee_reference` determines the multiplier for + transaction fees to get into the transaction queue to be considered for + a later ledger. 3. `load_factor_fee_reference`: Like `load_base`, this is the baseline -that is used to scale fee escalation computations. + that is used to scale fee escalation computations. In all cases, the transaction fee must be high enough to overcome both `load_factor_fee_queue` and `load_factor` to be considered. It does not need to overcome `load_factor_fee_escalation`, though if it does not, it is more likely to be queued than immediately processed into the open ledger. - diff --git a/src/xrpld/app/misc/README.md b/src/xrpld/app/misc/README.md index 52e6e14934..2f9fff0ca3 100644 --- a/src/xrpld/app/misc/README.md +++ b/src/xrpld/app/misc/README.md @@ -71,18 +71,18 @@ Amendment must receive at least an 80% approval rate from validating nodes for a period of two weeks before being accepted. The following example outlines the process of an Amendment from its conception to approval and usage. -* A community member proposes to change transaction processing in some way. +- A community member proposes to change transaction processing in some way. The proposal is discussed amongst the community and receives its support creating a community or human consensus. -* Some members contribute their time and work to develop the Amendment. +- Some members contribute their time and work to develop the Amendment. -* A pull request is created and the new code is folded into a rippled build +- A pull request is created and the new code is folded into a rippled build and made available for use. -* The consensus process begins with the validating nodes. +- The consensus process begins with the validating nodes. -* If the Amendment holds an 80% majority for a two week period, nodes will begin +- If the Amendment holds an 80% majority for a two week period, nodes will begin including the transaction to enable it in their initial sets. Nodes may veto Amendments they consider undesirable by never announcing their @@ -112,7 +112,7 @@ enabled. Optional online deletion happens through the SHAMapStore. Records are deleted from disk based on ledger sequence number. These records reside in the -key-value database as well as in the SQLite ledger and transaction databases. +key-value database as well as in the SQLite ledger and transaction databases. Without online deletion storage usage grows without bounds. It can only be pruned by stopping, manually deleting data, and restarting the server. 
Online deletion requires less operator intervention to manage the server. @@ -142,14 +142,14 @@ server restarts. Configuration: -* In the [node_db] configuration section, an optional online_delete parameter is -set. If not set or if set to 0, online delete is disabled. Otherwise, the -setting defines number of ledgers between deletion cycles. -* Another optional parameter in [node_db] is that for advisory_delete. It is -disabled by default. If set to non-zero, requires an RPC call to activate the -deletion routine. -* online_delete must not be greater than the [ledger_history] parameter. -* [fetch_depth] will be silently set to equal the online_delete setting if -online_delete is greater than fetch_depth. -* In the [node_db] section, there is a performance tuning option, delete_batch, -which sets the maximum size in ledgers for each SQL DELETE query. +- In the [node_db] configuration section, an optional online_delete parameter is + set. If not set or if set to 0, online delete is disabled. Otherwise, the + setting defines number of ledgers between deletion cycles. +- Another optional parameter in [node_db] is that for advisory_delete. It is + disabled by default. If set to non-zero, requires an RPC call to activate the + deletion routine. +- online_delete must not be greater than the [ledger_history] parameter. +- [fetch_depth] will be silently set to equal the online_delete setting if + online_delete is greater than fetch_depth. +- In the [node_db] section, there is a performance tuning option, delete_batch, + which sets the maximum size in ledgers for each SQL DELETE query. diff --git a/src/xrpld/app/rdb/README.md b/src/xrpld/app/rdb/README.md index 81aaa32f2c..a50bb395c1 100644 --- a/src/xrpld/app/rdb/README.md +++ b/src/xrpld/app/rdb/README.md @@ -2,8 +2,8 @@ The guiding principles of the Relational Database Interface are summarized below: -* All hard-coded SQL statements should be stored in the [files](#source-files) under the `xrpld/app/rdb` directory. With the exception of test modules, no hard-coded SQL should be added to any other file in rippled. -* The base class `RelationalDatabase` is inherited by derived classes that each provide an interface for operating on distinct relational database systems. +- All hard-coded SQL statements should be stored in the [files](#source-files) under the `xrpld/app/rdb` directory. With the exception of test modules, no hard-coded SQL should be added to any other file in rippled. +- The base class `RelationalDatabase` is inherited by derived classes that each provide an interface for operating on distinct relational database systems. 
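As a concrete, purely hypothetical illustration of the first principle: a helper in this directory looks roughly like the sketch below. The function name, table, and query are invented for the example; the `soci::session`-based signature follows the pattern covered under "Database Methods" below.

```cpp
// Hypothetical example: the function name, table, and query are illustrative.
// The point is the shape: the SQL text lives in a file under xrpld/app/rdb,
// and callers pass in an open soci::session instead of writing SQL themselves.
#include <soci/soci.h>

#include <boost/optional.hpp>

#include <cstdint>

boost::optional<std::uint32_t>
getNewestLedgerSeq(soci::session& session)
{
    // soci reads the scalar result into a plain variable plus an indicator
    // that reports whether the column was NULL (for example, an empty table).
    long long seq = 0;
    soci::indicator ind = soci::i_null;
    session << "SELECT MAX(LedgerSeq) FROM Ledgers;", soci::into(seq, ind);
    if (ind != soci::i_ok)
        return boost::none;
    return static_cast<std::uint32_t>(seq);
}
```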
## Overview @@ -45,36 +45,34 @@ src/xrpld/app/rdb/ ``` ### File Contents -| File | Contents | -| ----------- | ----------- | -| `Node.[h\|cpp]` | Defines/Implements methods used by `SQLiteDatabase` for interacting with SQLite node databases| -|`SQLiteDatabase.[h\|cpp]`| Defines/Implements the class `SQLiteDatabase`/`SQLiteDatabaseImp` which inherits from `RelationalDatabase` and is used to operate on the main stores | -| `PeerFinder.[h\|cpp]` | Defines/Implements methods for interacting with the PeerFinder SQLite database | -|`RelationalDatabase.cpp`| Implements the static method `RelationalDatabase::init` which is used to initialize an instance of `RelationalDatabase` | -| `RelationalDatabase.h` | Defines the abstract class `RelationalDatabase`, the primary class of the Relational Database Interface | -| `State.[h\|cpp]` | Defines/Implements methods for interacting with the State SQLite database which concerns ledger deletion and database rotation | -| `Vacuum.[h\|cpp]` | Defines/Implements a method for performing the `VACUUM` operation on SQLite databases | -| `Wallet.[h\|cpp]` | Defines/Implements methods for interacting with Wallet SQLite databases | + +| File | Contents | +| ------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Node.[h\|cpp]` | Defines/Implements methods used by `SQLiteDatabase` for interacting with SQLite node databases | +| `SQLiteDatabase.[h\|cpp]` | Defines/Implements the class `SQLiteDatabase`/`SQLiteDatabaseImp` which inherits from `RelationalDatabase` and is used to operate on the main stores | +| `PeerFinder.[h\|cpp]` | Defines/Implements methods for interacting with the PeerFinder SQLite database | +| `RelationalDatabase.cpp` | Implements the static method `RelationalDatabase::init` which is used to initialize an instance of `RelationalDatabase` | +| `RelationalDatabase.h` | Defines the abstract class `RelationalDatabase`, the primary class of the Relational Database Interface | +| `State.[h\|cpp]` | Defines/Implements methods for interacting with the State SQLite database which concerns ledger deletion and database rotation | +| `Vacuum.[h\|cpp]` | Defines/Implements a method for performing the `VACUUM` operation on SQLite databases | +| `Wallet.[h\|cpp]` | Defines/Implements methods for interacting with Wallet SQLite databases | ## Classes -The abstract class `RelationalDatabase` is the primary class of the Relational Database Interface and is defined in the eponymous header file. This class provides a static method `init()` which, when invoked, creates a concrete instance of a derived class whose type is specified by the system configuration. All other methods in the class are virtual. Presently there exist two classes that derive from `RelationalDatabase`, namely `SQLiteDatabase` and `PostgresDatabase`. +The abstract class `RelationalDatabase` is the primary class of the Relational Database Interface and is defined in the eponymous header file. This class provides a static method `init()` which, when invoked, creates a concrete instance of a derived class whose type is specified by the system configuration. All other methods in the class are virtual. Presently there exist two classes that derive from `RelationalDatabase`, namely `SQLiteDatabase` and `PostgresDatabase`. 
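A minimal sketch of that arrangement follows. Only `RelationalDatabase`, `init()`, and the derived class name are taken from the text above; the `Config` stand-in and the example virtual method are placeholders, and the real interface is considerably larger.

```cpp
// Sketch only: the factory-plus-virtual-interface pattern described above.
#include <cstdint>
#include <memory>

struct Config;  // stand-in for the application's configuration type

class RelationalDatabase
{
public:
    virtual ~RelationalDatabase() = default;

    // Factory: inspects the configuration and returns whichever concrete
    // backend it names.
    static std::unique_ptr<RelationalDatabase>
    init(Config const& config);

    // All other operations are virtual and supplied by the backend;
    // one illustrative example:
    virtual std::uint32_t
    getMaxLedgerSeq() = 0;
};

// One concrete backend; a Postgres-backed class follows the same pattern.
class SQLiteDatabase : public RelationalDatabase
{
public:
    std::uint32_t
    getMaxLedgerSeq() override
    {
        return 0;  // would query the SQLite ledger store
    }
};
```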
## Database Methods The Relational Database Interface provides three categories of methods for interacting with databases: -* Free functions for interacting with SQLite databases used by various components of the software. These methods feature a `soci::session` parameter which facilitates connecting to SQLite databases, and are defined and implemented in the following files: +- Free functions for interacting with SQLite databases used by various components of the software. These methods feature a `soci::session` parameter which facilitates connecting to SQLite databases, and are defined and implemented in the following files: - * `PeerFinder.[h\|cpp]` - * `State.[h\|cpp]` - * `Vacuum.[h\|cpp]` - * `Wallet.[h\|cpp]` +- `PeerFinder.[h\|cpp]` +- `State.[h\|cpp]` +- `Vacuum.[h\|cpp]` +- `Wallet.[h\|cpp]` +- Free functions used exclusively by `SQLiteDatabaseImp` for interacting with SQLite databases owned by the node store. Unlike the free functions in the files listed above, these are not intended to be invoked directly by clients. Rather, these methods are invoked by derived instances of `RelationalDatabase`. These methods are defined in the following files: + - `Node.[h|cpp]` -* Free functions used exclusively by `SQLiteDatabaseImp` for interacting with SQLite databases owned by the node store. Unlike the free functions in the files listed above, these are not intended to be invoked directly by clients. Rather, these methods are invoked by derived instances of `RelationalDatabase`. These methods are defined in the following files: - - * `Node.[h|cpp]` - - -* Member functions of `RelationalDatabase`, `SQLiteDatabase`, and `PostgresDatabase` which are used to access the node store. +- Member functions of `RelationalDatabase`, `SQLiteDatabase`, and `PostgresDatabase` which are used to access the node store. diff --git a/src/xrpld/consensus/README.md b/src/xrpld/consensus/README.md index c8850a2a51..3cc1294c4f 100644 --- a/src/xrpld/consensus/README.md +++ b/src/xrpld/consensus/README.md @@ -1,9 +1,8 @@ # Consensus This directory contains the implementation of a -generic consensus algorithm. The implementation +generic consensus algorithm. The implementation follows a CRTP design, requiring client code to implement specific functions and types to use consensus in their -application. The interface is undergoing refactoring and +application. The interface is undergoing refactoring and is not yet finalized. - diff --git a/src/xrpld/nodestore/README.md b/src/xrpld/nodestore/README.md index 1549c1ef96..a5d1128d17 100644 --- a/src/xrpld/nodestore/README.md +++ b/src/xrpld/nodestore/README.md @@ -1,6 +1,7 @@ # Database Documentation -* [NodeStore](#nodestore) -* [Benchmarks](#benchmarks) + +- [NodeStore](#nodestore) +- [Benchmarks](#benchmarks) # NodeStore @@ -12,41 +13,43 @@ identified by the hash, which is a 256 bit hash of the blob. The blob is a variable length block of serialized data. The type identifies what the blob contains. The fields are as follows: -* `mType` +- `mType` - An enumeration that determines what the blob holds. There are four - different types of objects stored. +An enumeration that determines what the blob holds. There are four +different types of objects stored. - * **ledger** +- **ledger** - A ledger header. + A ledger header. - * **transaction** +- **transaction** - A signed transaction. + A signed transaction. - * **account node** +- **account node** - A node in a ledger's account state tree. + A node in a ledger's account state tree. 
- * **transaction node** +- **transaction node** - A node in a ledger's transaction tree. + A node in a ledger's transaction tree. -* `mHash` +- `mHash` - A 256-bit hash of the blob. +A 256-bit hash of the blob. -* `mData` +- `mData` - A blob containing the payload. Stored in the following format. +A blob containing the payload. Stored in the following format. + +| Byte | | | +| :------ | :----- | :------------------------- | +| 0...7 | unused | | +| 8 | type | NodeObjectType enumeration | +| 9...end | data | body of the object data | -|Byte | | | -|:------|:--------------------|:-------------------------| -|0...7 |unused | | -|8 |type |NodeObjectType enumeration| -|9...end|data |body of the object data | --- + The `NodeStore` provides an interface that stores, in a persistent database, a collection of NodeObjects that rippled uses as its primary representation of ledger entries. All ledger entries are stored as NodeObjects and as such, need @@ -64,41 +67,42 @@ the configuration file [node_db] section as follows. One or more lines of key / value pairs Example: + ``` type=RocksDB path=rocksdb compression=1 ``` + Choices for 'type' (not case-sensitive) -* **HyperLevelDB** +- **HyperLevelDB** - An improved version of LevelDB (preferred). +An improved version of LevelDB (preferred). -* **LevelDB** +- **LevelDB** - Google's LevelDB database (deprecated). +Google's LevelDB database (deprecated). -* **none** +- **none** - Use no backend. +Use no backend. -* **RocksDB** +- **RocksDB** - Facebook's RocksDB database, builds on LevelDB. +Facebook's RocksDB database, builds on LevelDB. -* **SQLite** +- **SQLite** - Use SQLite. +Use SQLite. 'path' speficies where the backend will store its data files. Choices for 'compression' -* **0** off - -* **1** on (default) +- **0** off +- **1** on (default) # Benchmarks @@ -129,48 +133,48 @@ RocksDBQuickFactory is intended to provide a testbed for comparing potential rocksdb performance with the existing recommended configuration in rippled.cfg. Through various executions and profiling some conclusions are presented below. -* If the write ahead log is enabled, insert speed soon clogs up under load. The -BatchWriter class intends to stop this from blocking the main threads by queuing -up writes and running them in a separate thread. However, rocksdb already has -separate threads dedicated to flushing the memtable to disk and the memtable is -itself an in-memory queue. The result is two queues with a guarantee of -durability in between. However if the memtable was used as the sole queue and -the rocksdb::Flush() call was manually triggered at opportune moments, possibly -just after ledger close, then that would provide similar, but more predictable -guarantees. It would also remove an unneeded thread and unnecessary memory -usage. An alternative point of view is that because there will always be many -other rippled instances running there is no need for such guarantees. The nodes -will always be available from another peer. +- If the write ahead log is enabled, insert speed soon clogs up under load. The + BatchWriter class intends to stop this from blocking the main threads by queuing + up writes and running them in a separate thread. However, rocksdb already has + separate threads dedicated to flushing the memtable to disk and the memtable is + itself an in-memory queue. The result is two queues with a guarantee of + durability in between. 
However, if the memtable was used as the sole queue and
+  the rocksdb::Flush() call was manually triggered at opportune moments, possibly
+  just after ledger close, then that would provide similar, but more predictable
+  guarantees. It would also remove an unneeded thread and unnecessary memory
+  usage. An alternative point of view is that because there will always be many
+  other rippled instances running there is no need for such guarantees. The nodes
+  will always be available from another peer.

-* Lookup in a block was previously using binary search. With rippled's use case
-it is highly unlikely that two adjacent key/values will ever be requested one
-after the other. Therefore hash indexing of blocks makes much more sense.
-Rocksdb has a number of options for hash indexing both memtables and blocks and
-these need more testing to find the best choice.
+- Lookup in a block was previously using binary search. With rippled's use case
+  it is highly unlikely that two adjacent key/values will ever be requested one
+  after the other. Therefore hash indexing of blocks makes much more sense.
+  Rocksdb has a number of options for hash indexing both memtables and blocks and
+  these need more testing to find the best choice.

-* The current Database implementation has two forms of caching, so the LRU cache
-of blocks at Factory level does not make any sense. However, if the hash
-indexing and potentially the new [bloom
-filter](http://rocksdb.org/blog/1427/new-bloom-filter-format/) can provide
-faster lookup for non-existent keys, then potentially the caching could exist at
-Factory level.
+- The current Database implementation has two forms of caching, so the LRU cache
+  of blocks at Factory level does not make any sense. However, if the hash
+  indexing and potentially the new [bloom
+  filter](http://rocksdb.org/blog/1427/new-bloom-filter-format/) can provide
+  faster lookup for non-existent keys, then potentially the caching could exist at
+  Factory level.

-* Multiple runs of the benchmarks can yield surprisingly different results. This
-can perhaps be attributed to the asynchronous nature of rocksdb's compaction
-process. The benchmarks are artifical and create highly unlikely write load to
-create the dataset to measure different read access patterns. Therefore multiple
-runs of the benchmarks are required to get a feel for the effectiveness of the
-changes. This contrasts sharply with the keyvadb benchmarking were highly
-repeatable timings were discovered. Also realistically sized datasets are
-required to get a correct insight. The number of 2,000,000 key/values (actually
-4,000,000 after the two insert benchmarks complete) is too low to get a full
-picture.
+- Multiple runs of the benchmarks can yield surprisingly different results. This
+  can perhaps be attributed to the asynchronous nature of rocksdb's compaction
+  process. The benchmarks are artificial and create highly unlikely write load to
+  create the dataset to measure different read access patterns. Therefore multiple
+  runs of the benchmarks are required to get a feel for the effectiveness of the
+  changes. This contrasts sharply with the keyvadb benchmarking where highly
+  repeatable timings were discovered. Also realistically sized datasets are
+  required to get a correct insight. The number of 2,000,000 key/values (actually
+  4,000,000 after the two insert benchmarks complete) is too low to get a full
+  picture.
-* An interesting side effect of running the benchmarks in a profiler was that a -clear pattern of what RocksDB does under the hood was observable. This led to -the decision to trial hash indexing and also the discovery of the native CRC32 -instruction not being used. +- An interesting side effect of running the benchmarks in a profiler was that a + clear pattern of what RocksDB does under the hood was observable. This led to + the decision to trial hash indexing and also the discovery of the native CRC32 + instruction not being used. -* Important point to note that is if this factory is tested with an existing set -of sst files none of the old sst files will benefit from indexing changes until -they are compacted at a future point in time. +- Important point to note that is if this factory is tested with an existing set + of sst files none of the old sst files will benefit from indexing changes until + they are compacted at a future point in time. diff --git a/src/xrpld/overlay/README.md b/src/xrpld/overlay/README.md index 6525e5edf8..cd00488915 100644 --- a/src/xrpld/overlay/README.md +++ b/src/xrpld/overlay/README.md @@ -39,10 +39,10 @@ The HTTP [request](https://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html) must: - Use HTTP version 1.1. - Specify a request URI consisting of a single forward slash character ("/") -indicating the server root. Requests using different URIs are reserved for -future protocol implementations. + indicating the server root. Requests using different URIs are reserved for + future protocol implementations. - Use the [_HTTP/1.1 Upgrade_][upgrade_header] mechanism with additional custom -fields to communicate protocol specific information related to the upgrade. + fields to communicate protocol specific information related to the upgrade. HTTP requests which do not conform to this requirements must generate an appropriate HTTP error and result in the connection being closed. @@ -72,7 +72,6 @@ Previous-Ledger: q4aKbP7sd5wv+EXArwCmQiWZhq9AwBl2p/hCtpGJNsc= ##### Example HTTP Upgrade Response (Success) - ``` HTTP/1.1 101 Switching Protocols Connection: Upgrade @@ -102,9 +101,9 @@ Content-Type: application/json #### Standard Fields -| Field Name | Request | Response | -|--------------------- |:-----------------: |:-----------------: | -| `User-Agent` | :heavy_check_mark: | | +| Field Name | Request | Response | +| ------------ | :----------------: | :------: | +| `User-Agent` | :heavy_check_mark: | | The `User-Agent` field indicates the version of the software that the peer that is making the HTTP request is using. No semantic meaning is @@ -113,9 +112,9 @@ specify the version of the software that is used. See [RFC2616 §14.43](https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.43). -| Field Name | Request | Response | -|--------------------- |:-----------------: |:-----------------: | -| `Server` | | :heavy_check_mark: | +| Field Name | Request | Response | +| ---------- | :-----: | :----------------: | +| `Server` | | :heavy_check_mark: | The `Server` field indicates the version of the software that the peer that is processing the HTTP request is using. No semantic meaning is @@ -124,18 +123,18 @@ specify the version of the software that is used. See [RFC2616 §14.38](https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.38). 
-| Field Name | Request | Response | -|--------------------- |:-----------------: |:-----------------: | -| `Connection` | :heavy_check_mark: | :heavy_check_mark: | +| Field Name | Request | Response | +| ------------ | :----------------: | :----------------: | +| `Connection` | :heavy_check_mark: | :heavy_check_mark: | The `Connection` field should have a value of `Upgrade` to indicate that a request to upgrade the connection is being performed. See [RFC2616 §14.10](https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.10). -| Field Name | Request | Response | -|--------------------- |:-----------------: |:-----------------: | -| `Upgrade` | :heavy_check_mark: | :heavy_check_mark: | +| Field Name | Request | Response | +| ---------- | :----------------: | :----------------: | +| `Upgrade` | :heavy_check_mark: | :heavy_check_mark: | The `Upgrade` field is part of the standard connection upgrade mechanism and must be present in both requests and responses. It is used to negotiate the @@ -156,12 +155,11 @@ equal to 2 and the minor is greater than or equal to 0. See [RFC 2616 §14.42](https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.42) - #### Custom Fields -| Field Name | Request | Response | -|--------------------- |:-----------------: |:-----------------: | -| `Connect-As` | :heavy_check_mark: | :heavy_check_mark: | +| Field Name | Request | Response | +| ------------ | :----------------: | :----------------: | +| `Connect-As` | :heavy_check_mark: | :heavy_check_mark: | The mandatory `Connect-As` field is used to specify that type of connection that is being requested. @@ -175,10 +173,9 @@ elements specified in the request. If a server processing a request does not recognize any of the connection types, the request should fail with an appropriate HTTP error code (e.g. by sending an HTTP 400 "Bad Request" response). - -| Field Name | Request | Response | -|--------------------- |:-----------------: |:-----------------: | -| `Remote-IP` | :white_check_mark: | :white_check_mark: | +| Field Name | Request | Response | +| ----------- | :----------------: | :----------------: | +| `Remote-IP` | :white_check_mark: | :white_check_mark: | The optional `Remote-IP` field contains the string representation of the IP address of the remote end of the connection as seen from the peer that is @@ -187,10 +184,9 @@ sending the field. By observing values of this field from a sufficient number of different servers, a peer making outgoing connections can deduce its own IP address. - -| Field Name | Request | Response | -|--------------------- |:-----------------: |:-----------------: | -| `Local-IP` | :white_check_mark: | :white_check_mark: | +| Field Name | Request | Response | +| ---------- | :----------------: | :----------------: | +| `Local-IP` | :white_check_mark: | :white_check_mark: | The optional `Local-IP` field contains the string representation of the IP address that the peer sending the field believes to be its own. @@ -198,10 +194,9 @@ address that the peer sending the field believes to be its own. Servers receiving this field can detect IP address mismatches, which may indicate a potential man-in-the-middle attack. 
- -| Field Name | Request | Response | -|--------------------- |:-----------------: |:-----------------: | -| `Network-ID` | :white_check_mark: | :white_check_mark: | +| Field Name | Request | Response | +| ------------ | :----------------: | :----------------: | +| `Network-ID` | :white_check_mark: | :white_check_mark: | The optional `Network-ID` can be used to identify to which of several [parallel networks](https://xrpl.org/parallel-networks.html) the server @@ -217,10 +212,9 @@ If a server configured to join one network receives a connection request from a server configured to join another network, the request should fail with an appropriate HTTP error code (e.g. by sending an HTTP 400 "Bad Request" response). - -| Field Name | Request | Response | -|--------------------- |:-----------------: |:-----------------: | -| `Network-Time` | :white_check_mark: | :white_check_mark: | +| Field Name | Request | Response | +| -------------- | :----------------: | :----------------: | +| `Network-Time` | :white_check_mark: | :white_check_mark: | The optional `Network-Time` field reports the current [time](https://xrpl.org/basic-data-types.html#specifying-time) according to sender's internal clock. @@ -232,20 +226,18 @@ each other with an appropriate HTTP error code (e.g. by sending an HTTP 400 It is highly recommended that servers synchronize their clocks using time synchronization software. For more on this topic, please visit [ntp.org](http://www.ntp.org/). - -| Field Name | Request | Response | -|--------------------- |:-----------------: |:-----------------: | -| `Public-Key` | :heavy_check_mark: | :heavy_check_mark: | +| Field Name | Request | Response | +| ------------ | :----------------: | :----------------: | +| `Public-Key` | :heavy_check_mark: | :heavy_check_mark: | The mandatory `Public-Key` field identifies the sending server's public key, encoded in base58 using the standard encoding for node public keys. See: https://xrpl.org/base58-encodings.html - -| Field Name | Request | Response | -|--------------------- |:-----------------: |:-----------------: | -| `Server-Domain` | :white_check_mark: | :white_check_mark: | +| Field Name | Request | Response | +| --------------- | :----------------: | :----------------: | +| `Server-Domain` | :white_check_mark: | :white_check_mark: | The optional `Server-Domain` field allows a server to report the domain that it is operating under. The value is configured by the server administrator in @@ -259,10 +251,9 @@ under the specified domain and locating the public key of this server under the Sending a malformed domain will prevent a connection from being established. - -| Field Name | Request | Response | -|--------------------- |:-----------------: |:-----------------: | -| `Session-Signature` | :heavy_check_mark: | :heavy_check_mark: | +| Field Name | Request | Response | +| ------------------- | :----------------: | :----------------: | +| `Session-Signature` | :heavy_check_mark: | :heavy_check_mark: | The `Session-Signature` field is mandatory and is used to secure the peer link against certain types of attack. For more details see "Session Signature" below. @@ -272,36 +263,35 @@ should support both **Base64** and **HEX** encoding for this value. For more details on this field, please see **Session Signature** below. 
- -| Field Name | Request | Response | -|--------------------- |:-----------------: |:-----------------: | -| `Crawl` | :white_check_mark: | :white_check_mark: | +| Field Name | Request | Response | +| ---------- | :----------------: | :----------------: | +| `Crawl` | :white_check_mark: | :white_check_mark: | The optional `Crawl` field can be used by a server to indicate whether peers should include it in crawl reports. The field can take two values: + - **`Public`**: The server's IP address and port should be included in crawl -reports. + reports. - **`Private`**: The server's IP address and port should not be included in -crawl reports. _This is the default, if the field is omitted._ + crawl reports. _This is the default, if the field is omitted._ For more on the Peer Crawler, please visit https://xrpl.org/peer-crawler.html. - -| Field Name | Request | Response | -|--------------------- |:-----------------: |:-----------------: | -| `Closed-Ledger` | :white_check_mark: | :white_check_mark: | +| Field Name | Request | Response | +| --------------- | :----------------: | :----------------: | +| `Closed-Ledger` | :white_check_mark: | :white_check_mark: | If present, identifies the hash of the last ledger that the sending server considers to be closed. The value is encoded as **HEX**, but implementations should support both **Base64** and **HEX** encoding for this value for legacy purposes. - -| Field Name | Request | Response | -|--------------------- |:-----------------: |:-----------------: | -| `Previous-Ledger` | :white_check_mark: | :white_check_mark: | + +| Field Name | Request | Response | +| ----------------- | :----------------: | :----------------: | +| `Previous-Ledger` | :white_check_mark: | :white_check_mark: | If present, identifies the hash of the parent ledger that the sending server considers to be closed. @@ -317,7 +307,6 @@ and values in both requests and responses. Implementations should not reject requests because of the presence of fields that they do not understand. - ### Session Signature Even for SSL/TLS encrypted connections, it is possible for an attacker to mount @@ -365,8 +354,7 @@ transferred between A and B and will not be able to intelligently tamper with th message stream between Alice and Bob, although she may be still be able to inject delays or terminate the link. - -# Ripple Clustering # +# Ripple Clustering A cluster consists of more than one Ripple server under common administration that share load information, distribute cryptography @@ -374,19 +362,19 @@ operations, and provide greater response consistency. Cluster nodes are identified by their public node keys. Cluster nodes exchange information about endpoints that are imposing load upon them. -Cluster nodes share information about their internal load status. Cluster +Cluster nodes share information about their internal load status. Cluster nodes do not have to verify the cryptographic signatures on messages received from other cluster nodes. -## Configuration ## +## Configuration A server's public key can be determined from the output of the `server_info` -command. The key is in the `pubkey_node` value, and is a text string -beginning with the letter `n`. The key is maintained across runs in a +command. The key is in the `pubkey_node` value, and is a text string +beginning with the letter `n`. The key is maintained across runs in a database. Cluster members are configured in the `rippled.cfg` file under -`[cluster_nodes]`. Each member should be configured on a line beginning +`[cluster_nodes]`. 
Each member should be configured on a line beginning with the node public key, followed optionally by a space and a friendly name. @@ -404,23 +392,23 @@ New spokes can be added as follows: - Restart each hub, one by one - Restart the spoke -## Transaction Behavior ## +## Transaction Behavior When a transaction is received from a cluster member, several normal checks are bypassed: Signature checking is bypassed because we trust that a cluster member would -not relay a transaction with an incorrect signature. Validators may wish to +not relay a transaction with an incorrect signature. Validators may wish to disable this feature, preferring the additional load to get the additional security of having validators check each transaction. Local checks for transaction checking are also bypassed. For example, a server will not reject a transaction from a cluster peer because the fee -does not meet its current relay fee. It is preferable to keep the cluster +does not meet its current relay fee. It is preferable to keep the cluster in agreement and permit confirmation from one cluster member to more reliably indicate the transaction's acceptance by the cluster. -## Server Load Information ## +## Server Load Information Cluster members exchange information on their server's load level. The load level is essentially the amount by which the normal fee levels are multiplied @@ -431,22 +419,22 @@ fee, is the highest of its local load level, the network load level, and the cluster load level. The cluster load level is the median load level reported by a cluster member. -## Gossip ## +## Gossip Gossip is the mechanism by which cluster members share information about endpoints (typically IPv4 addresses) that are imposing unusually high load -on them. The endpoint load manager takes into account gossip to reduce the +on them. The endpoint load manager takes into account gossip to reduce the amount of load the endpoint is permitted to impose on the local server before it is warned, disconnected, or banned. Suppose, for example, that an attacker controls a large number of IP addresses, and with these, he can send sufficient requests to overload a -server. Without gossip, he could use these same addresses to overload all -the servers in a cluster. With gossip, if he chooses to use the same IP +server. Without gossip, he could use these same addresses to overload all +the servers in a cluster. With gossip, if he chooses to use the same IP address to impose load on more than one server, he will find that the amount of load he can impose before getting disconnected is much lower. -## Monitoring ## +## Monitoring The `peers` command will report on the status of the cluster. The `cluster` object will contain one entry for each member of the cluster (either configured diff --git a/src/xrpld/peerfinder/README.md b/src/xrpld/peerfinder/README.md index ab1ac4491a..a3f89fd446 100644 --- a/src/xrpld/peerfinder/README.md +++ b/src/xrpld/peerfinder/README.md @@ -1,4 +1,3 @@ - # PeerFinder ## Introduction @@ -31,23 +30,23 @@ slots_. PeerFinder has these responsibilities -* Maintain a persistent set of endpoint addresses suitable for bootstrapping +- Maintain a persistent set of endpoint addresses suitable for bootstrapping into the peer to peer overlay, ranked by relative locally observed utility. -* Send and receive protocol messages for discovery of endpoint addresses. +- Send and receive protocol messages for discovery of endpoint addresses. -* Provide endpoint addresses to new peers that need them. 
+- Provide endpoint addresses to new peers that need them. -* Maintain connections to a configured set of fixed peers. +- Maintain connections to a configured set of fixed peers. -* Impose limits on the various slots consumed by peer connections. +- Impose limits on the various slots consumed by peer connections. -* Initiate outgoing connection attempts to endpoint addresses to maintain the +- Initiate outgoing connection attempts to endpoint addresses to maintain the overlay connectivity and fixed peer policies. -* Verify the connectivity of neighbors who advertise inbound connection slots. +- Verify the connectivity of neighbors who advertise inbound connection slots. -* Prevent duplicate connections and connections to self. +- Prevent duplicate connections and connections to self. --- @@ -79,28 +78,28 @@ The `Config` structure defines the operational parameters of the PeerFinder. Some values come from the configuration file while others are calculated via tuned heuristics. The fields are as follows: -* `autoConnect` - +- `autoConnect` + A flag indicating whether or not the Autoconnect feature is enabled. -* `wantIncoming` +- `wantIncoming` A flag indicating whether or not the peer desires inbound connections. When this flag is turned off, a peer will not advertise itself in Endpoint messages. -* `listeningPort` +- `listeningPort` The port number to use when creating the listening socket for peer connections. -* `maxPeers` +- `maxPeers` The largest number of active peer connections to allow. This includes inbound and outbound connections, but excludes fixed and cluster peers. There is an implementation defined floor on this value. -* `outPeers` +- `outPeers` The number of automatic outbound connections that PeerFinder will maintain when the Autoconnect feature is enabled. The value is computed with fractional @@ -161,8 +160,8 @@ Endpoint messages are received from the overlay over time. The `Bootcache` stores IP addresses useful for gaining initial connections. Each address is associated with the following metadata: - -* **Valence** + +- **Valence** A signed integer which represents the number of successful consecutive connection attempts when positive, and the number of @@ -202,30 +201,30 @@ a slot. Slots have properties and state associated with them: The slot state represents the current stage of the connection as it passes through the business logic for establishing peer connections. -* `accept` +- `accept` The accept state is an initial state resulting from accepting an incoming connection request on a listening socket. The remote IP address and port are known, and a handshake is expected next. -* `connect` +- `connect` The connect state is an initial state used when actively establishing outbound connection attempts. The desired remote IP address and port are known. -* `connected` +- `connected` When an outbound connection attempt succeeds, it moves to the connected state. The handshake is initiated but not completed. -* `active` +- `active` The state becomes Active when a connection in either the Accepted or Connected state completes the handshake process, and a slot is available based on the properties. If no slot is available when the handshake completes, the socket is gracefully closed. -* `closing` +- `closing` The Closing state represents a connected socket in the process of being gracefully closed. @@ -234,13 +233,13 @@ through the business logic for establishing peer connections. Slot properties may be combined and are not mutually exclusive. 
-* **Inbound** +- **Inbound** An inbound slot is the condition of a socket which has accepted an incoming connection request. A connection which is not inbound is by definition outbound. -* **Fixed** +- **Fixed** A fixed slot is a desired connection to a known peer identified by IP address, usually entered manually in the configuration file. For the purpose of @@ -248,14 +247,14 @@ Slot properties may be combined and are not mutually exclusive. although only the IP address is checked to determine if the fixed peer is already connected. Fixed slots do not count towards connection limits. -* **Cluster** +- **Cluster** A cluster slot is a connection which has completed the handshake stage, whose public key matches a known public key usually entered manually in the configuration file or learned through overlay messages from other trusted peers. Cluster slots do not count towards connection limits. -* **Superpeer** (forthcoming) +- **Superpeer** (forthcoming) A superpeer slot is a connection to a peer which can accept incoming connections, meets certain resource availaibility requirements (such as @@ -279,7 +278,7 @@ Cluster slots are identified by the public key and set up during the initialization of the manager or discovered upon receipt of messages in the overlay from trusted connections. --------------------------------------------------------------------------------- +--- # Algorithms @@ -295,8 +294,8 @@ This stage is invoked when the number of active fixed connections is below the number of fixed connections specified in the configuration, and one of the following is true: -* There are eligible fixed addresses to try -* Any outbound connection attempts are in progress +- There are eligible fixed addresses to try +- Any outbound connection attempts are in progress Each fixed address is associated with a retry timer. On a fixed connection failure, the timer is reset so that the address is not tried for some amount @@ -317,8 +316,8 @@ The Livecache is invoked when Stage 1 is not active, autoconnect is enabled, and the number of active outbound connections is below the number desired. The stage remains active while: -* The Livecache has addresses to try -* Any outbound connection attempts are in progress +- The Livecache has addresses to try +- Any outbound connection attempts are in progress PeerFinder makes its best effort to exhaust addresses in the Livecache before moving on to the Bootcache, because Livecache addresses are highly likely @@ -333,7 +332,7 @@ The Bootcache is invoked when Stage 1 and Stage 2 are not active, autoconnect is enabled, and the number of active outbound connections is below the number desired. The stage remains active while: -* There are addresses in the cache that have not been tried recently. +- There are addresses in the cache that have not been tried recently. Entries in the Bootcache are ranked, with highly connectible addresses preferred over others. Connection attempts to Bootcache addresses are very likely to @@ -342,7 +341,7 @@ not have open slots. Before the remote peer closes the connection it will send a handful of addresses from its Livecache to help the new peer coming online obtain connections. 
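Taken together, the three stages amount to a small piece of selection logic, sketched below. The names and structure are illustrative only; the real implementation also honors the retry timers, in-flight connection attempts, and address ranking discussed above.

```cpp
// Condensed sketch of the stage order described above.
#include <cstddef>
#include <string>
#include <vector>

struct Endpoint
{
    std::string address;  // "ip:port" of a candidate peer
};

struct ConnectState
{
    bool autoConnect;                 // config: Autoconnect enabled
    std::size_t fixedNeeded;          // fixed peers not yet connected
    std::size_t outboundNeeded;       // desired outbound slots minus active
    std::vector<Endpoint> fixed;      // eligible fixed addresses (Stage 1)
    std::vector<Endpoint> livecache;  // fresh, likely-open endpoints (Stage 2)
    std::vector<Endpoint> bootcache;  // ranked bootstrap addresses (Stage 3)
};

// Returns the addresses to try next, preferring fixed slots, then the
// Livecache, and only then the Bootcache.
std::vector<Endpoint>
selectOutbound(ConnectState const& s)
{
    if (s.fixedNeeded > 0 && !s.fixed.empty())
        return s.fixed;  // Stage 1: fixed slots

    if (s.autoConnect && s.outboundNeeded > 0)
    {
        if (!s.livecache.empty())
            return s.livecache;  // Stage 2: Livecache
        if (!s.bootcache.empty())
            return s.bootcache;  // Stage 3: Bootcache
    }
    return {};  // nothing to do right now
}
```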
--------------------------------------------------------------------------------- +--- # References @@ -352,10 +351,11 @@ Much of the work in PeerFinder was inspired by earlier work in Gnutella: _By Christopher Rohrs and Vincent Falco_ [Gnutella 0.6 Protocol:](http://rfc-gnutella.sourceforge.net/src/rfc-0_6-draft.html) Sections: -* 2.2.2 Ping (0x00) -* 2.2.3 Pong (0x01) -* 2.2.4 Use of Ping and Pong messages -* 2.2.4.1 A simple pong caching scheme -* 2.2.4.2 Other pong caching schemes + +- 2.2.2 Ping (0x00) +- 2.2.3 Pong (0x01) +- 2.2.4 Use of Ping and Pong messages +- 2.2.4.1 A simple pong caching scheme +- 2.2.4.2 Other pong caching schemes [overlay_network]: http://en.wikipedia.org/wiki/Overlay_network diff --git a/src/xrpld/rpc/README.md b/src/xrpld/rpc/README.md index cece30a3b2..5bb9655a76 100644 --- a/src/xrpld/rpc/README.md +++ b/src/xrpld/rpc/README.md @@ -2,15 +2,16 @@ ## Introduction. -By default, an RPC handler runs as an uninterrupted task on the JobQueue. This +By default, an RPC handler runs as an uninterrupted task on the JobQueue. This is fine for commands that are fast to compute but might not be acceptable for tasks that require multiple parts or are large, like a full ledger. -For this purpose, the rippled RPC handler allows *suspension with continuation* +For this purpose, the rippled RPC handler allows _suspension with continuation_ + - a request to suspend execution of the RPC response and to continue it after -some function or job has been executed. A default continuation is supplied -which simply reschedules the job on the JobQueue, or the programmer can supply -their own. + some function or job has been executed. A default continuation is supplied + which simply reschedules the job on the JobQueue, or the programmer can supply + their own. ## The classes. @@ -28,16 +29,16 @@ would prevent any other task from making forward progress when you call a `Callback`. A `Continuation` is a function that is given a `Callback` and promises to call -it later. A `Continuation` guarantees to call the `Callback` exactly once at +it later. A `Continuation` guarantees to call the `Callback` exactly once at some point in the future, but it does not have to be immediately or even in the current thread. -A `Suspend` is a function belonging to a `Coroutine`. A `Suspend` runs a +A `Suspend` is a function belonging to a `Coroutine`. A `Suspend` runs a `Continuation`, passing it a `Callback` that continues execution of the `Coroutine`. And finally, a `Coroutine` is a `std::function` which is given a -`Suspend`. This is what the RPC handler gives to the coroutine manager, +`Suspend`. This is what the RPC handler gives to the coroutine manager, expecting to get called back with a `Suspend` and to be able to start execution. ## The flow of control. diff --git a/src/xrpld/shamap/README.md b/src/xrpld/shamap/README.md index ef2d22024b..3bff74e67b 100644 --- a/src/xrpld/shamap/README.md +++ b/src/xrpld/shamap/README.md @@ -1,4 +1,4 @@ -# SHAMap Introduction # +# SHAMap Introduction March 2020 @@ -30,20 +30,20 @@ The root node is always a SHAMapInnerNode. A given `SHAMap` always stores only one of three kinds of data: - * Transactions with metadata - * Transactions without metadata, or - * Account states. +- Transactions with metadata +- Transactions without metadata, or +- Account states. So all of the leaf nodes of a particular `SHAMap` will always have a uniform type. The inner nodes carry no data other than the hash of the nodes beneath them. 
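A rough sketch of those shapes appears below. The declarations are illustrative, not the real classes: they only show a radix-16 trie whose inner nodes hold nothing but children and their hashes, and whose leaves each hold one serialized item.

```cpp
// Illustrative node shapes for the trie described above.
#include <array>
#include <cstdint>
#include <memory>
#include <vector>

using uint256 = std::array<std::uint8_t, 32>;  // 256-bit hash / key (assumed)

struct SHAMapTreeNode
{
    virtual ~SHAMapTreeNode() = default;
    uint256 hash;  // hash of this node's contents
};

// Inner nodes carry no data of their own: just up to 16 children, selected
// by the next hex digit (4 bits) of the key being looked up.
struct SHAMapInnerNode : SHAMapTreeNode
{
    std::array<std::shared_ptr<SHAMapTreeNode>, 16> children;
    std::array<uint256, 16> childHashes;
};

// Leaves hold one item: a transaction (with or without metadata) or an
// account-state entry; a given SHAMap only ever holds one of those kinds.
struct SHAMapLeafNode : SHAMapTreeNode
{
    uint256 key;                           // the item's 256-bit ID
    std::vector<std::uint8_t> serialized;  // the item's payload
};
```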
All nodes are owned by shared_ptrs resident in either other nodes, or in case of -the root node, a shared_ptr in the `SHAMap` itself. The use of shared_ptrs -permits more than one `SHAMap` at a time to share ownership of a node. This +the root node, a shared_ptr in the `SHAMap` itself. The use of shared_ptrs +permits more than one `SHAMap` at a time to share ownership of a node. This occurs (for example), when a copy of a `SHAMap` is made. Copies are made with the `snapShot` function as opposed to the `SHAMap` copy -constructor. See the section on `SHAMap` creation for more details about +constructor. See the section on `SHAMap` creation for more details about `snapShot`. Sequence numbers are used to further customize the node ownership strategy. See @@ -51,62 +51,62 @@ the section on sequence numbers for details on sequence numbers. ![node diagram](https://user-images.githubusercontent.com/46455409/77350005-1ef12c80-6cf9-11ea-9c8d-56410f442859.png) -## Mutability ## +## Mutability There are two different ways of building and using a `SHAMap`: - 1. A mutable `SHAMap` and - 2. An immutable `SHAMap` +1. A mutable `SHAMap` and +2. An immutable `SHAMap` The distinction here is not of the classic C++ immutable-means-unchanging sense. - An immutable `SHAMap` contains *nodes* that are immutable. Also, once a node has +An immutable `SHAMap` contains _nodes_ that are immutable. Also, once a node has been located in an immutable `SHAMap`, that node is guaranteed to persist in that `SHAMap` for the lifetime of the `SHAMap`. So, somewhat counter-intuitively, an immutable `SHAMap` may grow as new nodes are -introduced. But an immutable `SHAMap` will never get smaller (until it entirely -evaporates when it is destroyed). Nodes, once introduced to the immutable -`SHAMap`, also never change their location in memory. So nodes in an immutable +introduced. But an immutable `SHAMap` will never get smaller (until it entirely +evaporates when it is destroyed). Nodes, once introduced to the immutable +`SHAMap`, also never change their location in memory. So nodes in an immutable `SHAMap` can be handled using raw pointers (if you're careful). One consequence of this design is that an immutable `SHAMap` can never be -"trimmed". There is no way to identify unnecessary nodes in an immutable `SHAMap` -that could be removed. Once a node has been brought into the in-memory `SHAMap`, +"trimmed". There is no way to identify unnecessary nodes in an immutable `SHAMap` +that could be removed. Once a node has been brought into the in-memory `SHAMap`, that node stays in memory for the life of the `SHAMap`. Most `SHAMap`s are immutable, in the sense that they don't modify or remove their contained nodes. An example where a mutable `SHAMap` is required is when we want to apply -transactions to the last closed ledger. To do so we'd make a mutable snapshot +transactions to the last closed ledger. To do so we'd make a mutable snapshot of the state trie and then start applying transactions to it. Because the snapshot is mutable, changes to nodes in the snapshot will not affect nodes in other `SHAMap`s. An example using a immutable ledger would be when there's an open ledger and -some piece of code wishes to query the state of the ledger. In this case we +some piece of code wishes to query the state of the ledger. In this case we don't wish to change the state of the `SHAMap`, so we'd use an immutable snapshot. -## Sequence numbers ## +## Sequence numbers -Both `SHAMap`s and their nodes carry a sequence number. 
This is simply an +Both `SHAMap`s and their nodes carry a sequence number. This is simply an unsigned number that indicates ownership or membership, or a non-membership. -`SHAMap`s sequence numbers normally start out as 1. However when a snap-shot of +`SHAMap`s sequence numbers normally start out as 1. However when a snap-shot of a `SHAMap` is made, the copy's sequence number is 1 greater than the original. -The nodes of a `SHAMap` have their own copy of a sequence number. If the `SHAMap` +The nodes of a `SHAMap` have their own copy of a sequence number. If the `SHAMap` is mutable, meaning it can change, then all of its nodes must have the -same sequence number as the `SHAMap` itself. This enforces an invariant that none +same sequence number as the `SHAMap` itself. This enforces an invariant that none of the nodes are shared with other `SHAMap`s. When a `SHAMap` needs to have a private copy of a node, not shared by any other `SHAMap`, it first clones it and then sets the new copy to have a sequence number -equal to the `SHAMap` sequence number. The `unshareNode` is a private utility +equal to the `SHAMap` sequence number. The `unshareNode` is a private utility which automates the task of first checking if the node is already sharable, and -if so, cloning it and giving it the proper sequence number. An example case +if so, cloning it and giving it the proper sequence number. An example case where a private copy is needed is when an inner node needs to have a child -pointer altered. Any modification to a node will require a non-shared node. +pointer altered. Any modification to a node will require a non-shared node. When a `SHAMap` decides that it is safe to share a node of its own, it sets the node's sequence number to 0 (a `SHAMap` never has a sequence number of 0). This @@ -116,40 +116,40 @@ Note that other objects in rippled also have sequence numbers (e.g. ledgers). The `SHAMap` and node sequence numbers should not be confused with these other sequence numbers (no relation). -## SHAMap Creation ## +## SHAMap Creation -A `SHAMap` is usually not created from vacuum. Once an initial `SHAMap` is +A `SHAMap` is usually not created from vacuum. Once an initial `SHAMap` is constructed, later `SHAMap`s are usually created by calling snapShot(bool -isMutable) on the original `SHAMap`. The returned `SHAMap` has the expected +isMutable) on the original `SHAMap`. The returned `SHAMap` has the expected characteristics (mutable or immutable) based on the passed in flag. It is cheaper to make an immutable snapshot of a `SHAMap` than to make a mutable -snapshot. If the `SHAMap` snapshot is mutable then sharable nodes must be +snapshot. If the `SHAMap` snapshot is mutable then sharable nodes must be copied before they are placed in the mutable map. -A new `SHAMap` is created with each new ledger round. Transactions not executed +A new `SHAMap` is created with each new ledger round. Transactions not executed in the previous ledger populate the `SHAMap` for the new ledger. -## Storing SHAMap data in the database ## +## Storing SHAMap data in the database -When consensus is reached, the ledger is closed. As part of this process, the +When consensus is reached, the ledger is closed. As part of this process, the `SHAMap` is stored to the database by calling `SHAMap::flushDirty`. Both `unshare()` and `flushDirty` walk the `SHAMap` by calling -`SHAMap::walkSubTree`. As `unshare()` walks the trie, nodes are not written to +`SHAMap::walkSubTree`. 
As `unshare()` walks the trie, nodes are not written to the database, and as `flushDirty` walks the trie nodes are written to the database. `walkSubTree` visits every node in the trie. This process must ensure that each node is only owned by this trie, and so "unshares" as it walks each -node (from the root down). This is done in the `preFlushNode` function by -ensuring that the node has a sequence number equal to that of the `SHAMap`. If +node (from the root down). This is done in the `preFlushNode` function by +ensuring that the node has a sequence number equal to that of the `SHAMap`. If the node doesn't, it is cloned. For each inner node encountered (starting with the root node), each of the -children are inspected (from 1 to 16). For each child, if it has a non-zero -sequence number (unshareable), the child is first copied. Then if the child is -an inner node, we recurse down to that node's children. Otherwise we've found a -leaf node and that node is written to the database. A count of each leaf node -that is visited is kept. The hash of the data in the leaf node is computed at +children are inspected (from 1 to 16). For each child, if it has a non-zero +sequence number (unshareable), the child is first copied. Then if the child is +an inner node, we recurse down to that node's children. Otherwise we've found a +leaf node and that node is written to the database. A count of each leaf node +that is visited is kept. The hash of the data in the leaf node is computed at this time, and the child is reassigned back into the parent inner node just in case the COW operation created a new pointer to this leaf node. @@ -157,22 +157,22 @@ After processing each node, the node is then marked as sharable again by setting its sequence number to 0. After all of an inner node's children are processed, then its hash is updated -and the inner node is written to the database. Then this inner node is assigned +and the inner node is written to the database. Then this inner node is assigned back into it's parent node, again in case the COW operation created a new pointer to it. -## Walking a SHAMap ## +## Walking a SHAMap -The private function `SHAMap::walkTowardsKey` is a good example of *how* to walk +The private function `SHAMap::walkTowardsKey` is a good example of _how_ to walk a `SHAMap`, and the various functions that call `walkTowardsKey` are good examples -of *why* one would want to walk a `SHAMap` (e.g. `SHAMap::findKey`). +of _why_ one would want to walk a `SHAMap` (e.g. `SHAMap::findKey`). `walkTowardsKey` always starts at the root of the `SHAMap` and traverses down through the inner nodes, looking for a leaf node along a path in the trie designated by a `uint256`. -As one walks the trie, one can *optionally* keep a stack of nodes that one has -passed through. This isn't necessary for walking the trie, but many clients -will use the stack after finding the desired node. For example if one is +As one walks the trie, one can _optionally_ keep a stack of nodes that one has +passed through. This isn't necessary for walking the trie, but many clients +will use the stack after finding the desired node. For example if one is deleting a node from the trie, the stack is handy for repairing invariants in the trie after the deletion. @@ -189,10 +189,10 @@ how we use a `SHAMapNodeID` to select a "branch" (child) by indexing into a path at a given depth. While the current node is an inner node, traversing down the trie from the root -continues, unless the path indicates a child that does not exist. 
And in this +continues, unless the path indicates a child that does not exist. And in this case, `nullptr` is returned to indicate no leaf node along the given path -exists. Otherwise a leaf node is found and a (non-owning) pointer to it is -returned. At each step, if a stack is requested, a +exists. Otherwise a leaf node is found and a (non-owning) pointer to it is +returned. At each step, if a stack is requested, a `pair, SHAMapNodeID>` is pushed onto the stack. When a child node is found by `selectBranch`, the traversal to that node @@ -210,35 +210,35 @@ The first step consists of several attempts to find the node in various places: If the node is not found in the trie, then it is installed into the trie as part of the traversal process. -## Late-arriving Nodes ## +## Late-arriving Nodes -As we noted earlier, `SHAMap`s (even immutable ones) may grow. If a `SHAMap` is +As we noted earlier, `SHAMap`s (even immutable ones) may grow. If a `SHAMap` is searching for a node and runs into an empty spot in the trie, then the `SHAMap` -looks to see if the node exists but has not yet been made part of the map. This -operation is performed in the `SHAMap::fetchNodeNT()` method. The *NT* +looks to see if the node exists but has not yet been made part of the map. This +operation is performed in the `SHAMap::fetchNodeNT()` method. The _NT_ is this case stands for 'No Throw'. The `fetchNodeNT()` method goes through three phases: - 1. By calling `cacheLookup()` we attempt to locate the missing node in the - TreeNodeCache. The TreeNodeCache is a cache of immutable SHAMapTreeNodes +1. By calling `cacheLookup()` we attempt to locate the missing node in the + TreeNodeCache. The TreeNodeCache is a cache of immutable SHAMapTreeNodes that are shared across all `SHAMap`s. Any SHAMapLeafNode that is immutable has a sequence number of zero (sharable). When a mutable `SHAMap` is created then its SHAMapTreeNodes are - given non-zero sequence numbers (unsharable). But all nodes in the + given non-zero sequence numbers (unsharable). But all nodes in the TreeNodeCache are immutable, so if one is found here, its sequence number will be 0. - 2. If the node is not in the TreeNodeCache, we attempt to locate the node - in the historic data stored by the data base. The call to to +2. If the node is not in the TreeNodeCache, we attempt to locate the node + in the historic data stored by the data base. The call to to `fetchNodeFromDB(hash)` does that work for us. - 3. Finally if a filter exists, we check if it can supply the node. This is +3. Finally if a filter exists, we check if it can supply the node. This is typically the LedgerMaster which tracks the current ledger and ledgers in the process of closing. -## Canonicalize ## +## Canonicalize `canonicalize()` is called every time a node is introduced into the `SHAMap`. @@ -251,51 +251,50 @@ by favoring the copy already in the `TreeNodeCache`. By using `canonicalize()` we manage a thread race condition where two different threads might both recognize the lack of a SHAMapLeafNode at the same time -(during a fetch). If they both attempt to insert the node into the `SHAMap`, then +(during a fetch). If they both attempt to insert the node into the `SHAMap`, then `canonicalize` makes sure that the first node in wins and the slower thread -receives back a pointer to the node inserted by the faster thread. Recall +receives back a pointer to the node inserted by the faster thread. Recall that these two `SHAMap`s will share the same `TreeNodeCache`. 
-## `TreeNodeCache` ## +## `TreeNodeCache` The `TreeNodeCache` is a `std::unordered_map` keyed on the hash of the -`SHAMap` node. The stored type consists of `shared_ptr`, +`SHAMap` node. The stored type consists of `shared_ptr`, `weak_ptr`, and a time point indicating the most recent -access of this node in the cache. The time point is based on +access of this node in the cache. The time point is based on `std::chrono::steady_clock`. The container uses a cryptographically secure hash that is randomly seeded. The `TreeNodeCache` also carries with it various data used for statistics -and logging, and a target age for the contained nodes. When the target age +and logging, and a target age for the contained nodes. When the target age for a node is exceeded, and there are no more references to the node, the node is removed from the `TreeNodeCache`. -## `FullBelowCache` ## +## `FullBelowCache` This cache remembers which trie keys have all of their children resident in a -`SHAMap`. This optimizes the process of acquiring a complete trie. This is used -when creating the missing nodes list. Missing nodes are those nodes that a +`SHAMap`. This optimizes the process of acquiring a complete trie. This is used +when creating the missing nodes list. Missing nodes are those nodes that a `SHAMap` refers to but that are not stored in the local database. As a depth-first walk of a `SHAMap` is performed, if an inner node answers true to `isFullBelow()` then it is known that none of this node's children are missing -nodes, and thus that subtree does not need to be walked. These nodes are stored -in the FullBelowCache. Subsequent walks check the FullBelowCache first when +nodes, and thus that subtree does not need to be walked. These nodes are stored +in the FullBelowCache. Subsequent walks check the FullBelowCache first when encountering a node, and ignore that subtree if found. -## `SHAMapTreeNode` ## +## `SHAMapTreeNode` -This is an abstract base class for the concrete node types. It holds the +This is an abstract base class for the concrete node types. It holds the following common data: 1. A hash 2. An identifier used to perform copy-on-write operations +### `SHAMapInnerNode` -### `SHAMapInnerNode` ### - -`SHAMapInnerNode` publicly inherits directly from `SHAMapTreeNode`. It holds +`SHAMapInnerNode` publicly inherits directly from `SHAMapTreeNode`. It holds the following data: 1. Up to 16 child nodes, each held with a shared_ptr. @@ -304,36 +303,34 @@ the following data: 4. An identifier used to determine whether the map below this node is fully populated -### `SHAMapLeafNode` ### +### `SHAMapLeafNode` `SHAMapLeafNode` is an abstract class which publicly inherits directly from -`SHAMapTreeNode`. It isIt holds the +`SHAMapTreeNode`. It isIt holds the following data: 1. A shared_ptr to a const SHAMapItem. -#### `SHAMapAccountStateLeafNode` #### +#### `SHAMapAccountStateLeafNode` `SHAMapAccountStateLeafNode` is a class which publicly inherits directly from `SHAMapLeafNode`. It is used to represent entries (i.e. account objects, escrow objects, trust lines, etc.) in a state map. -#### `SHAMapTxLeafNode` #### +#### `SHAMapTxLeafNode` `SHAMapTxLeafNode` is a class which publicly inherits directly from `SHAMapLeafNode`. It is used to represent transactions in a state map. -#### `SHAMapTxPlusMetaLeafNode` #### +#### `SHAMapTxPlusMetaLeafNode` `SHAMapTxPlusMetaLeafNode` is a class which publicly inherits directly from `SHAMapLeafNode`. 
It is used to represent transactions along with metadata associated with this transaction in a state map. -## SHAMapItem ## +## SHAMapItem This holds the following data: -1. uint256. The hash of the data. -2. vector. The data (transactions, account info). - - +1. uint256. The hash of the data. +2. vector. The data (transactions, account info). diff --git a/tests/README.md b/tests/README.md index 0306915b3b..c4a96005e7 100644 --- a/tests/README.md +++ b/tests/README.md @@ -1,4 +1,5 @@ # Integration tests + This directory contains integration tests for the project. These tests are run against the `libxrpl` library or `rippled` binary to verify they are working as expected. From b7ed99426b078ba1f256eeba4f36feb640cfe113 Mon Sep 17 00:00:00 2001 From: Oleksandr Hrabar <40630611+Afformativ@users.noreply.github.com> Date: Mon, 11 Aug 2025 14:12:36 -0400 Subject: [PATCH 115/244] fix: Make test suite names match the directory name (#5597) This change fixes the suite names all around the test files, to make them match to the folder name in which this test files are located. Also, the RCL test files are relocated to the consensus folder, because they are testing consensus functionality. --- src/test/app/Check_test.cpp | 2 +- src/test/app/CrossingLimits_test.cpp | 2 +- src/test/app/DNS_test.cpp | 2 +- src/test/app/FeeVote_test.cpp | 2 +- src/test/app/FixNFTokenPageLinks_test.cpp | 2 +- src/test/{ledger => app}/Invariants_test.cpp | 2 +- src/test/app/LoadFeeTrack_test.cpp | 2 +- src/test/app/MPToken_test.cpp | 2 +- src/test/app/NFTokenAuth_test.cpp | 2 +- src/test/app/NFTokenBurn_test.cpp | 10 +++++----- src/test/app/NFTokenDir_test.cpp | 2 +- src/test/app/NFToken_test.cpp | 16 ++++++++-------- src/test/app/OfferStream_test.cpp | 2 +- src/test/app/Offer_test.cpp | 14 +++++++------- src/test/app/OversizeMeta_test.cpp | 8 ++++---- src/test/app/ReducedOffer_test.cpp | 2 +- src/test/app/SetAuth_test.cpp | 2 +- src/test/app/Ticket_test.cpp | 2 +- src/test/app/Vault_test.cpp | 2 +- src/test/app/tx/apply_test.cpp | 2 +- src/test/basics/Buffer_test.cpp | 2 +- src/test/basics/DetectCrash_test.cpp | 2 +- src/test/basics/Expected_test.cpp | 2 +- src/test/basics/FeeUnits_test.cpp | 2 +- src/test/basics/FileUtilities_test.cpp | 2 +- src/test/basics/IOUAmount_test.cpp | 2 +- src/test/basics/IntrusiveShared_test.cpp | 2 +- src/test/basics/KeyCache_test.cpp | 2 +- src/test/basics/Number_test.cpp | 2 +- src/test/basics/StringUtilities_test.cpp | 2 +- src/test/basics/TaggedCache_test.cpp | 2 +- src/test/basics/XRPAmount_test.cpp | 2 +- src/test/basics/base58_test.cpp | 2 +- src/test/basics/base_uint_test.cpp | 2 +- src/test/basics/join_test.cpp | 2 +- src/test/beast/IPEndpoint_test.cpp | 2 +- src/test/beast/LexicalCast_test.cpp | 2 +- src/test/beast/SemanticVersion_test.cpp | 2 +- .../beast/aged_associative_container_test.cpp | 16 ++++++++-------- src/test/beast/beast_CurrentThreadName_test.cpp | 2 +- src/test/beast/beast_Journal_test.cpp | 2 +- src/test/beast/beast_PropertyStream_test.cpp | 2 +- src/test/beast/beast_Zero_test.cpp | 2 +- src/test/beast/beast_abstract_clock_test.cpp | 2 +- .../beast/beast_basic_seconds_clock_test.cpp | 2 +- src/test/beast/beast_io_latency_probe_test.cpp | 2 +- src/test/beast/define_print.cpp | 2 +- src/test/consensus/NegativeUNL_test.cpp | 4 ++-- .../RCLCensorshipDetector_test.cpp | 2 +- src/test/csf/BasicNetwork_test.cpp | 2 +- src/test/csf/Digraph_test.cpp | 2 +- src/test/csf/Histogram_test.cpp | 2 +- src/test/csf/Scheduler_test.cpp | 2 +- src/test/json/Object_test.cpp | 2 +- 
src/test/json/Output_test.cpp | 2 +- src/test/json/Writer_test.cpp | 2 +- src/test/jtx/Env_test.cpp | 2 +- src/test/jtx/WSClient_test.cpp | 2 +- src/test/nodestore/Backend_test.cpp | 2 +- src/test/nodestore/Basics_test.cpp | 2 +- src/test/nodestore/Database_test.cpp | 2 +- src/test/nodestore/Timing_test.cpp | 2 +- src/test/nodestore/import_test.cpp | 2 +- src/test/nodestore/varint_test.cpp | 2 +- src/test/overlay/compression_test.cpp | 2 +- src/test/overlay/handshake_test.cpp | 2 +- src/test/overlay/reduce_relay_test.cpp | 4 ++-- src/test/overlay/tx_reduce_relay_test.cpp | 2 +- src/test/peerfinder/PeerFinder_test.cpp | 2 +- src/test/protocol/InnerObjectFormats_test.cpp | 2 +- src/test/protocol/Memo_test.cpp | 2 +- src/test/protocol/STAmount_test.cpp | 2 +- src/test/protocol/STIssue_test.cpp | 2 +- src/test/protocol/STTx_test.cpp | 4 ++-- src/test/rpc/AMMInfo_test.cpp | 2 +- src/test/rpc/AccountSet_test.cpp | 2 +- src/test/rpc/AmendmentBlocked_test.cpp | 2 +- src/test/rpc/BookChanges_test.cpp | 2 +- src/test/rpc/Book_test.cpp | 2 +- src/test/rpc/DeliveredAmount_test.cpp | 2 +- src/test/rpc/DepositAuthorized_test.cpp | 2 +- src/test/rpc/GatewayBalances_test.cpp | 2 +- src/test/rpc/GetAggregatePrice_test.cpp | 2 +- src/test/rpc/Handler_test.cpp | 2 +- src/test/rpc/JSONRPC_test.cpp | 2 +- src/test/rpc/KeyGeneration_test.cpp | 2 +- src/test/rpc/LedgerClosed_test.cpp | 2 +- src/test/rpc/LedgerData_test.cpp | 2 +- src/test/rpc/LedgerEntry_test.cpp | 4 ++-- src/test/rpc/LedgerRPC_test.cpp | 2 +- src/test/rpc/LedgerRequestRPC_test.cpp | 2 +- src/test/rpc/NoRipple_test.cpp | 2 +- src/test/rpc/OwnerInfo_test.cpp | 2 +- src/test/rpc/RPCCall_test.cpp | 2 +- src/test/rpc/RPCHelpers_test.cpp | 2 +- src/test/rpc/RPCOverload_test.cpp | 2 +- src/test/rpc/RobustTransaction_test.cpp | 2 +- src/test/rpc/Roles_test.cpp | 2 +- src/test/rpc/ServerInfo_test.cpp | 2 +- src/test/rpc/Status_test.cpp | 4 ++-- src/test/rpc/Subscribe_test.cpp | 2 +- src/test/rpc/ValidatorRPC_test.cpp | 2 +- src/test/server/Server_test.cpp | 2 +- src/test/shamap/SHAMap_test.cpp | 4 ++-- 104 files changed, 137 insertions(+), 137 deletions(-) rename src/test/{ledger => app}/Invariants_test.cpp (99%) rename src/test/{app => consensus}/RCLCensorshipDetector_test.cpp (98%) diff --git a/src/test/app/Check_test.cpp b/src/test/app/Check_test.cpp index be38b22313..e724b83535 100644 --- a/src/test/app/Check_test.cpp +++ b/src/test/app/Check_test.cpp @@ -2729,6 +2729,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE(Check, tx, ripple); +BEAST_DEFINE_TESTSUITE(Check, app, ripple); } // namespace ripple diff --git a/src/test/app/CrossingLimits_test.cpp b/src/test/app/CrossingLimits_test.cpp index 6e76936199..18fc21078c 100644 --- a/src/test/app/CrossingLimits_test.cpp +++ b/src/test/app/CrossingLimits_test.cpp @@ -530,7 +530,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE_MANUAL_PRIO(CrossingLimits, tx, ripple, 10); +BEAST_DEFINE_TESTSUITE_MANUAL_PRIO(CrossingLimits, app, ripple, 10); } // namespace test } // namespace ripple diff --git a/src/test/app/DNS_test.cpp b/src/test/app/DNS_test.cpp index 7e209deb1c..28a143e93d 100644 --- a/src/test/app/DNS_test.cpp +++ b/src/test/app/DNS_test.cpp @@ -128,7 +128,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE_MANUAL_PRIO(DNS, ripple_data, ripple, 20); +BEAST_DEFINE_TESTSUITE_MANUAL_PRIO(DNS, app, ripple, 20); } // namespace test } // namespace ripple diff --git a/src/test/app/FeeVote_test.cpp b/src/test/app/FeeVote_test.cpp index 1cf2e67f83..ba3d379219 100644 --- a/src/test/app/FeeVote_test.cpp +++ 
b/src/test/app/FeeVote_test.cpp @@ -100,7 +100,7 @@ class FeeVote_test : public beast::unit_test::suite } }; -BEAST_DEFINE_TESTSUITE(FeeVote, server, ripple); +BEAST_DEFINE_TESTSUITE(FeeVote, app, ripple); } // namespace test } // namespace ripple diff --git a/src/test/app/FixNFTokenPageLinks_test.cpp b/src/test/app/FixNFTokenPageLinks_test.cpp index a54e889960..4acd650a08 100644 --- a/src/test/app/FixNFTokenPageLinks_test.cpp +++ b/src/test/app/FixNFTokenPageLinks_test.cpp @@ -663,6 +663,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE(FixNFTokenPageLinks, tx, ripple); +BEAST_DEFINE_TESTSUITE(FixNFTokenPageLinks, app, ripple); } // namespace ripple diff --git a/src/test/ledger/Invariants_test.cpp b/src/test/app/Invariants_test.cpp similarity index 99% rename from src/test/ledger/Invariants_test.cpp rename to src/test/app/Invariants_test.cpp index fadd9c0eae..ae2a1c45df 100644 --- a/src/test/ledger/Invariants_test.cpp +++ b/src/test/app/Invariants_test.cpp @@ -1626,6 +1626,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE(Invariants, ledger, ripple); +BEAST_DEFINE_TESTSUITE(Invariants, app, ripple); } // namespace ripple diff --git a/src/test/app/LoadFeeTrack_test.cpp b/src/test/app/LoadFeeTrack_test.cpp index 9b0cf2fa2d..8a88e0273f 100644 --- a/src/test/app/LoadFeeTrack_test.cpp +++ b/src/test/app/LoadFeeTrack_test.cpp @@ -87,6 +87,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE(LoadFeeTrack, ripple_core, ripple); +BEAST_DEFINE_TESTSUITE(LoadFeeTrack, app, ripple); } // namespace ripple diff --git a/src/test/app/MPToken_test.cpp b/src/test/app/MPToken_test.cpp index 2cb47780ba..6470962f2f 100644 --- a/src/test/app/MPToken_test.cpp +++ b/src/test/app/MPToken_test.cpp @@ -2797,7 +2797,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE_PRIO(MPToken, tx, ripple, 2); +BEAST_DEFINE_TESTSUITE_PRIO(MPToken, app, ripple, 2); } // namespace test } // namespace ripple diff --git a/src/test/app/NFTokenAuth_test.cpp b/src/test/app/NFTokenAuth_test.cpp index 1a59dc579a..f5eedfce77 100644 --- a/src/test/app/NFTokenAuth_test.cpp +++ b/src/test/app/NFTokenAuth_test.cpp @@ -619,6 +619,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE_PRIO(NFTokenAuth, tx, ripple, 2); +BEAST_DEFINE_TESTSUITE_PRIO(NFTokenAuth, app, ripple, 2); } // namespace ripple \ No newline at end of file diff --git a/src/test/app/NFTokenBurn_test.cpp b/src/test/app/NFTokenBurn_test.cpp index 21b0a1ffd8..44c55f2b8c 100644 --- a/src/test/app/NFTokenBurn_test.cpp +++ b/src/test/app/NFTokenBurn_test.cpp @@ -1454,10 +1454,10 @@ public: } }; -BEAST_DEFINE_TESTSUITE_PRIO(NFTokenBurnBaseUtil, tx, ripple, 3); -BEAST_DEFINE_TESTSUITE_PRIO(NFTokenBurnWOfixFungTokens, tx, ripple, 3); -BEAST_DEFINE_TESTSUITE_PRIO(NFTokenBurnWOFixTokenRemint, tx, ripple, 3); -BEAST_DEFINE_TESTSUITE_PRIO(NFTokenBurnWOFixNFTPageLinks, tx, ripple, 3); -BEAST_DEFINE_TESTSUITE_PRIO(NFTokenBurnAllFeatures, tx, ripple, 3); +BEAST_DEFINE_TESTSUITE_PRIO(NFTokenBurnBaseUtil, app, ripple, 3); +BEAST_DEFINE_TESTSUITE_PRIO(NFTokenBurnWOfixFungTokens, app, ripple, 3); +BEAST_DEFINE_TESTSUITE_PRIO(NFTokenBurnWOFixTokenRemint, app, ripple, 3); +BEAST_DEFINE_TESTSUITE_PRIO(NFTokenBurnWOFixNFTPageLinks, app, ripple, 3); +BEAST_DEFINE_TESTSUITE_PRIO(NFTokenBurnAllFeatures, app, ripple, 3); } // namespace ripple diff --git a/src/test/app/NFTokenDir_test.cpp b/src/test/app/NFTokenDir_test.cpp index df40781590..a63653d8dc 100644 --- a/src/test/app/NFTokenDir_test.cpp +++ b/src/test/app/NFTokenDir_test.cpp @@ -1110,7 +1110,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE_PRIO(NFTokenDir, tx, ripple, 1); 
+BEAST_DEFINE_TESTSUITE_PRIO(NFTokenDir, app, ripple, 1); } // namespace ripple diff --git a/src/test/app/NFToken_test.cpp b/src/test/app/NFToken_test.cpp index b79ebf3c40..1c4314643c 100644 --- a/src/test/app/NFToken_test.cpp +++ b/src/test/app/NFToken_test.cpp @@ -8171,13 +8171,13 @@ class NFTokenAllFeatures_test : public NFTokenBaseUtil_test } }; -BEAST_DEFINE_TESTSUITE_PRIO(NFTokenBaseUtil, tx, ripple, 2); -BEAST_DEFINE_TESTSUITE_PRIO(NFTokenDisallowIncoming, tx, ripple, 2); -BEAST_DEFINE_TESTSUITE_PRIO(NFTokenWOfixV1, tx, ripple, 2); -BEAST_DEFINE_TESTSUITE_PRIO(NFTokenWOTokenRemint, tx, ripple, 2); -BEAST_DEFINE_TESTSUITE_PRIO(NFTokenWOTokenReserve, tx, ripple, 2); -BEAST_DEFINE_TESTSUITE_PRIO(NFTokenWOMintOffer, tx, ripple, 2); -BEAST_DEFINE_TESTSUITE_PRIO(NFTokenWOModify, tx, ripple, 2); -BEAST_DEFINE_TESTSUITE_PRIO(NFTokenAllFeatures, tx, ripple, 2); +BEAST_DEFINE_TESTSUITE_PRIO(NFTokenBaseUtil, app, ripple, 2); +BEAST_DEFINE_TESTSUITE_PRIO(NFTokenDisallowIncoming, app, ripple, 2); +BEAST_DEFINE_TESTSUITE_PRIO(NFTokenWOfixV1, app, ripple, 2); +BEAST_DEFINE_TESTSUITE_PRIO(NFTokenWOTokenRemint, app, ripple, 2); +BEAST_DEFINE_TESTSUITE_PRIO(NFTokenWOTokenReserve, app, ripple, 2); +BEAST_DEFINE_TESTSUITE_PRIO(NFTokenWOMintOffer, app, ripple, 2); +BEAST_DEFINE_TESTSUITE_PRIO(NFTokenWOModify, app, ripple, 2); +BEAST_DEFINE_TESTSUITE_PRIO(NFTokenAllFeatures, app, ripple, 2); } // namespace ripple diff --git a/src/test/app/OfferStream_test.cpp b/src/test/app/OfferStream_test.cpp index 691d327cd8..35f38da29a 100644 --- a/src/test/app/OfferStream_test.cpp +++ b/src/test/app/OfferStream_test.cpp @@ -39,6 +39,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE(OfferStream, tx, ripple); +BEAST_DEFINE_TESTSUITE(OfferStream, app, ripple); } // namespace ripple diff --git a/src/test/app/Offer_test.cpp b/src/test/app/Offer_test.cpp index 96f68fb2ad..aa647925fa 100644 --- a/src/test/app/Offer_test.cpp +++ b/src/test/app/Offer_test.cpp @@ -5466,13 +5466,13 @@ class Offer_manual_test : public OfferBaseUtil_test } }; -BEAST_DEFINE_TESTSUITE_PRIO(OfferBaseUtil, tx, ripple, 2); -BEAST_DEFINE_TESTSUITE_PRIO(OfferWTakerDryOffer, tx, ripple, 2); -BEAST_DEFINE_TESTSUITE_PRIO(OfferWOSmallQOffers, tx, ripple, 2); -BEAST_DEFINE_TESTSUITE_PRIO(OfferWOFillOrKill, tx, ripple, 2); -BEAST_DEFINE_TESTSUITE_PRIO(OfferWOPermDEX, tx, ripple, 2); -BEAST_DEFINE_TESTSUITE_PRIO(OfferAllFeatures, tx, ripple, 2); -BEAST_DEFINE_TESTSUITE_MANUAL_PRIO(Offer_manual, tx, ripple, 20); +BEAST_DEFINE_TESTSUITE_PRIO(OfferBaseUtil, app, ripple, 2); +BEAST_DEFINE_TESTSUITE_PRIO(OfferWTakerDryOffer, app, ripple, 2); +BEAST_DEFINE_TESTSUITE_PRIO(OfferWOSmallQOffers, app, ripple, 2); +BEAST_DEFINE_TESTSUITE_PRIO(OfferWOFillOrKill, app, ripple, 2); +BEAST_DEFINE_TESTSUITE_PRIO(OfferWOPermDEX, app, ripple, 2); +BEAST_DEFINE_TESTSUITE_PRIO(OfferAllFeatures, app, ripple, 2); +BEAST_DEFINE_TESTSUITE_MANUAL_PRIO(Offer_manual, app, ripple, 20); } // namespace test } // namespace ripple diff --git a/src/test/app/OversizeMeta_test.cpp b/src/test/app/OversizeMeta_test.cpp index f6e4466bab..fb6faa3ec3 100644 --- a/src/test/app/OversizeMeta_test.cpp +++ b/src/test/app/OversizeMeta_test.cpp @@ -61,7 +61,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE_MANUAL_PRIO(PlumpBook, tx, ripple, 5); +BEAST_DEFINE_TESTSUITE_MANUAL_PRIO(PlumpBook, app, ripple, 5); //------------------------------------------------------------------------------ @@ -76,7 +76,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(ThinBook, tx, ripple); +BEAST_DEFINE_TESTSUITE(ThinBook, app, ripple); 
//------------------------------------------------------------------------------ @@ -119,7 +119,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE_MANUAL_PRIO(OversizeMeta, tx, ripple, 3); +BEAST_DEFINE_TESTSUITE_MANUAL_PRIO(OversizeMeta, app, ripple, 3); //------------------------------------------------------------------------------ @@ -185,7 +185,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE_MANUAL_PRIO(FindOversizeCross, tx, ripple, 50); +BEAST_DEFINE_TESTSUITE_MANUAL_PRIO(FindOversizeCross, app, ripple, 50); } // namespace test } // namespace ripple diff --git a/src/test/app/ReducedOffer_test.cpp b/src/test/app/ReducedOffer_test.cpp index 5142aaab0e..fa2be451fa 100644 --- a/src/test/app/ReducedOffer_test.cpp +++ b/src/test/app/ReducedOffer_test.cpp @@ -800,7 +800,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE_PRIO(ReducedOffer, tx, ripple, 2); +BEAST_DEFINE_TESTSUITE_PRIO(ReducedOffer, app, ripple, 2); } // namespace test } // namespace ripple diff --git a/src/test/app/SetAuth_test.cpp b/src/test/app/SetAuth_test.cpp index 4c63560770..dfa831a72e 100644 --- a/src/test/app/SetAuth_test.cpp +++ b/src/test/app/SetAuth_test.cpp @@ -80,7 +80,7 @@ struct SetAuth_test : public beast::unit_test::suite } }; -BEAST_DEFINE_TESTSUITE(SetAuth, test, ripple); +BEAST_DEFINE_TESTSUITE(SetAuth, app, ripple); } // namespace test } // namespace ripple diff --git a/src/test/app/Ticket_test.cpp b/src/test/app/Ticket_test.cpp index f8ac64679e..70a5a48adf 100644 --- a/src/test/app/Ticket_test.cpp +++ b/src/test/app/Ticket_test.cpp @@ -1000,6 +1000,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE(Ticket, tx, ripple); +BEAST_DEFINE_TESTSUITE(Ticket, app, ripple); } // namespace ripple diff --git a/src/test/app/Vault_test.cpp b/src/test/app/Vault_test.cpp index f9036719cd..7ea38db2b1 100644 --- a/src/test/app/Vault_test.cpp +++ b/src/test/app/Vault_test.cpp @@ -3342,6 +3342,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE_PRIO(Vault, tx, ripple, 1); +BEAST_DEFINE_TESTSUITE_PRIO(Vault, app, ripple, 1); } // namespace ripple diff --git a/src/test/app/tx/apply_test.cpp b/src/test/app/tx/apply_test.cpp index 0f5ccf5a55..a754866c7f 100644 --- a/src/test/app/tx/apply_test.cpp +++ b/src/test/app/tx/apply_test.cpp @@ -87,6 +87,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE(Apply, app, ripple); +BEAST_DEFINE_TESTSUITE(Apply, tx, ripple); } // namespace ripple diff --git a/src/test/basics/Buffer_test.cpp b/src/test/basics/Buffer_test.cpp index c59805f569..065c33c12f 100644 --- a/src/test/basics/Buffer_test.cpp +++ b/src/test/basics/Buffer_test.cpp @@ -280,7 +280,7 @@ struct Buffer_test : beast::unit_test::suite } }; -BEAST_DEFINE_TESTSUITE(Buffer, ripple_basics, ripple); +BEAST_DEFINE_TESTSUITE(Buffer, basics, ripple); } // namespace test } // namespace ripple diff --git a/src/test/basics/DetectCrash_test.cpp b/src/test/basics/DetectCrash_test.cpp index 1ae761f34a..5489ae5b26 100644 --- a/src/test/basics/DetectCrash_test.cpp +++ b/src/test/basics/DetectCrash_test.cpp @@ -41,7 +41,7 @@ struct DetectCrash_test : public beast::unit_test::suite } }; -BEAST_DEFINE_TESTSUITE_MANUAL(DetectCrash, unit_test, beast); +BEAST_DEFINE_TESTSUITE_MANUAL(DetectCrash, basics, beast); } // namespace test } // namespace ripple diff --git a/src/test/basics/Expected_test.cpp b/src/test/basics/Expected_test.cpp index d022d07461..faa6b98764 100644 --- a/src/test/basics/Expected_test.cpp +++ b/src/test/basics/Expected_test.cpp @@ -243,7 +243,7 @@ struct Expected_test : beast::unit_test::suite } }; -BEAST_DEFINE_TESTSUITE(Expected, ripple_basics, ripple); 
+BEAST_DEFINE_TESTSUITE(Expected, basics, ripple); } // namespace test } // namespace ripple diff --git a/src/test/basics/FeeUnits_test.cpp b/src/test/basics/FeeUnits_test.cpp index 6266288896..f9be632644 100644 --- a/src/test/basics/FeeUnits_test.cpp +++ b/src/test/basics/FeeUnits_test.cpp @@ -371,7 +371,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(feeunits, ripple_basics, ripple); +BEAST_DEFINE_TESTSUITE(feeunits, basics, ripple); } // namespace test } // namespace ripple diff --git a/src/test/basics/FileUtilities_test.cpp b/src/test/basics/FileUtilities_test.cpp index 4b4cbe70c8..9071ac7231 100644 --- a/src/test/basics/FileUtilities_test.cpp +++ b/src/test/basics/FileUtilities_test.cpp @@ -79,6 +79,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE(FileUtilities, ripple_basics, ripple); +BEAST_DEFINE_TESTSUITE(FileUtilities, basics, ripple); } // namespace ripple diff --git a/src/test/basics/IOUAmount_test.cpp b/src/test/basics/IOUAmount_test.cpp index 6ba1cfd6f1..dfc48c9be7 100644 --- a/src/test/basics/IOUAmount_test.cpp +++ b/src/test/basics/IOUAmount_test.cpp @@ -274,6 +274,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE(IOUAmount, protocol, ripple); +BEAST_DEFINE_TESTSUITE(IOUAmount, basics, ripple); } // namespace ripple diff --git a/src/test/basics/IntrusiveShared_test.cpp b/src/test/basics/IntrusiveShared_test.cpp index 736cc47345..a3acc54d45 100644 --- a/src/test/basics/IntrusiveShared_test.cpp +++ b/src/test/basics/IntrusiveShared_test.cpp @@ -887,6 +887,6 @@ public: } }; // namespace tests -BEAST_DEFINE_TESTSUITE(IntrusiveShared, ripple_basics, ripple); +BEAST_DEFINE_TESTSUITE(IntrusiveShared, basics, ripple); } // namespace tests } // namespace ripple diff --git a/src/test/basics/KeyCache_test.cpp b/src/test/basics/KeyCache_test.cpp index d12dd59af0..e1d57fb3e4 100644 --- a/src/test/basics/KeyCache_test.cpp +++ b/src/test/basics/KeyCache_test.cpp @@ -93,6 +93,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE(KeyCache, common, ripple); +BEAST_DEFINE_TESTSUITE(KeyCache, basics, ripple); } // namespace ripple diff --git a/src/test/basics/Number_test.cpp b/src/test/basics/Number_test.cpp index 13d8b259d1..964cfe9614 100644 --- a/src/test/basics/Number_test.cpp +++ b/src/test/basics/Number_test.cpp @@ -743,6 +743,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE(Number, ripple_basics, ripple); +BEAST_DEFINE_TESTSUITE(Number, basics, ripple); } // namespace ripple diff --git a/src/test/basics/StringUtilities_test.cpp b/src/test/basics/StringUtilities_test.cpp index cf916c6265..b3fac22b42 100644 --- a/src/test/basics/StringUtilities_test.cpp +++ b/src/test/basics/StringUtilities_test.cpp @@ -322,6 +322,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE(StringUtilities, ripple_basics, ripple); +BEAST_DEFINE_TESTSUITE(StringUtilities, basics, ripple); } // namespace ripple diff --git a/src/test/basics/TaggedCache_test.cpp b/src/test/basics/TaggedCache_test.cpp index ce33455110..ec450e46dd 100644 --- a/src/test/basics/TaggedCache_test.cpp +++ b/src/test/basics/TaggedCache_test.cpp @@ -151,6 +151,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE(TaggedCache, common, ripple); +BEAST_DEFINE_TESTSUITE(TaggedCache, basics, ripple); } // namespace ripple diff --git a/src/test/basics/XRPAmount_test.cpp b/src/test/basics/XRPAmount_test.cpp index 08745b61e3..452ab80dda 100644 --- a/src/test/basics/XRPAmount_test.cpp +++ b/src/test/basics/XRPAmount_test.cpp @@ -344,6 +344,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE(XRPAmount, protocol, ripple); +BEAST_DEFINE_TESTSUITE(XRPAmount, basics, ripple); } // namespace ripple diff --git 
a/src/test/basics/base58_test.cpp b/src/test/basics/base58_test.cpp index 799f6537dc..590f19a44e 100644 --- a/src/test/basics/base58_test.cpp +++ b/src/test/basics/base58_test.cpp @@ -470,7 +470,7 @@ class base58_test : public beast::unit_test::suite } }; -BEAST_DEFINE_TESTSUITE(base58, ripple_basics, ripple); +BEAST_DEFINE_TESTSUITE(base58, basics, ripple); } // namespace test } // namespace ripple diff --git a/src/test/basics/base_uint_test.cpp b/src/test/basics/base_uint_test.cpp index 8058e0d6f0..6ee9f0901a 100644 --- a/src/test/basics/base_uint_test.cpp +++ b/src/test/basics/base_uint_test.cpp @@ -366,7 +366,7 @@ struct base_uint_test : beast::unit_test::suite } }; -BEAST_DEFINE_TESTSUITE(base_uint, ripple_basics, ripple); +BEAST_DEFINE_TESTSUITE(base_uint, basics, ripple); } // namespace test } // namespace ripple diff --git a/src/test/basics/join_test.cpp b/src/test/basics/join_test.cpp index 1b09482824..e533635bcb 100644 --- a/src/test/basics/join_test.cpp +++ b/src/test/basics/join_test.cpp @@ -99,7 +99,7 @@ struct join_test : beast::unit_test::suite } }; // namespace test -BEAST_DEFINE_TESTSUITE(join, ripple_basics, ripple); +BEAST_DEFINE_TESTSUITE(join, basics, ripple); } // namespace test } // namespace ripple diff --git a/src/test/beast/IPEndpoint_test.cpp b/src/test/beast/IPEndpoint_test.cpp index 7b3eba55f1..aed6d715d4 100644 --- a/src/test/beast/IPEndpoint_test.cpp +++ b/src/test/beast/IPEndpoint_test.cpp @@ -478,7 +478,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(IPEndpoint, net, beast); +BEAST_DEFINE_TESTSUITE(IPEndpoint, beast, beast); } // namespace IP } // namespace beast diff --git a/src/test/beast/LexicalCast_test.cpp b/src/test/beast/LexicalCast_test.cpp index 22638f27e6..686546a475 100644 --- a/src/test/beast/LexicalCast_test.cpp +++ b/src/test/beast/LexicalCast_test.cpp @@ -289,6 +289,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE(LexicalCast, beast_core, beast); +BEAST_DEFINE_TESTSUITE(LexicalCast, beast, beast); } // namespace beast diff --git a/src/test/beast/SemanticVersion_test.cpp b/src/test/beast/SemanticVersion_test.cpp index fb7a3e3e4f..af9d2808fb 100644 --- a/src/test/beast/SemanticVersion_test.cpp +++ b/src/test/beast/SemanticVersion_test.cpp @@ -280,5 +280,5 @@ public: } }; -BEAST_DEFINE_TESTSUITE(SemanticVersion, beast_core, beast); +BEAST_DEFINE_TESTSUITE(SemanticVersion, beast, beast); } // namespace beast diff --git a/src/test/beast/aged_associative_container_test.cpp b/src/test/beast/aged_associative_container_test.cpp index 586f486872..017181df22 100644 --- a/src/test/beast/aged_associative_container_test.cpp +++ b/src/test/beast/aged_associative_container_test.cpp @@ -1936,13 +1936,13 @@ public: } }; -BEAST_DEFINE_TESTSUITE(aged_set, container, beast); -BEAST_DEFINE_TESTSUITE(aged_map, container, beast); -BEAST_DEFINE_TESTSUITE(aged_multiset, container, beast); -BEAST_DEFINE_TESTSUITE(aged_multimap, container, beast); -BEAST_DEFINE_TESTSUITE(aged_unordered_set, container, beast); -BEAST_DEFINE_TESTSUITE(aged_unordered_map, container, beast); -BEAST_DEFINE_TESTSUITE(aged_unordered_multiset, container, beast); -BEAST_DEFINE_TESTSUITE(aged_unordered_multimap, container, beast); +BEAST_DEFINE_TESTSUITE(aged_set, beast, beast); +BEAST_DEFINE_TESTSUITE(aged_map, beast, beast); +BEAST_DEFINE_TESTSUITE(aged_multiset, beast, beast); +BEAST_DEFINE_TESTSUITE(aged_multimap, beast, beast); +BEAST_DEFINE_TESTSUITE(aged_unordered_set, beast, beast); +BEAST_DEFINE_TESTSUITE(aged_unordered_map, beast, beast); +BEAST_DEFINE_TESTSUITE(aged_unordered_multiset, beast, 
beast); +BEAST_DEFINE_TESTSUITE(aged_unordered_multimap, beast, beast); } // namespace beast diff --git a/src/test/beast/beast_CurrentThreadName_test.cpp b/src/test/beast/beast_CurrentThreadName_test.cpp index 839aaaef0a..e1de5d9ae9 100644 --- a/src/test/beast/beast_CurrentThreadName_test.cpp +++ b/src/test/beast/beast_CurrentThreadName_test.cpp @@ -86,7 +86,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(CurrentThreadName, core, beast); +BEAST_DEFINE_TESTSUITE(CurrentThreadName, beast, beast); } // namespace test } // namespace ripple diff --git a/src/test/beast/beast_Journal_test.cpp b/src/test/beast/beast_Journal_test.cpp index 99badc0fc4..13e2726c89 100644 --- a/src/test/beast/beast_Journal_test.cpp +++ b/src/test/beast/beast_Journal_test.cpp @@ -103,6 +103,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE(Journal, utility, beast); +BEAST_DEFINE_TESTSUITE(Journal, beast, beast); } // namespace beast diff --git a/src/test/beast/beast_PropertyStream_test.cpp b/src/test/beast/beast_PropertyStream_test.cpp index bbbc2fc0e0..585c7cfe6b 100644 --- a/src/test/beast/beast_PropertyStream_test.cpp +++ b/src/test/beast/beast_PropertyStream_test.cpp @@ -238,5 +238,5 @@ public: } }; -BEAST_DEFINE_TESTSUITE(PropertyStream, utility, beast); +BEAST_DEFINE_TESTSUITE(PropertyStream, beast, beast); } // namespace beast diff --git a/src/test/beast/beast_Zero_test.cpp b/src/test/beast/beast_Zero_test.cpp index 9a645334ab..64239fbe85 100644 --- a/src/test/beast/beast_Zero_test.cpp +++ b/src/test/beast/beast_Zero_test.cpp @@ -129,6 +129,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE(Zero, types, beast); +BEAST_DEFINE_TESTSUITE(Zero, beast, beast); } // namespace beast diff --git a/src/test/beast/beast_abstract_clock_test.cpp b/src/test/beast/beast_abstract_clock_test.cpp index b86afdb139..74ab833e9d 100644 --- a/src/test/beast/beast_abstract_clock_test.cpp +++ b/src/test/beast/beast_abstract_clock_test.cpp @@ -84,6 +84,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE_MANUAL(abstract_clock, chrono, beast); +BEAST_DEFINE_TESTSUITE_MANUAL(abstract_clock, beast, beast); } // namespace beast diff --git a/src/test/beast/beast_basic_seconds_clock_test.cpp b/src/test/beast/beast_basic_seconds_clock_test.cpp index c769cde07a..10e5e466f3 100644 --- a/src/test/beast/beast_basic_seconds_clock_test.cpp +++ b/src/test/beast/beast_basic_seconds_clock_test.cpp @@ -33,6 +33,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE(basic_seconds_clock, chrono, beast); +BEAST_DEFINE_TESTSUITE(basic_seconds_clock, beast, beast); } // namespace beast diff --git a/src/test/beast/beast_io_latency_probe_test.cpp b/src/test/beast/beast_io_latency_probe_test.cpp index de3c5d1d20..c72336bf27 100644 --- a/src/test/beast/beast_io_latency_probe_test.cpp +++ b/src/test/beast/beast_io_latency_probe_test.cpp @@ -238,4 +238,4 @@ public: } }; -BEAST_DEFINE_TESTSUITE(io_latency_probe, asio, beast); +BEAST_DEFINE_TESTSUITE(io_latency_probe, beast, beast); diff --git a/src/test/beast/define_print.cpp b/src/test/beast/define_print.cpp index ec86d5d7d7..eca6a70c90 100644 --- a/src/test/beast/define_print.cpp +++ b/src/test/beast/define_print.cpp @@ -46,7 +46,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE_MANUAL(print, unit_test, beast); +BEAST_DEFINE_TESTSUITE_MANUAL(print, beast, beast); } // namespace unit_test } // namespace beast diff --git a/src/test/consensus/NegativeUNL_test.cpp b/src/test/consensus/NegativeUNL_test.cpp index 56558f525f..b56b834726 100644 --- a/src/test/consensus/NegativeUNL_test.cpp +++ b/src/test/consensus/NegativeUNL_test.cpp @@ -1885,8 +1885,8 @@ class 
NegativeUNLVoteFilterValidations_test : public beast::unit_test::suite } }; -BEAST_DEFINE_TESTSUITE(NegativeUNL, ledger, ripple); -BEAST_DEFINE_TESTSUITE(NegativeUNLNoAmendment, ledger, ripple); +BEAST_DEFINE_TESTSUITE(NegativeUNL, consensus, ripple); +BEAST_DEFINE_TESTSUITE(NegativeUNLNoAmendment, consensus, ripple); BEAST_DEFINE_TESTSUITE(NegativeUNLVoteInternal, consensus, ripple); BEAST_DEFINE_TESTSUITE_MANUAL(NegativeUNLVoteScoreTable, consensus, ripple); diff --git a/src/test/app/RCLCensorshipDetector_test.cpp b/src/test/consensus/RCLCensorshipDetector_test.cpp similarity index 98% rename from src/test/app/RCLCensorshipDetector_test.cpp rename to src/test/consensus/RCLCensorshipDetector_test.cpp index 85ba0ab78d..1581dc81c4 100644 --- a/src/test/app/RCLCensorshipDetector_test.cpp +++ b/src/test/consensus/RCLCensorshipDetector_test.cpp @@ -98,6 +98,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE(RCLCensorshipDetector, app, ripple); +BEAST_DEFINE_TESTSUITE(RCLCensorshipDetector, consensus, ripple); } // namespace test } // namespace ripple diff --git a/src/test/csf/BasicNetwork_test.cpp b/src/test/csf/BasicNetwork_test.cpp index 4173db6502..4580dab468 100644 --- a/src/test/csf/BasicNetwork_test.cpp +++ b/src/test/csf/BasicNetwork_test.cpp @@ -146,7 +146,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(BasicNetwork, test, ripple); +BEAST_DEFINE_TESTSUITE(BasicNetwork, csf, ripple); } // namespace test } // namespace ripple diff --git a/src/test/csf/Digraph_test.cpp b/src/test/csf/Digraph_test.cpp index 0cc4be1976..df78a10733 100644 --- a/src/test/csf/Digraph_test.cpp +++ b/src/test/csf/Digraph_test.cpp @@ -92,7 +92,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(Digraph, test, ripple); +BEAST_DEFINE_TESTSUITE(Digraph, csf, ripple); } // namespace test } // namespace ripple diff --git a/src/test/csf/Histogram_test.cpp b/src/test/csf/Histogram_test.cpp index 40274c9046..60f12e9a66 100644 --- a/src/test/csf/Histogram_test.cpp +++ b/src/test/csf/Histogram_test.cpp @@ -81,7 +81,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(Histogram, test, ripple); +BEAST_DEFINE_TESTSUITE(Histogram, csf, ripple); } // namespace test } // namespace ripple diff --git a/src/test/csf/Scheduler_test.cpp b/src/test/csf/Scheduler_test.cpp index 1935e40236..c31b881b03 100644 --- a/src/test/csf/Scheduler_test.cpp +++ b/src/test/csf/Scheduler_test.cpp @@ -83,7 +83,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(Scheduler, test, ripple); +BEAST_DEFINE_TESTSUITE(Scheduler, csf, ripple); } // namespace test } // namespace ripple diff --git a/src/test/json/Object_test.cpp b/src/test/json/Object_test.cpp index 94a9e96cf6..0ad5f76307 100644 --- a/src/test/json/Object_test.cpp +++ b/src/test/json/Object_test.cpp @@ -253,6 +253,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE(JsonObject, ripple_basics, ripple); +BEAST_DEFINE_TESTSUITE(JsonObject, json, ripple); } // namespace Json diff --git a/src/test/json/Output_test.cpp b/src/test/json/Output_test.cpp index 2343843fe3..6421682b01 100644 --- a/src/test/json/Output_test.cpp +++ b/src/test/json/Output_test.cpp @@ -61,6 +61,6 @@ struct Output_test : ripple::test::TestOutputSuite } }; -BEAST_DEFINE_TESTSUITE(Output, ripple_basics, ripple); +BEAST_DEFINE_TESTSUITE(Output, json, ripple); } // namespace Json diff --git a/src/test/json/Writer_test.cpp b/src/test/json/Writer_test.cpp index c5305876ff..3739af07e1 100644 --- a/src/test/json/Writer_test.cpp +++ b/src/test/json/Writer_test.cpp @@ -212,6 +212,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE(JsonWriter, ripple_basics, ripple); +BEAST_DEFINE_TESTSUITE(JsonWriter, 
json, ripple); } // namespace Json diff --git a/src/test/jtx/Env_test.cpp b/src/test/jtx/Env_test.cpp index 2be20d6e33..34d9f6c0e8 100644 --- a/src/test/jtx/Env_test.cpp +++ b/src/test/jtx/Env_test.cpp @@ -944,7 +944,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(Env, app, ripple); +BEAST_DEFINE_TESTSUITE(Env, jtx, ripple); } // namespace test } // namespace ripple diff --git a/src/test/jtx/WSClient_test.cpp b/src/test/jtx/WSClient_test.cpp index 471e6ff31b..431c57558a 100644 --- a/src/test/jtx/WSClient_test.cpp +++ b/src/test/jtx/WSClient_test.cpp @@ -46,7 +46,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(WSClient, test, ripple); +BEAST_DEFINE_TESTSUITE(WSClient, jtx, ripple); } // namespace test } // namespace ripple diff --git a/src/test/nodestore/Backend_test.cpp b/src/test/nodestore/Backend_test.cpp index 488370fbe9..f161f7a0c0 100644 --- a/src/test/nodestore/Backend_test.cpp +++ b/src/test/nodestore/Backend_test.cpp @@ -121,7 +121,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(Backend, ripple_core, ripple); +BEAST_DEFINE_TESTSUITE(Backend, nodestore, ripple); } // namespace NodeStore } // namespace ripple diff --git a/src/test/nodestore/Basics_test.cpp b/src/test/nodestore/Basics_test.cpp index 62a66c9dce..d781bb0c78 100644 --- a/src/test/nodestore/Basics_test.cpp +++ b/src/test/nodestore/Basics_test.cpp @@ -85,7 +85,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(NodeStoreBasic, ripple_core, ripple); +BEAST_DEFINE_TESTSUITE(NodeStoreBasic, nodestore, ripple); } // namespace NodeStore } // namespace ripple diff --git a/src/test/nodestore/Database_test.cpp b/src/test/nodestore/Database_test.cpp index bbf6381ee5..5ecb5b94e8 100644 --- a/src/test/nodestore/Database_test.cpp +++ b/src/test/nodestore/Database_test.cpp @@ -765,7 +765,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(Database, NodeStore, ripple); +BEAST_DEFINE_TESTSUITE(Database, nodestore, ripple); } // namespace NodeStore } // namespace ripple diff --git a/src/test/nodestore/Timing_test.cpp b/src/test/nodestore/Timing_test.cpp index 3df18eee66..1ba5903cbe 100644 --- a/src/test/nodestore/Timing_test.cpp +++ b/src/test/nodestore/Timing_test.cpp @@ -779,7 +779,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE_MANUAL_PRIO(Timing, NodeStore, ripple, 1); +BEAST_DEFINE_TESTSUITE_MANUAL_PRIO(Timing, nodestore, ripple, 1); } // namespace NodeStore } // namespace ripple diff --git a/src/test/nodestore/import_test.cpp b/src/test/nodestore/import_test.cpp index d7865a20fc..11009ec5be 100644 --- a/src/test/nodestore/import_test.cpp +++ b/src/test/nodestore/import_test.cpp @@ -549,7 +549,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE_MANUAL(import, NodeStore, ripple); +BEAST_DEFINE_TESTSUITE_MANUAL(import, nodestore, ripple); #endif diff --git a/src/test/nodestore/varint_test.cpp b/src/test/nodestore/varint_test.cpp index 4f6d3c141c..f047616d79 100644 --- a/src/test/nodestore/varint_test.cpp +++ b/src/test/nodestore/varint_test.cpp @@ -72,7 +72,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(varint, NodeStore, ripple); +BEAST_DEFINE_TESTSUITE(varint, nodestore, ripple); } // namespace tests } // namespace NodeStore diff --git a/src/test/overlay/compression_test.cpp b/src/test/overlay/compression_test.cpp index 4ecbe7f232..01be43d58b 100644 --- a/src/test/overlay/compression_test.cpp +++ b/src/test/overlay/compression_test.cpp @@ -537,7 +537,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE_MANUAL(compression, ripple_data, ripple); +BEAST_DEFINE_TESTSUITE_MANUAL(compression, overlay, ripple); } // namespace test } // namespace ripple diff --git a/src/test/overlay/handshake_test.cpp 
b/src/test/overlay/handshake_test.cpp index 2d5155aaee..936b6e5fff 100644 --- a/src/test/overlay/handshake_test.cpp +++ b/src/test/overlay/handshake_test.cpp @@ -59,7 +59,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(handshake, ripple_data, ripple); +BEAST_DEFINE_TESTSUITE(handshake, overlay, ripple); } // namespace test } // namespace ripple diff --git a/src/test/overlay/reduce_relay_test.cpp b/src/test/overlay/reduce_relay_test.cpp index a8aafcfa06..0047454cf9 100644 --- a/src/test/overlay/reduce_relay_test.cpp +++ b/src/test/overlay/reduce_relay_test.cpp @@ -1748,8 +1748,8 @@ class reduce_relay_simulate_test : public reduce_relay_test } }; -BEAST_DEFINE_TESTSUITE(reduce_relay, ripple_data, ripple); -BEAST_DEFINE_TESTSUITE_MANUAL(reduce_relay_simulate, ripple_data, ripple); +BEAST_DEFINE_TESTSUITE(reduce_relay, overlay, ripple); +BEAST_DEFINE_TESTSUITE_MANUAL(reduce_relay_simulate, overlay, ripple); } // namespace test diff --git a/src/test/overlay/tx_reduce_relay_test.cpp b/src/test/overlay/tx_reduce_relay_test.cpp index 7a6b36ecd2..0024f2b98e 100644 --- a/src/test/overlay/tx_reduce_relay_test.cpp +++ b/src/test/overlay/tx_reduce_relay_test.cpp @@ -284,6 +284,6 @@ private: } }; -BEAST_DEFINE_TESTSUITE(tx_reduce_relay, ripple_data, ripple); +BEAST_DEFINE_TESTSUITE(tx_reduce_relay, overlay, ripple); } // namespace test } // namespace ripple diff --git a/src/test/peerfinder/PeerFinder_test.cpp b/src/test/peerfinder/PeerFinder_test.cpp index 9006b8c1c7..f35cbbdaae 100644 --- a/src/test/peerfinder/PeerFinder_test.cpp +++ b/src/test/peerfinder/PeerFinder_test.cpp @@ -367,7 +367,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(PeerFinder, PeerFinder, ripple); +BEAST_DEFINE_TESTSUITE(PeerFinder, peerfinder, ripple); } // namespace PeerFinder } // namespace ripple diff --git a/src/test/protocol/InnerObjectFormats_test.cpp b/src/test/protocol/InnerObjectFormats_test.cpp index f4d9722392..daf9548b8b 100644 --- a/src/test/protocol/InnerObjectFormats_test.cpp +++ b/src/test/protocol/InnerObjectFormats_test.cpp @@ -201,6 +201,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE(InnerObjectFormatsParsedJSON, ripple_app, ripple); +BEAST_DEFINE_TESTSUITE(InnerObjectFormatsParsedJSON, protocol, ripple); } // namespace ripple diff --git a/src/test/protocol/Memo_test.cpp b/src/test/protocol/Memo_test.cpp index a7fa846a4d..3b36cfc368 100644 --- a/src/test/protocol/Memo_test.cpp +++ b/src/test/protocol/Memo_test.cpp @@ -135,6 +135,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE(Memo, ripple_data, ripple); +BEAST_DEFINE_TESTSUITE(Memo, protocol, ripple); } // namespace ripple diff --git a/src/test/protocol/STAmount_test.cpp b/src/test/protocol/STAmount_test.cpp index d62241f2f4..5d3fdfb28a 100644 --- a/src/test/protocol/STAmount_test.cpp +++ b/src/test/protocol/STAmount_test.cpp @@ -1052,6 +1052,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE(STAmount, ripple_data, ripple); +BEAST_DEFINE_TESTSUITE(STAmount, protocol, ripple); } // namespace ripple diff --git a/src/test/protocol/STIssue_test.cpp b/src/test/protocol/STIssue_test.cpp index 6e8d37331b..6ef80cd379 100644 --- a/src/test/protocol/STIssue_test.cpp +++ b/src/test/protocol/STIssue_test.cpp @@ -159,7 +159,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(STIssue, ripple_data, ripple); +BEAST_DEFINE_TESTSUITE(STIssue, protocol, ripple); } // namespace test } // namespace ripple diff --git a/src/test/protocol/STTx_test.cpp b/src/test/protocol/STTx_test.cpp index f48bea11aa..eaa7a15212 100644 --- a/src/test/protocol/STTx_test.cpp +++ b/src/test/protocol/STTx_test.cpp @@ -1857,7 +1857,7 @@ public: 
} }; -BEAST_DEFINE_TESTSUITE(STTx, ripple_app, ripple); -BEAST_DEFINE_TESTSUITE(InnerObjectFormatsSerializer, ripple_app, ripple); +BEAST_DEFINE_TESTSUITE(STTx, protocol, ripple); +BEAST_DEFINE_TESTSUITE(InnerObjectFormatsSerializer, protocol, ripple); } // namespace ripple diff --git a/src/test/rpc/AMMInfo_test.cpp b/src/test/rpc/AMMInfo_test.cpp index a6e866b1c8..d5bfc8e83d 100644 --- a/src/test/rpc/AMMInfo_test.cpp +++ b/src/test/rpc/AMMInfo_test.cpp @@ -369,7 +369,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(AMMInfo, app, ripple); +BEAST_DEFINE_TESTSUITE(AMMInfo, rpc, ripple); } // namespace test } // namespace ripple diff --git a/src/test/rpc/AccountSet_test.cpp b/src/test/rpc/AccountSet_test.cpp index 5c0ca89305..3615a715cd 100644 --- a/src/test/rpc/AccountSet_test.cpp +++ b/src/test/rpc/AccountSet_test.cpp @@ -597,6 +597,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE_PRIO(AccountSet, app, ripple, 1); +BEAST_DEFINE_TESTSUITE_PRIO(AccountSet, rpc, ripple, 1); } // namespace ripple diff --git a/src/test/rpc/AmendmentBlocked_test.cpp b/src/test/rpc/AmendmentBlocked_test.cpp index bea9cdf57d..4708a873f6 100644 --- a/src/test/rpc/AmendmentBlocked_test.cpp +++ b/src/test/rpc/AmendmentBlocked_test.cpp @@ -255,6 +255,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE(AmendmentBlocked, app, ripple); +BEAST_DEFINE_TESTSUITE(AmendmentBlocked, rpc, ripple); } // namespace ripple diff --git a/src/test/rpc/BookChanges_test.cpp b/src/test/rpc/BookChanges_test.cpp index 1f7b6775f2..41b26415af 100644 --- a/src/test/rpc/BookChanges_test.cpp +++ b/src/test/rpc/BookChanges_test.cpp @@ -143,7 +143,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(BookChanges, app, ripple); +BEAST_DEFINE_TESTSUITE(BookChanges, rpc, ripple); } // namespace test } // namespace ripple diff --git a/src/test/rpc/Book_test.cpp b/src/test/rpc/Book_test.cpp index e885762644..7177ab847c 100644 --- a/src/test/rpc/Book_test.cpp +++ b/src/test/rpc/Book_test.cpp @@ -2019,7 +2019,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE_PRIO(Book, app, ripple, 1); +BEAST_DEFINE_TESTSUITE_PRIO(Book, rpc, ripple, 1); } // namespace test } // namespace ripple diff --git a/src/test/rpc/DeliveredAmount_test.cpp b/src/test/rpc/DeliveredAmount_test.cpp index d084f92f25..c1aa77695d 100644 --- a/src/test/rpc/DeliveredAmount_test.cpp +++ b/src/test/rpc/DeliveredAmount_test.cpp @@ -422,7 +422,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(DeliveredAmount, app, ripple); +BEAST_DEFINE_TESTSUITE(DeliveredAmount, rpc, ripple); } // namespace test } // namespace ripple diff --git a/src/test/rpc/DepositAuthorized_test.cpp b/src/test/rpc/DepositAuthorized_test.cpp index 647f9e25ed..326766221a 100644 --- a/src/test/rpc/DepositAuthorized_test.cpp +++ b/src/test/rpc/DepositAuthorized_test.cpp @@ -638,7 +638,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(DepositAuthorized, app, ripple); +BEAST_DEFINE_TESTSUITE(DepositAuthorized, rpc, ripple); } // namespace test } // namespace ripple diff --git a/src/test/rpc/GatewayBalances_test.cpp b/src/test/rpc/GatewayBalances_test.cpp index 691f32317e..a13e5bc20c 100644 --- a/src/test/rpc/GatewayBalances_test.cpp +++ b/src/test/rpc/GatewayBalances_test.cpp @@ -262,7 +262,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(GatewayBalances, app, ripple); +BEAST_DEFINE_TESTSUITE(GatewayBalances, rpc, ripple); } // namespace test } // namespace ripple diff --git a/src/test/rpc/GetAggregatePrice_test.cpp b/src/test/rpc/GetAggregatePrice_test.cpp index 4e9b950690..9d007f7f52 100644 --- a/src/test/rpc/GetAggregatePrice_test.cpp +++ b/src/test/rpc/GetAggregatePrice_test.cpp @@ -346,7 
+346,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(GetAggregatePrice, app, ripple); +BEAST_DEFINE_TESTSUITE(GetAggregatePrice, rpc, ripple); } // namespace oracle } // namespace jtx diff --git a/src/test/rpc/Handler_test.cpp b/src/test/rpc/Handler_test.cpp index 8eb0c8d01d..c58d29252b 100644 --- a/src/test/rpc/Handler_test.cpp +++ b/src/test/rpc/Handler_test.cpp @@ -128,6 +128,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE_MANUAL(Handler, test, ripple); +BEAST_DEFINE_TESTSUITE_MANUAL(Handler, rpc, ripple); } // namespace ripple::test diff --git a/src/test/rpc/JSONRPC_test.cpp b/src/test/rpc/JSONRPC_test.cpp index 1612d1b455..31bdacfb9c 100644 --- a/src/test/rpc/JSONRPC_test.cpp +++ b/src/test/rpc/JSONRPC_test.cpp @@ -2936,7 +2936,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(JSONRPC, ripple_app, ripple); +BEAST_DEFINE_TESTSUITE(JSONRPC, rpc, ripple); } // namespace RPC } // namespace ripple diff --git a/src/test/rpc/KeyGeneration_test.cpp b/src/test/rpc/KeyGeneration_test.cpp index 07ebd93dd3..3ea6a07e94 100644 --- a/src/test/rpc/KeyGeneration_test.cpp +++ b/src/test/rpc/KeyGeneration_test.cpp @@ -894,7 +894,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(WalletPropose, ripple_basics, ripple); +BEAST_DEFINE_TESTSUITE(WalletPropose, rpc, ripple); } // namespace RPC } // namespace ripple diff --git a/src/test/rpc/LedgerClosed_test.cpp b/src/test/rpc/LedgerClosed_test.cpp index fc7b3a7dac..37d6b1e393 100644 --- a/src/test/rpc/LedgerClosed_test.cpp +++ b/src/test/rpc/LedgerClosed_test.cpp @@ -68,6 +68,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE(LedgerClosed, app, ripple); +BEAST_DEFINE_TESTSUITE(LedgerClosed, rpc, ripple); } // namespace ripple diff --git a/src/test/rpc/LedgerData_test.cpp b/src/test/rpc/LedgerData_test.cpp index 54f51255d1..d57b33013a 100644 --- a/src/test/rpc/LedgerData_test.cpp +++ b/src/test/rpc/LedgerData_test.cpp @@ -523,6 +523,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE_PRIO(LedgerData, app, ripple, 1); +BEAST_DEFINE_TESTSUITE_PRIO(LedgerData, rpc, ripple, 1); } // namespace ripple diff --git a/src/test/rpc/LedgerEntry_test.cpp b/src/test/rpc/LedgerEntry_test.cpp index b5cab9d13c..89cb7b72eb 100644 --- a/src/test/rpc/LedgerEntry_test.cpp +++ b/src/test/rpc/LedgerEntry_test.cpp @@ -2711,8 +2711,8 @@ public: } }; -BEAST_DEFINE_TESTSUITE(LedgerEntry, app, ripple); -BEAST_DEFINE_TESTSUITE(LedgerEntry_XChain, app, ripple); +BEAST_DEFINE_TESTSUITE(LedgerEntry, rpc, ripple); +BEAST_DEFINE_TESTSUITE(LedgerEntry_XChain, rpc, ripple); } // namespace test } // namespace ripple diff --git a/src/test/rpc/LedgerRPC_test.cpp b/src/test/rpc/LedgerRPC_test.cpp index 9ba9c9a655..9309fbdd6c 100644 --- a/src/test/rpc/LedgerRPC_test.cpp +++ b/src/test/rpc/LedgerRPC_test.cpp @@ -785,7 +785,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(LedgerRPC, app, ripple); +BEAST_DEFINE_TESTSUITE(LedgerRPC, rpc, ripple); } // namespace test } // namespace ripple diff --git a/src/test/rpc/LedgerRequestRPC_test.cpp b/src/test/rpc/LedgerRequestRPC_test.cpp index b34233991e..03be9fb29b 100644 --- a/src/test/rpc/LedgerRequestRPC_test.cpp +++ b/src/test/rpc/LedgerRequestRPC_test.cpp @@ -376,7 +376,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(LedgerRequestRPC, app, ripple); +BEAST_DEFINE_TESTSUITE(LedgerRequestRPC, rpc, ripple); } // namespace RPC } // namespace ripple diff --git a/src/test/rpc/NoRipple_test.cpp b/src/test/rpc/NoRipple_test.cpp index 926de31e83..93457ada8c 100644 --- a/src/test/rpc/NoRipple_test.cpp +++ b/src/test/rpc/NoRipple_test.cpp @@ -299,7 +299,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(NoRipple, app, ripple); 
+BEAST_DEFINE_TESTSUITE(NoRipple, rpc, ripple); } // namespace test } // namespace ripple diff --git a/src/test/rpc/OwnerInfo_test.cpp b/src/test/rpc/OwnerInfo_test.cpp index 15bbad42e3..0c517058ca 100644 --- a/src/test/rpc/OwnerInfo_test.cpp +++ b/src/test/rpc/OwnerInfo_test.cpp @@ -219,6 +219,6 @@ public: } }; -BEAST_DEFINE_TESTSUITE(OwnerInfo, app, ripple); +BEAST_DEFINE_TESTSUITE(OwnerInfo, rpc, ripple); } // namespace ripple diff --git a/src/test/rpc/RPCCall_test.cpp b/src/test/rpc/RPCCall_test.cpp index be0f32b5ce..b73f2e11a0 100644 --- a/src/test/rpc/RPCCall_test.cpp +++ b/src/test/rpc/RPCCall_test.cpp @@ -5948,7 +5948,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(RPCCall, app, ripple); +BEAST_DEFINE_TESTSUITE(RPCCall, rpc, ripple); } // namespace test } // namespace ripple diff --git a/src/test/rpc/RPCHelpers_test.cpp b/src/test/rpc/RPCHelpers_test.cpp index 531649b8af..1716301dae 100644 --- a/src/test/rpc/RPCHelpers_test.cpp +++ b/src/test/rpc/RPCHelpers_test.cpp @@ -87,7 +87,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(RPCHelpers, app, ripple); +BEAST_DEFINE_TESTSUITE(RPCHelpers, rpc, ripple); } // namespace test } // namespace ripple diff --git a/src/test/rpc/RPCOverload_test.cpp b/src/test/rpc/RPCOverload_test.cpp index efe69496fb..35755eff20 100644 --- a/src/test/rpc/RPCOverload_test.cpp +++ b/src/test/rpc/RPCOverload_test.cpp @@ -87,7 +87,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(RPCOverload, app, ripple); +BEAST_DEFINE_TESTSUITE(RPCOverload, rpc, ripple); } // namespace test } // namespace ripple diff --git a/src/test/rpc/RobustTransaction_test.cpp b/src/test/rpc/RobustTransaction_test.cpp index aa53bd6e0a..bfd9e6251b 100644 --- a/src/test/rpc/RobustTransaction_test.cpp +++ b/src/test/rpc/RobustTransaction_test.cpp @@ -510,7 +510,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(RobustTransaction, app, ripple); +BEAST_DEFINE_TESTSUITE(RobustTransaction, rpc, ripple); } // namespace test } // namespace ripple diff --git a/src/test/rpc/Roles_test.cpp b/src/test/rpc/Roles_test.cpp index 5ac8654330..949d84c1e5 100644 --- a/src/test/rpc/Roles_test.cpp +++ b/src/test/rpc/Roles_test.cpp @@ -389,7 +389,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(Roles, app, ripple); +BEAST_DEFINE_TESTSUITE(Roles, rpc, ripple); } // namespace test diff --git a/src/test/rpc/ServerInfo_test.cpp b/src/test/rpc/ServerInfo_test.cpp index 21312ef8ff..b5780635cd 100644 --- a/src/test/rpc/ServerInfo_test.cpp +++ b/src/test/rpc/ServerInfo_test.cpp @@ -308,7 +308,7 @@ admin = 127.0.0.1 } }; -BEAST_DEFINE_TESTSUITE(ServerInfo, app, ripple); +BEAST_DEFINE_TESTSUITE(ServerInfo, rpc, ripple); } // namespace test } // namespace ripple diff --git a/src/test/rpc/Status_test.cpp b/src/test/rpc/Status_test.cpp index e37fe2dabd..884e684fb3 100644 --- a/src/test/rpc/Status_test.cpp +++ b/src/test/rpc/Status_test.cpp @@ -94,7 +94,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(codeString, Status, RPC); +BEAST_DEFINE_TESTSUITE(codeString, rpc, RPC); class fillJson_test : public beast::unit_test::suite { @@ -218,7 +218,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(fillJson, Status, RPC); +BEAST_DEFINE_TESTSUITE(fillJson, rpc, RPC); } // namespace RPC } // namespace ripple diff --git a/src/test/rpc/Subscribe_test.cpp b/src/test/rpc/Subscribe_test.cpp index e0db79bf53..9ed02fa532 100644 --- a/src/test/rpc/Subscribe_test.cpp +++ b/src/test/rpc/Subscribe_test.cpp @@ -1608,7 +1608,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(Subscribe, app, ripple); +BEAST_DEFINE_TESTSUITE(Subscribe, rpc, ripple); } // namespace test } // namespace ripple diff --git 
a/src/test/rpc/ValidatorRPC_test.cpp b/src/test/rpc/ValidatorRPC_test.cpp index d03cbbf841..d139a662de 100644 --- a/src/test/rpc/ValidatorRPC_test.cpp +++ b/src/test/rpc/ValidatorRPC_test.cpp @@ -594,7 +594,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(ValidatorRPC, app, ripple); +BEAST_DEFINE_TESTSUITE(ValidatorRPC, rpc, ripple); } // namespace test } // namespace ripple diff --git a/src/test/server/Server_test.cpp b/src/test/server/Server_test.cpp index f8895ae7e9..fab271ff1c 100644 --- a/src/test/server/Server_test.cpp +++ b/src/test/server/Server_test.cpp @@ -533,7 +533,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(Server, http, ripple); +BEAST_DEFINE_TESTSUITE(Server, server, ripple); } // namespace test } // namespace ripple diff --git a/src/test/shamap/SHAMap_test.cpp b/src/test/shamap/SHAMap_test.cpp index 3197e0944d..1a15310b58 100644 --- a/src/test/shamap/SHAMap_test.cpp +++ b/src/test/shamap/SHAMap_test.cpp @@ -402,7 +402,7 @@ class SHAMapPathProof_test : public beast::unit_test::suite } }; -BEAST_DEFINE_TESTSUITE(SHAMap, ripple_app, ripple); -BEAST_DEFINE_TESTSUITE(SHAMapPathProof, ripple_app, ripple); +BEAST_DEFINE_TESTSUITE(SHAMap, shamap, ripple); +BEAST_DEFINE_TESTSUITE(SHAMapPathProof, shamap, ripple); } // namespace tests } // namespace ripple From c9d73b613596c50e6ab5c2d3719d4bba5536d631 Mon Sep 17 00:00:00 2001 From: Vito Tumas <5780819+Tapanito@users.noreply.github.com> Date: Mon, 11 Aug 2025 20:52:47 +0200 Subject: [PATCH 116/244] fix: Improve logging of the reason to refuse a peer connection (#5664) Currently, all peer connection rejections are logged with the reason "slots full". This is inaccurate, as the PeerFinder can also reject connections if they are a duplicate. This change updates the logging logic to correctly report the specific reason (full or duplicate) for a rejected peer connection, providing more accurate diagnostic information. 
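At a glance, the shape of the change, condensed from the hunks below (the enum and helper are quoted from PeerfinderManager.h; the caller line is paraphrased):

    #include <string_view>

    /** Possible results from activating a slot. */
    enum class Result { duplicate, full, success };

    // New helper: map an activation result to a log-friendly string.
    inline std::string_view
    to_string(Result result) noexcept
    {
        switch (result)
        {
            case Result::success:
                return "success";
            case Result::duplicate:
                return "duplicate connection";
            case Result::full:
                return "slots full";
        }
        return "unknown";
    }

    // Callers now report the actual reason instead of assuming "slots full",
    // e.g. in ConnectAttempt::processResponse():
    //   if (result != PeerFinder::Result::success)
    //       return fail("Outbound " + std::string(to_string(result)));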
--- src/xrpld/overlay/detail/ConnectAttempt.cpp | 2 +- src/xrpld/overlay/detail/OverlayImpl.cpp | 7 ++--- src/xrpld/peerfinder/PeerfinderManager.h | 32 +++++++++++++++++++++ 3 files changed, 36 insertions(+), 5 deletions(-) diff --git a/src/xrpld/overlay/detail/ConnectAttempt.cpp b/src/xrpld/overlay/detail/ConnectAttempt.cpp index 84fbd36d32..61049579c5 100644 --- a/src/xrpld/overlay/detail/ConnectAttempt.cpp +++ b/src/xrpld/overlay/detail/ConnectAttempt.cpp @@ -379,7 +379,7 @@ ConnectAttempt::processResponse() auto const result = overlay_.peerFinder().activate( slot_, publicKey, static_cast(member)); if (result != PeerFinder::Result::success) - return fail("Outbound slots full"); + return fail("Outbound " + std::string(to_string(result))); auto const peer = std::make_shared( app_, diff --git a/src/xrpld/overlay/detail/OverlayImpl.cpp b/src/xrpld/overlay/detail/OverlayImpl.cpp index 53b4cad646..874f951f56 100644 --- a/src/xrpld/overlay/detail/OverlayImpl.cpp +++ b/src/xrpld/overlay/detail/OverlayImpl.cpp @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -41,8 +42,6 @@ #include -#include "xrpld/overlay/detail/TrafficCount.h" - namespace ripple { namespace CrawlOptions { @@ -269,8 +268,8 @@ OverlayImpl::onHandoff( if (result != PeerFinder::Result::success) { m_peerFinder->on_closed(slot); - JLOG(journal.debug()) - << "Peer " << remote_endpoint << " redirected, slots full"; + JLOG(journal.debug()) << "Peer " << remote_endpoint + << " redirected, " << to_string(result); handoff.moved = false; handoff.response = makeRedirectResponse( slot, request, remote_endpoint.address()); diff --git a/src/xrpld/peerfinder/PeerfinderManager.h b/src/xrpld/peerfinder/PeerfinderManager.h index a670fb8780..f399251c38 100644 --- a/src/xrpld/peerfinder/PeerfinderManager.h +++ b/src/xrpld/peerfinder/PeerfinderManager.h @@ -28,6 +28,8 @@ #include +#include + namespace ripple { namespace PeerFinder { @@ -136,6 +138,36 @@ using Endpoints = std::vector; /** Possible results from activating a slot. */ enum class Result { duplicate, full, success }; +/** + * @brief Converts a `Result` enum value to its string representation. + * + * This function provides a human-readable string for a given `Result` enum, + * which is useful for logging, debugging, or displaying status messages. + * + * @param result The `Result` enum value to convert. + * @return A `std::string_view` representing the enum value. Returns "unknown" + * if the enum value is not explicitly handled. + * + * @note This function returns a `std::string_view` for performance. + * A `std::string` would need to allocate memory on the heap and copy the + * string literal into it every time the function is called. + */ +inline std::string_view +to_string(Result result) noexcept +{ + switch (result) + { + case Result::success: + return "success"; + case Result::duplicate: + return "duplicate connection"; + case Result::full: + return "slots full"; + } + + return "unknown"; +} + /** Maintains a set of IP addresses used for getting into the network. 
*/ class Manager : public beast::PropertyStream::Source { From da82e526136bf05d7a60dd6262f6f3186e820db5 Mon Sep 17 00:00:00 2001 From: Michael Legleux Date: Tue, 12 Aug 2025 13:40:34 -0700 Subject: [PATCH 117/244] Set version to 2.6.0-rc1 --- src/libxrpl/protocol/BuildInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libxrpl/protocol/BuildInfo.cpp b/src/libxrpl/protocol/BuildInfo.cpp index 4cb6fbfd36..fb4bc086f6 100644 --- a/src/libxrpl/protocol/BuildInfo.cpp +++ b/src/libxrpl/protocol/BuildInfo.cpp @@ -36,7 +36,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "2.5.0" +char const* const versionString = "2.6.0-rc1" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From c9a723128a16ce8b78469aa53fc3d334cae0aa1d Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Wed, 13 Aug 2025 12:23:36 +0100 Subject: [PATCH 118/244] Fix BUILD.md instruction (#5676) --- BUILD.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/BUILD.md b/BUILD.md index 958bf19b8c..c8ec31f826 100644 --- a/BUILD.md +++ b/BUILD.md @@ -147,8 +147,9 @@ git sparse-checkout set recipes/snappy git sparse-checkout add recipes/soci git fetch origin master git checkout master -conan export --version 1.1.10 external/recipes/snappy -conan export --version 4.0.3 external/recipes/soci +conan export --version 1.1.10 recipes/snappy/all +conan export --version 4.0.3 recipes/soci/all +rm -rf .git ``` In the case we switch to a newer version of a dependency that still requires a From 28eec6ce1b606adff043885cd1f9d85124baa4a5 Mon Sep 17 00:00:00 2001 From: "Elliot." Date: Wed, 13 Aug 2025 11:00:22 -0700 Subject: [PATCH 119/244] Update .git-blame-ignore-revs for #5657 (#5675) Now that #5657 has been squashed and merged, we can add its commit hash to .git-blame-ignore-revs. --- .git-blame-ignore-revs | 1 + 1 file changed, 1 insertion(+) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index a72fc4afd8..a9805e705c 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -11,3 +11,4 @@ b9d007813378ad0ff45660dc07285b823c7e9855 fe9a5365b8a52d4acc42eb27369247e6f238a4f9 9a93577314e6a8d4b4a8368cc9d2b15a5d8303e8 552377c76f55b403a1c876df873a23d780fcc81c +97f0747e103f13e26e45b731731059b32f7679ac From de33a6a241ef76b049b440afbfce20a31101c186 Mon Sep 17 00:00:00 2001 From: Bart Date: Thu, 14 Aug 2025 06:07:09 -0400 Subject: [PATCH 120/244] fix: Add -Wno-deprecated-declarations for Clang only (#5680) This change adds `-Wno-deprecated-declarations` for Clang only (not for GCC) builds in `cmake/RippledCompiler.cmake`. 
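For context, the same compile flag makes the per-header Clang workarounds unnecessary, so this change also deletes them. The blocks removed from Expected.h and hash_append.h (condensed below; the wrapped include targets are elided in this extract, so the one shown is a placeholder) all followed the same push/ignore/pop pattern that the CMake flag now applies globally for Clang builds:

    #if defined(__clang__)
    #pragma clang diagnostic push
    #pragma clang diagnostic ignored "-Wdeprecated"
    #pragma clang diagnostic ignored "-Wdeprecated-declarations"
    #endif

    #include <wrapped_third_party_header.hpp>  // placeholder: the actual include is elided in this extract

    #if defined(__clang__)
    #pragma clang diagnostic pop
    #endif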
--- cmake/RippledCompiler.cmake | 1 + include/xrpl/basics/Expected.h | 10 ---------- include/xrpl/beast/hash/hash_append.h | 27 +-------------------------- 3 files changed, 2 insertions(+), 36 deletions(-) diff --git a/cmake/RippledCompiler.cmake b/cmake/RippledCompiler.cmake index 30058fd503..bc3a62a48c 100644 --- a/cmake/RippledCompiler.cmake +++ b/cmake/RippledCompiler.cmake @@ -94,6 +94,7 @@ else () INTERFACE -Wall -Wdeprecated + $<$:-Wno-deprecated-declarations> $<$:-Wextra -Wno-unused-parameter> $<$:-Werror> -fstack-protector diff --git a/include/xrpl/basics/Expected.h b/include/xrpl/basics/Expected.h index d2440f63ab..9afb160d9d 100644 --- a/include/xrpl/basics/Expected.h +++ b/include/xrpl/basics/Expected.h @@ -22,18 +22,8 @@ #include -#if defined(__clang__) -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wdeprecated" -#pragma clang diagnostic ignored "-Wdeprecated-declarations" -#endif - #include -#if defined(__clang__) -#pragma clang diagnostic pop -#endif - #include namespace ripple { diff --git a/include/xrpl/beast/hash/hash_append.h b/include/xrpl/beast/hash/hash_append.h index e113567ab1..a4ffeaf30c 100644 --- a/include/xrpl/beast/hash/hash_append.h +++ b/include/xrpl/beast/hash/hash_append.h @@ -24,35 +24,10 @@ #include #include -/* - -Workaround for overzealous clang warning, which trips on libstdc++ headers - - In file included from - /usr/lib/gcc/x86_64-linux-gnu/12/../../../../include/c++/12/bits/stl_algo.h:61: - /usr/lib/gcc/x86_64-linux-gnu/12/../../../../include/c++/12/bits/stl_tempbuf.h:263:8: - error: 'get_temporary_buffer> *>>' is deprecated - [-Werror,-Wdeprecated-declarations] 263 | - std::get_temporary_buffer(_M_original_len)); - ^ -*/ - -#if defined(__clang__) -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wdeprecated" -#pragma clang diagnostic ignored "-Wdeprecated-declarations" -#endif - -#include - -#if defined(__clang__) -#pragma clang diagnostic pop -#endif - #include #include #include +#include #include #include #include From a14551b1517382ae0626471fb852ee55333edf4e Mon Sep 17 00:00:00 2001 From: "Elliot." Date: Thu, 14 Aug 2025 09:28:01 -0700 Subject: [PATCH 121/244] fix: Change log to debug level for AMM offer retrieval and IOU payment check (#5686) Reduce log noise by changing two log statements from error/warn level to debug level. These logs occur during normal operation when AMM offers are not available or when IOU authorization checks fail, which are expected scenarios that don't require an elevated log level. --- src/xrpld/app/paths/detail/AMMLiquidity.cpp | 2 +- src/xrpld/app/paths/detail/DirectStep.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/xrpld/app/paths/detail/AMMLiquidity.cpp b/src/xrpld/app/paths/detail/AMMLiquidity.cpp index 83894b2e76..f24e67c7e1 100644 --- a/src/xrpld/app/paths/detail/AMMLiquidity.cpp +++ b/src/xrpld/app/paths/detail/AMMLiquidity.cpp @@ -248,7 +248,7 @@ AMMLiquidity::getOffer( return offer; } - JLOG(j_.error()) << "AMMLiquidity::getOffer, failed " + JLOG(j_.debug()) << "AMMLiquidity::getOffer, no valid offer " << ammContext_.multiPath() << " " << ammContext_.curIters() << " " << (clobQuality ? 
clobQuality->rate() : STAmount{}) diff --git a/src/xrpld/app/paths/detail/DirectStep.cpp b/src/xrpld/app/paths/detail/DirectStep.cpp index 4dc9cbf20d..5e62a289a3 100644 --- a/src/xrpld/app/paths/detail/DirectStep.cpp +++ b/src/xrpld/app/paths/detail/DirectStep.cpp @@ -423,7 +423,7 @@ DirectIPaymentStep::check( !((*sleLine)[sfFlags] & authField) && (*sleLine)[sfBalance] == beast::zero) { - JLOG(j_.warn()) + JLOG(j_.debug()) << "DirectStepI: can't receive IOUs from issuer without auth." << " src: " << src_; return terNO_AUTH; From d8628d481d0c439635c6ae7d26ca24f7e9c115b9 Mon Sep 17 00:00:00 2001 From: Bart Date: Thu, 14 Aug 2025 16:17:37 -0400 Subject: [PATCH 122/244] docs: Updates list of maintainers and reviewers (#5687) --- CONTRIBUTING.md | 38 +++++++++++++++++--------------------- 1 file changed, 17 insertions(+), 21 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index fb29de5b7e..b0ae72ae54 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -384,9 +384,8 @@ Maintainers are users with maintain or admin access to the repo. - [bthomee](https://github.com/bthomee) (Ripple) - [intelliot](https://github.com/intelliot) (Ripple) - [JoelKatz](https://github.com/JoelKatz) (Ripple) -- [nixer89](https://github.com/nixer89) (XRP Ledger Foundation) -- [RichardAH](https://github.com/RichardAH) (XRP Ledger Foundation) -- [Silkjaer](https://github.com/Silkjaer) (XRP Ledger Foundation) +- [legleux](https://github.com/legleux) (Ripple) +- [mankins](https://github.com/mankins) (XRP Ledger Foundation) - [WietseWind](https://github.com/WietseWind) (XRPL Labs + XRP Ledger Foundation) - [ximinez](https://github.com/ximinez) (Ripple) @@ -395,27 +394,24 @@ Maintainers are users with maintain or admin access to the repo. Code Reviewers are developers who have the ability to review, approve, and in some cases merge source code changes. 
-- [HowardHinnant](https://github.com/HowardHinnant) (Ripple) -- [scottschurr](https://github.com/scottschurr) (Ripple) -- [seelabs](https://github.com/seelabs) (Ripple) -- [Ed Hennis](https://github.com/ximinez) (Ripple) -- [mvadari](https://github.com/mvadari) (Ripple) -- [thejohnfreeman](https://github.com/thejohnfreeman) (Ripple) +- [a1q123456](https://github.com/a1q123456) (Ripple) - [Bronek](https://github.com/Bronek) (Ripple) -- [manojsdoshi](https://github.com/manojsdoshi) (Ripple) -- [godexsoft](https://github.com/godexsoft) (Ripple) -- [mDuo13](https://github.com/mDuo13) (Ripple) -- [ckniffen](https://github.com/ckniffen) (Ripple) -- [arihantkothari](https://github.com/arihantkothari) (Ripple) -- [pwang200](https://github.com/pwang200) (Ripple) -- [sophiax851](https://github.com/sophiax851) (Ripple) -- [shawnxie999](https://github.com/shawnxie999) (Ripple) -- [gregtatcam](https://github.com/gregtatcam) (Ripple) -- [mtrippled](https://github.com/mtrippled) (Ripple) +- [bthomee](https://github.com/bthomee) (Ripple) - [ckeshava](https://github.com/ckeshava) (Ripple) -- [nbougalis](https://github.com/nbougalis) None -- [RichardAH](https://github.com/RichardAH) (XRPL Labs + XRP Ledger Foundation) - [dangell7](https://github.com/dangell7) (XRPL Labs) +- [godexsoft](https://github.com/godexsoft) (Ripple) +- [gregtatcam](https://github.com/gregtatcam) (Ripple) +- [kuznetsss](https://github.com/kuznetsss) (Ripple) +- [lmaisons](https://github.com/lmaisons) (Ripple) +- [mathbunnyru](https://github.com/mathbunnyru) (Ripple) +- [mvadari](https://github.com/mvadari) (Ripple) +- [oleks-rip](https://github.com/oleks-rip) (Ripple) +- [PeterChen13579](https://github.com/PeterChen13579) (Ripple) +- [pwang200](https://github.com/pwang200) (Ripple) +- [q73zhao](https://github.com/q73zhao) (Ripple) +- [shawnxie999](https://github.com/shawnxie999) (Ripple) +- [Tapanito](https://github.com/Tapanito) (Ripple) +- [ximinez](https://github.com/ximinez) (Ripple) Developers not on this list are able and encouraged to submit feedback on pending code changes (open pull requests). From fb89213d4db171c4bb88703d9f4dee4ea98360a5 Mon Sep 17 00:00:00 2001 From: Michael Legleux Date: Fri, 15 Aug 2025 14:50:35 -0700 Subject: [PATCH 123/244] Set version to 2.6.0-rc2 --- src/libxrpl/protocol/BuildInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libxrpl/protocol/BuildInfo.cpp b/src/libxrpl/protocol/BuildInfo.cpp index fb4bc086f6..0d7ea1a7ca 100644 --- a/src/libxrpl/protocol/BuildInfo.cpp +++ b/src/libxrpl/protocol/BuildInfo.cpp @@ -36,7 +36,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "2.6.0-rc1" +char const* const versionString = "2.6.0-rc2" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From ceb0ce5634e57eaa5207bd13da8705cb0117ebc3 Mon Sep 17 00:00:00 2001 From: Jingchen Date: Sat, 16 Aug 2025 00:27:13 +0100 Subject: [PATCH 124/244] refactor: Decouple net from xrpld and move rpc-related classes to the rpc folder (#5477) As a step of modularisation, this change moves code from `xrpld` to `libxrpl`. 
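The caller-side effect, condensed from the Env.cpp and Config.cpp hunks below, is that HTTPClient no longer takes the whole xrpld Config; only the SSL-verification settings cross the library boundary, which is what removes the moved code's dependency on xrpld:

    // Before: the xrpld Config type leaked into the HTTP client interface.
    //   HTTPClient::initializeSSLContext(*config, debugLog());

    // After: callers pass the three values the client actually needs.
    HTTPClient::initializeSSLContext(
        config->SSL_VERIFY_DIR,
        config->SSL_VERIFY_FILE,
        config->SSL_VERIFY,
        debugLog());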
--- Builds/levelization/results/loops.txt | 9 -------- Builds/levelization/results/ordering.txt | 13 ++++++----- cmake/RippledCore.cmake | 10 +++++++++ cmake/RippledInstall.cmake | 1 + {src/xrpld => include/xrpl}/net/AutoSocket.h | 0 {src/xrpld => include/xrpl}/net/HTTPClient.h | 9 +++++--- .../xrpl}/net/HTTPClientSSLContext.h | 21 +++++++++--------- .../xrpl}/net/RegisterSSLCerts.h | 0 .../net/detail => libxrpl/net}/HTTPClient.cpp | 15 ++++++++----- .../net}/RegisterSSLCerts.cpp | 2 +- .../net/images/interrupt_sequence.png | Bin src/{xrpld => libxrpl}/net/images/states.png | Bin src/test/jtx/impl/Env.cpp | 10 ++++++--- src/test/jtx/impl/utility.cpp | 2 +- src/test/rpc/RPCCall_test.cpp | 2 +- src/xrpld/app/ledger/BookListeners.h | 2 +- src/xrpld/app/main/GRPCServer.h | 2 +- src/xrpld/app/main/Main.cpp | 2 +- src/xrpld/app/misc/NetworkOPs.h | 2 +- src/xrpld/app/misc/detail/WorkSSL.cpp | 7 +++++- src/xrpld/app/misc/detail/WorkSSL.h | 2 +- src/xrpld/app/paths/PathRequest.h | 2 +- src/xrpld/core/detail/Config.cpp | 5 +++-- src/xrpld/rpc/Context.h | 2 +- src/xrpld/{net => rpc}/InfoSub.h | 0 src/xrpld/{net => rpc}/RPCCall.h | 0 src/xrpld/{net => rpc}/RPCSub.h | 2 +- src/xrpld/{net => rpc}/detail/InfoSub.cpp | 2 +- src/xrpld/{net => rpc}/detail/RPCCall.cpp | 13 +++++------ src/xrpld/rpc/detail/RPCHandler.cpp | 2 +- src/xrpld/{net => rpc}/detail/RPCSub.cpp | 4 ++-- src/xrpld/rpc/detail/WSInfoSub.h | 2 +- src/xrpld/rpc/handlers/Subscribe.cpp | 2 +- 33 files changed, 83 insertions(+), 64 deletions(-) rename {src/xrpld => include/xrpl}/net/AutoSocket.h (100%) rename {src/xrpld => include/xrpl}/net/HTTPClient.h (93%) rename {src/xrpld => include/xrpl}/net/HTTPClientSSLContext.h (92%) rename {src/xrpld => include/xrpl}/net/RegisterSSLCerts.h (100%) rename src/{xrpld/net/detail => libxrpl/net}/HTTPClient.cpp (98%) rename src/{xrpld/net/detail => libxrpl/net}/RegisterSSLCerts.cpp (98%) rename src/{xrpld => libxrpl}/net/images/interrupt_sequence.png (100%) rename src/{xrpld => libxrpl}/net/images/states.png (100%) rename src/xrpld/{net => rpc}/InfoSub.h (100%) rename src/xrpld/{net => rpc}/RPCCall.h (100%) rename src/xrpld/{net => rpc}/RPCSub.h (98%) rename src/xrpld/{net => rpc}/detail/InfoSub.cpp (99%) rename src/xrpld/{net => rpc}/detail/RPCCall.cpp (99%) rename src/xrpld/{net => rpc}/detail/RPCSub.cpp (99%) diff --git a/Builds/levelization/results/loops.txt b/Builds/levelization/results/loops.txt index df1d273f93..0bbd65a9e4 100644 --- a/Builds/levelization/results/loops.txt +++ b/Builds/levelization/results/loops.txt @@ -10,9 +10,6 @@ Loop: xrpld.app xrpld.core Loop: xrpld.app xrpld.ledger xrpld.app > xrpld.ledger -Loop: xrpld.app xrpld.net - xrpld.app > xrpld.net - Loop: xrpld.app xrpld.overlay xrpld.overlay > xrpld.app @@ -25,15 +22,9 @@ Loop: xrpld.app xrpld.rpc Loop: xrpld.app xrpld.shamap xrpld.app > xrpld.shamap -Loop: xrpld.core xrpld.net - xrpld.net > xrpld.core - Loop: xrpld.core xrpld.perflog xrpld.perflog == xrpld.core -Loop: xrpld.net xrpld.rpc - xrpld.rpc ~= xrpld.net - Loop: xrpld.overlay xrpld.rpc xrpld.rpc ~= xrpld.overlay diff --git a/Builds/levelization/results/ordering.txt b/Builds/levelization/results/ordering.txt index ce22d8edb0..bf2d1db693 100644 --- a/Builds/levelization/results/ordering.txt +++ b/Builds/levelization/results/ordering.txt @@ -2,6 +2,8 @@ libxrpl.basics > xrpl.basics libxrpl.crypto > xrpl.basics libxrpl.json > xrpl.basics libxrpl.json > xrpl.json +libxrpl.net > xrpl.basics +libxrpl.net > xrpl.net libxrpl.protocol > xrpl.basics libxrpl.protocol > xrpl.json 
libxrpl.protocol > xrpl.protocol @@ -62,9 +64,9 @@ test.jtx > xrpl.basics test.jtx > xrpld.app test.jtx > xrpld.core test.jtx > xrpld.ledger -test.jtx > xrpld.net test.jtx > xrpld.rpc test.jtx > xrpl.json +test.jtx > xrpl.net test.jtx > xrpl.protocol test.jtx > xrpl.resource test.jtx > xrpl.server @@ -109,7 +111,6 @@ test.rpc > test.toplevel test.rpc > xrpl.basics test.rpc > xrpld.app test.rpc > xrpld.core -test.rpc > xrpld.net test.rpc > xrpld.overlay test.rpc > xrpld.rpc test.rpc > xrpl.json @@ -134,6 +135,7 @@ test.toplevel > xrpl.json test.unit_test > xrpl.basics tests.libxrpl > xrpl.basics xrpl.json > xrpl.basics +xrpl.net > xrpl.basics xrpl.protocol > xrpl.basics xrpl.protocol > xrpl.json xrpl.resource > xrpl.basics @@ -149,6 +151,7 @@ xrpld.app > xrpld.consensus xrpld.app > xrpld.nodestore xrpld.app > xrpld.perflog xrpld.app > xrpl.json +xrpld.app > xrpl.net xrpld.app > xrpl.protocol xrpld.app > xrpl.resource xrpld.conditions > xrpl.basics @@ -158,14 +161,11 @@ xrpld.consensus > xrpl.json xrpld.consensus > xrpl.protocol xrpld.core > xrpl.basics xrpld.core > xrpl.json +xrpld.core > xrpl.net xrpld.core > xrpl.protocol xrpld.ledger > xrpl.basics xrpld.ledger > xrpl.json xrpld.ledger > xrpl.protocol -xrpld.net > xrpl.basics -xrpld.net > xrpl.json -xrpld.net > xrpl.protocol -xrpld.net > xrpl.resource xrpld.nodestore > xrpl.basics xrpld.nodestore > xrpld.core xrpld.nodestore > xrpld.unity @@ -189,6 +189,7 @@ xrpld.rpc > xrpld.core xrpld.rpc > xrpld.ledger xrpld.rpc > xrpld.nodestore xrpld.rpc > xrpl.json +xrpld.rpc > xrpl.net xrpld.rpc > xrpl.protocol xrpld.rpc > xrpl.resource xrpld.rpc > xrpl.server diff --git a/cmake/RippledCore.cmake b/cmake/RippledCore.cmake index 1ef5a4ad68..83b27e6c4f 100644 --- a/cmake/RippledCore.cmake +++ b/cmake/RippledCore.cmake @@ -99,6 +99,15 @@ target_link_libraries(xrpl.libxrpl.protocol PUBLIC add_module(xrpl resource) target_link_libraries(xrpl.libxrpl.resource PUBLIC xrpl.libxrpl.protocol) +# Level 06 +add_module(xrpl net) +target_link_libraries(xrpl.libxrpl.net PUBLIC + xrpl.libxrpl.basics + xrpl.libxrpl.json + xrpl.libxrpl.protocol + xrpl.libxrpl.resource +) + add_module(xrpl server) target_link_libraries(xrpl.libxrpl.server PUBLIC xrpl.libxrpl.protocol) @@ -121,6 +130,7 @@ target_link_modules(xrpl PUBLIC protocol resource server + net ) # All headers in libxrpl are in modules. 
diff --git a/cmake/RippledInstall.cmake b/cmake/RippledInstall.cmake index 9ce288d785..f32781f596 100644 --- a/cmake/RippledInstall.cmake +++ b/cmake/RippledInstall.cmake @@ -19,6 +19,7 @@ install ( xrpl.libxrpl.protocol xrpl.libxrpl.resource xrpl.libxrpl.server + xrpl.libxrpl.net xrpl.libxrpl antithesis-sdk-cpp EXPORT RippleExports diff --git a/src/xrpld/net/AutoSocket.h b/include/xrpl/net/AutoSocket.h similarity index 100% rename from src/xrpld/net/AutoSocket.h rename to include/xrpl/net/AutoSocket.h diff --git a/src/xrpld/net/HTTPClient.h b/include/xrpl/net/HTTPClient.h similarity index 93% rename from src/xrpld/net/HTTPClient.h rename to include/xrpl/net/HTTPClient.h index a11b885290..ef295e8e5a 100644 --- a/src/xrpld/net/HTTPClient.h +++ b/include/xrpl/net/HTTPClient.h @@ -20,9 +20,8 @@ #ifndef RIPPLE_NET_HTTPCLIENT_H_INCLUDED #define RIPPLE_NET_HTTPCLIENT_H_INCLUDED -#include - #include +#include #include #include @@ -44,7 +43,11 @@ public: static constexpr auto maxClientHeaderBytes = kilobytes(32); static void - initializeSSLContext(Config const& config, beast::Journal j); + initializeSSLContext( + std::string const& sslVerifyDir, + std::string const& sslVerifyFile, + bool sslVerify, + beast::Journal j); static void get(bool bSSL, diff --git a/src/xrpld/net/HTTPClientSSLContext.h b/include/xrpl/net/HTTPClientSSLContext.h similarity index 92% rename from src/xrpld/net/HTTPClientSSLContext.h rename to include/xrpl/net/HTTPClientSSLContext.h index 68f91b18b0..2f7d6c005e 100644 --- a/src/xrpld/net/HTTPClientSSLContext.h +++ b/include/xrpl/net/HTTPClientSSLContext.h @@ -20,11 +20,10 @@ #ifndef RIPPLE_NET_HTTPCLIENTSSLCONTEXT_H_INCLUDED #define RIPPLE_NET_HTTPCLIENTSSLCONTEXT_H_INCLUDED -#include -#include - #include #include +#include +#include #include #include @@ -37,31 +36,33 @@ class HTTPClientSSLContext { public: explicit HTTPClientSSLContext( - Config const& config, + std::string const& sslVerifyDir, + std::string const& sslVerifyFile, + bool sslVerify, beast::Journal j, boost::asio::ssl::context_base::method method = boost::asio::ssl::context::sslv23) - : ssl_context_{method}, j_(j), verify_{config.SSL_VERIFY} + : ssl_context_{method}, j_(j), verify_{sslVerify} { boost::system::error_code ec; - if (config.SSL_VERIFY_FILE.empty()) + if (sslVerifyFile.empty()) { registerSSLCerts(ssl_context_, ec, j_); - if (ec && config.SSL_VERIFY_DIR.empty()) + if (ec && sslVerifyDir.empty()) Throw(boost::str( boost::format("Failed to set_default_verify_paths: %s") % ec.message())); } else { - ssl_context_.load_verify_file(config.SSL_VERIFY_FILE); + ssl_context_.load_verify_file(sslVerifyFile); } - if (!config.SSL_VERIFY_DIR.empty()) + if (!sslVerifyDir.empty()) { - ssl_context_.add_verify_path(config.SSL_VERIFY_DIR, ec); + ssl_context_.add_verify_path(sslVerifyDir, ec); if (ec) Throw(boost::str( diff --git a/src/xrpld/net/RegisterSSLCerts.h b/include/xrpl/net/RegisterSSLCerts.h similarity index 100% rename from src/xrpld/net/RegisterSSLCerts.h rename to include/xrpl/net/RegisterSSLCerts.h diff --git a/src/xrpld/net/detail/HTTPClient.cpp b/src/libxrpl/net/HTTPClient.cpp similarity index 98% rename from src/xrpld/net/detail/HTTPClient.cpp rename to src/libxrpl/net/HTTPClient.cpp index 901237e1e3..f7d540750a 100644 --- a/src/xrpld/net/detail/HTTPClient.cpp +++ b/src/libxrpl/net/HTTPClient.cpp @@ -17,12 +17,11 @@ */ //============================================================================== -#include -#include -#include - #include #include +#include +#include +#include #include #include @@ -36,9 
+35,13 @@ namespace ripple { static std::optional httpClientSSLContext; void -HTTPClient::initializeSSLContext(Config const& config, beast::Journal j) +HTTPClient::initializeSSLContext( + std::string const& sslVerifyDir, + std::string const& sslVerifyFile, + bool sslVerify, + beast::Journal j) { - httpClientSSLContext.emplace(config, j); + httpClientSSLContext.emplace(sslVerifyDir, sslVerifyFile, sslVerify, j); } //------------------------------------------------------------------------------ diff --git a/src/xrpld/net/detail/RegisterSSLCerts.cpp b/src/libxrpl/net/RegisterSSLCerts.cpp similarity index 98% rename from src/xrpld/net/detail/RegisterSSLCerts.cpp rename to src/libxrpl/net/RegisterSSLCerts.cpp index 5a710323ad..cd5bd631aa 100644 --- a/src/xrpld/net/detail/RegisterSSLCerts.cpp +++ b/src/libxrpl/net/RegisterSSLCerts.cpp @@ -17,7 +17,7 @@ */ //============================================================================== -#include +#include #if BOOST_OS_WINDOWS #include diff --git a/src/xrpld/net/images/interrupt_sequence.png b/src/libxrpl/net/images/interrupt_sequence.png similarity index 100% rename from src/xrpld/net/images/interrupt_sequence.png rename to src/libxrpl/net/images/interrupt_sequence.png diff --git a/src/xrpld/net/images/states.png b/src/libxrpl/net/images/states.png similarity index 100% rename from src/xrpld/net/images/states.png rename to src/libxrpl/net/images/states.png diff --git a/src/test/jtx/impl/Env.cpp b/src/test/jtx/impl/Env.cpp index 7c17687eee..46558a188a 100644 --- a/src/test/jtx/impl/Env.cpp +++ b/src/test/jtx/impl/Env.cpp @@ -30,12 +30,12 @@ #include #include -#include -#include +#include #include #include #include +#include #include #include #include @@ -74,7 +74,11 @@ Env::AppBundle::AppBundle( auto timeKeeper_ = std::make_unique(); timeKeeper = timeKeeper_.get(); // Hack so we don't have to call Config::setup - HTTPClient::initializeSSLContext(*config, debugLog()); + HTTPClient::initializeSSLContext( + config->SSL_VERIFY_DIR, + config->SSL_VERIFY_FILE, + config->SSL_VERIFY, + debugLog()); owned = make_Application( std::move(config), std::move(logs), std::move(timeKeeper_)); app = owned.get(); diff --git a/src/test/jtx/impl/utility.cpp b/src/test/jtx/impl/utility.cpp index afa7ee8f35..27b45a32cb 100644 --- a/src/test/jtx/impl/utility.cpp +++ b/src/test/jtx/impl/utility.cpp @@ -19,7 +19,7 @@ #include -#include +#include #include #include diff --git a/src/test/rpc/RPCCall_test.cpp b/src/test/rpc/RPCCall_test.cpp index b73f2e11a0..d22896388d 100644 --- a/src/test/rpc/RPCCall_test.cpp +++ b/src/test/rpc/RPCCall_test.cpp @@ -18,7 +18,7 @@ #include #include -#include +#include #include #include diff --git a/src/xrpld/app/ledger/BookListeners.h b/src/xrpld/app/ledger/BookListeners.h index ca58bf3058..5522ad3ec0 100644 --- a/src/xrpld/app/ledger/BookListeners.h +++ b/src/xrpld/app/ledger/BookListeners.h @@ -20,7 +20,7 @@ #ifndef RIPPLE_APP_LEDGER_BOOKLISTENERS_H_INCLUDED #define RIPPLE_APP_LEDGER_BOOKLISTENERS_H_INCLUDED -#include +#include #include diff --git a/src/xrpld/app/main/GRPCServer.h b/src/xrpld/app/main/GRPCServer.h index 5ed4ba8454..c48138cd92 100644 --- a/src/xrpld/app/main/GRPCServer.h +++ b/src/xrpld/app/main/GRPCServer.h @@ -22,9 +22,9 @@ #include #include -#include #include #include +#include #include #include #include diff --git a/src/xrpld/app/main/Main.cpp b/src/xrpld/app/main/Main.cpp index 19c8c9910d..3fdf362dd9 100644 --- a/src/xrpld/app/main/Main.cpp +++ b/src/xrpld/app/main/Main.cpp @@ -22,7 +22,7 @@ #include #include #include 
-#include +#include #include #include diff --git a/src/xrpld/app/misc/NetworkOPs.h b/src/xrpld/app/misc/NetworkOPs.h index b8da7d7dc7..639cd782b7 100644 --- a/src/xrpld/app/misc/NetworkOPs.h +++ b/src/xrpld/app/misc/NetworkOPs.h @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include diff --git a/src/xrpld/app/misc/detail/WorkSSL.cpp b/src/xrpld/app/misc/detail/WorkSSL.cpp index 0285f43502..0d6801ab84 100644 --- a/src/xrpld/app/misc/detail/WorkSSL.cpp +++ b/src/xrpld/app/misc/detail/WorkSSL.cpp @@ -33,7 +33,12 @@ WorkSSL::WorkSSL( bool lastStatus, callback_type cb) : WorkBase(host, path, port, ios, lastEndpoint, lastStatus, cb) - , context_(config, j, boost::asio::ssl::context::tlsv12_client) + , context_( + config.SSL_VERIFY_DIR, + config.SSL_VERIFY_FILE, + config.SSL_VERIFY, + j, + boost::asio::ssl::context::tlsv12_client) , stream_(socket_, context_.context()) { auto ec = context_.preConnectVerify(stream_, host_); diff --git a/src/xrpld/app/misc/detail/WorkSSL.h b/src/xrpld/app/misc/detail/WorkSSL.h index 2d423a9e50..6a310986e7 100644 --- a/src/xrpld/app/misc/detail/WorkSSL.h +++ b/src/xrpld/app/misc/detail/WorkSSL.h @@ -22,9 +22,9 @@ #include #include -#include #include +#include #include #include diff --git a/src/xrpld/app/paths/PathRequest.h b/src/xrpld/app/paths/PathRequest.h index aea0e564fb..854a0f6129 100644 --- a/src/xrpld/app/paths/PathRequest.h +++ b/src/xrpld/app/paths/PathRequest.h @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include diff --git a/src/xrpld/core/detail/Config.cpp b/src/xrpld/core/detail/Config.cpp index 1a07109b74..95147e23d5 100644 --- a/src/xrpld/core/detail/Config.cpp +++ b/src/xrpld/core/detail/Config.cpp @@ -19,7 +19,6 @@ #include #include -#include #include #include @@ -27,6 +26,7 @@ #include #include #include +#include #include #include @@ -409,7 +409,8 @@ Config::setup( legacy("database_path", boost::filesystem::absolute(dataDir).string()); } - HTTPClient::initializeSSLContext(*this, j_); + HTTPClient::initializeSSLContext( + this->SSL_VERIFY_DIR, this->SSL_VERIFY_FILE, this->SSL_VERIFY, j_); if (RUN_STANDALONE) LEDGER_HISTORY = 0; diff --git a/src/xrpld/rpc/Context.h b/src/xrpld/rpc/Context.h index 32a7cca653..0b1a8dfbf5 100644 --- a/src/xrpld/rpc/Context.h +++ b/src/xrpld/rpc/Context.h @@ -21,7 +21,7 @@ #define RIPPLE_RPC_CONTEXT_H_INCLUDED #include -#include +#include #include #include diff --git a/src/xrpld/net/InfoSub.h b/src/xrpld/rpc/InfoSub.h similarity index 100% rename from src/xrpld/net/InfoSub.h rename to src/xrpld/rpc/InfoSub.h diff --git a/src/xrpld/net/RPCCall.h b/src/xrpld/rpc/RPCCall.h similarity index 100% rename from src/xrpld/net/RPCCall.h rename to src/xrpld/rpc/RPCCall.h diff --git a/src/xrpld/net/RPCSub.h b/src/xrpld/rpc/RPCSub.h similarity index 98% rename from src/xrpld/net/RPCSub.h rename to src/xrpld/rpc/RPCSub.h index 9730ca2dec..0f106be018 100644 --- a/src/xrpld/net/RPCSub.h +++ b/src/xrpld/rpc/RPCSub.h @@ -21,7 +21,7 @@ #define RIPPLE_NET_RPCSUB_H_INCLUDED #include -#include +#include #include diff --git a/src/xrpld/net/detail/InfoSub.cpp b/src/xrpld/rpc/detail/InfoSub.cpp similarity index 99% rename from src/xrpld/net/detail/InfoSub.cpp rename to src/xrpld/rpc/detail/InfoSub.cpp index 9f394cf08e..de00f518a5 100644 --- a/src/xrpld/net/detail/InfoSub.cpp +++ b/src/xrpld/rpc/detail/InfoSub.cpp @@ -17,7 +17,7 @@ */ //============================================================================== -#include +#include namespace ripple { diff --git 
a/src/xrpld/net/detail/RPCCall.cpp b/src/xrpld/rpc/detail/RPCCall.cpp similarity index 99% rename from src/xrpld/net/detail/RPCCall.cpp rename to src/xrpld/rpc/detail/RPCCall.cpp index 0cc3cb6618..aa8c80fff7 100644 --- a/src/xrpld/net/detail/RPCCall.cpp +++ b/src/xrpld/rpc/detail/RPCCall.cpp @@ -17,12 +17,8 @@ */ //============================================================================== -#include -#include -#include -#include +#include #include -#include #include #include @@ -33,7 +29,10 @@ #include #include #include +#include +#include #include +#include #include #include #include @@ -160,7 +159,7 @@ private: std::string const& strPk, TokenType type = TokenType::AccountPublic) { - if (parseBase58(type, strPk)) + if (parseBase58(type, strPk)) return true; auto pkHex = strUnHex(strPk); @@ -1508,7 +1507,7 @@ rpcClient( } else { - ServerHandler::Setup setup; + ripple::ServerHandler::Setup setup; try { setup = setup_ServerHandler( diff --git a/src/xrpld/rpc/detail/RPCHandler.cpp b/src/xrpld/rpc/detail/RPCHandler.cpp index c261666eb9..b2e4c2c440 100644 --- a/src/xrpld/rpc/detail/RPCHandler.cpp +++ b/src/xrpld/rpc/detail/RPCHandler.cpp @@ -24,9 +24,9 @@ #include #include #include -#include #include #include +#include #include #include #include diff --git a/src/xrpld/net/detail/RPCSub.cpp b/src/xrpld/rpc/detail/RPCSub.cpp similarity index 99% rename from src/xrpld/net/detail/RPCSub.cpp rename to src/xrpld/rpc/detail/RPCSub.cpp index 3f0c923e13..966ad6df4b 100644 --- a/src/xrpld/net/detail/RPCSub.cpp +++ b/src/xrpld/rpc/detail/RPCSub.cpp @@ -17,8 +17,8 @@ */ //============================================================================== -#include -#include +#include +#include #include #include diff --git a/src/xrpld/rpc/detail/WSInfoSub.h b/src/xrpld/rpc/detail/WSInfoSub.h index 1652617455..030eac318e 100644 --- a/src/xrpld/rpc/detail/WSInfoSub.h +++ b/src/xrpld/rpc/detail/WSInfoSub.h @@ -20,7 +20,7 @@ #ifndef RIPPLE_RPC_WSINFOSUB_H #define RIPPLE_RPC_WSINFOSUB_H -#include +#include #include #include diff --git a/src/xrpld/rpc/handlers/Subscribe.cpp b/src/xrpld/rpc/handlers/Subscribe.cpp index e71d973b7b..c089f0255d 100644 --- a/src/xrpld/rpc/handlers/Subscribe.cpp +++ b/src/xrpld/rpc/handlers/Subscribe.cpp @@ -21,8 +21,8 @@ #include #include #include -#include #include +#include #include #include From dc1caa41b2577f5bfd4601aa3590a324f1be8a34 Mon Sep 17 00:00:00 2001 From: Bart Date: Mon, 18 Aug 2025 10:21:43 -0400 Subject: [PATCH 125/244] refactor: Revamp CI workflows (#5661) This change refactors the CI workflows to leverage the new CI Docker images for Debian, Red Hat, and Ubuntu. 
--- .github/actions/build-deps/action.yml | 71 +++ .github/actions/build-test/action.yml | 102 +++++ .github/actions/build/action.yml | 34 -- .github/actions/dependencies/action.yml | 38 -- .../scripts}/levelization/README.md | 10 +- .../scripts/levelization/generate.sh | 4 +- .../scripts}/levelization/results/loops.txt | 0 .../levelization/results/ordering.txt | 0 .github/scripts/strategy-matrix/generate.py | 153 +++++++ .github/scripts/strategy-matrix/linux.json | 170 +++++++ .github/scripts/strategy-matrix/macos.json | 29 ++ .github/scripts/strategy-matrix/windows.json | 26 ++ .github/workflows/build-test.yml | 191 ++++++++ .github/workflows/check-format.yml | 112 +++++ .github/workflows/check-levelization.yml | 46 ++ .github/workflows/check-missing-commits.yml | 62 +++ .github/workflows/clang-format.yml | 64 --- .github/workflows/doxygen.yml | 37 -- .github/workflows/levelization.yml | 53 --- .github/workflows/libxrpl.yml | 91 ---- .github/workflows/macos.yml | 112 ----- .github/workflows/missing-commits.yml | 60 --- .github/workflows/nix.yml | 422 ------------------ .github/workflows/notify-clio.yml | 80 ++++ .github/workflows/on-pr.yml | 133 ++++++ .github/workflows/on-trigger.yml | 140 ++++++ .github/workflows/publish-docs.yml | 58 +++ .github/workflows/windows.yml | 106 ----- .gitignore | 7 +- .prettierignore | 2 + CONTRIBUTING.md | 2 +- README.md | 2 +- conan/global.conf | 9 + external/README.md | 3 +- 34 files changed, 1397 insertions(+), 1032 deletions(-) create mode 100644 .github/actions/build-deps/action.yml create mode 100644 .github/actions/build-test/action.yml delete mode 100644 .github/actions/build/action.yml delete mode 100644 .github/actions/dependencies/action.yml rename {Builds => .github/scripts}/levelization/README.md (95%) rename Builds/levelization/levelization.sh => .github/scripts/levelization/generate.sh (98%) rename {Builds => .github/scripts}/levelization/results/loops.txt (100%) rename {Builds => .github/scripts}/levelization/results/ordering.txt (100%) create mode 100644 .github/scripts/strategy-matrix/generate.py create mode 100644 .github/scripts/strategy-matrix/linux.json create mode 100644 .github/scripts/strategy-matrix/macos.json create mode 100644 .github/scripts/strategy-matrix/windows.json create mode 100644 .github/workflows/build-test.yml create mode 100644 .github/workflows/check-format.yml create mode 100644 .github/workflows/check-levelization.yml create mode 100644 .github/workflows/check-missing-commits.yml delete mode 100644 .github/workflows/clang-format.yml delete mode 100644 .github/workflows/doxygen.yml delete mode 100644 .github/workflows/levelization.yml delete mode 100644 .github/workflows/libxrpl.yml delete mode 100644 .github/workflows/macos.yml delete mode 100644 .github/workflows/missing-commits.yml delete mode 100644 .github/workflows/nix.yml create mode 100644 .github/workflows/notify-clio.yml create mode 100644 .github/workflows/on-pr.yml create mode 100644 .github/workflows/on-trigger.yml create mode 100644 .github/workflows/publish-docs.yml delete mode 100644 .github/workflows/windows.yml create mode 100644 .prettierignore create mode 100644 conan/global.conf diff --git a/.github/actions/build-deps/action.yml b/.github/actions/build-deps/action.yml new file mode 100644 index 0000000000..12d80e859c --- /dev/null +++ b/.github/actions/build-deps/action.yml @@ -0,0 +1,71 @@ +# This action installs and optionally uploads Conan dependencies to a remote +# repository. 
The dependencies will only be uploaded if the credentials are +# provided. +name: Build Conan dependencies + +inputs: + build_dir: + description: 'The directory where to build.' + required: true + type: string + build_type: + description: 'The build type to use.' + required: true + type: choice + options: + - 'Debug' + - 'Release' + conan_remote_name: + description: 'The name of the Conan remote to use.' + required: true + type: string + conan_remote_url: + description: 'The URL of the Conan endpoint to use.' + required: true + type: string + conan_remote_username: + description: 'The username for logging into the Conan remote. If not provided, the dependencies will not be uploaded.' + required: false + type: string + default: '' + conan_remote_password: + description: 'The password for logging into the Conan remote. If not provided, the dependencies will not be uploaded.' + required: false + type: string + default: '' + force_build: + description: 'Force building of all dependencies.' + required: false + type: boolean + default: false + force_upload: + description: 'Force uploading of all dependencies.' + required: false + type: boolean + default: false + +runs: + using: composite + steps: + - name: Install Conan dependencies + shell: bash + run: | + echo 'Installing dependencies.' + mkdir -p ${{ inputs.build_dir }} + cd ${{ inputs.build_dir }} + conan install \ + --output-folder . \ + --build ${{ inputs.force_build && '"*"' || 'missing' }} \ + --options:host '&:tests=True' \ + --options:host '&:xrpld=True' \ + --settings:all build_type=${{ inputs.build_type }} \ + --format=json .. + - name: Upload Conan dependencies + if: ${{ inputs.conan_remote_username && inputs.conan_remote_password }} + shell: bash + working-directory: ${{ inputs.build_dir }} + run: | + echo "Logging into Conan remote '${{ inputs.conan_remote_name }}' at ${{ inputs.conan_remote_url }}." + conan remote login ${{ inputs.conan_remote_name }} "${{ inputs.conan_remote_username }}" --password "${{ inputs.conan_remote_password }}" + echo 'Uploading dependencies.' + conan upload '*' --confirm --check ${{ inputs.force_upload && '--force' || '' }} --remote=${{ inputs.conan_remote_name }} diff --git a/.github/actions/build-test/action.yml b/.github/actions/build-test/action.yml new file mode 100644 index 0000000000..30337ddb98 --- /dev/null +++ b/.github/actions/build-test/action.yml @@ -0,0 +1,102 @@ +# This action build and tests the binary. The Conan dependencies must have +# already been installed (see the build-deps action). +name: Build and Test + +inputs: + build_dir: + description: 'The directory where to build.' + required: true + type: string + build_type: + description: 'The build type to use.' + required: true + type: choice + options: + - 'Debug' + - 'Release' + cmake_args: + description: 'Additional arguments to pass to CMake.' + required: false + type: string + default: '' + cmake_target: + description: 'The CMake target to build.' + required: true + type: string + codecov_token: + description: 'The Codecov token to use for uploading coverage reports.' + required: false + type: string + default: '' + os: + description: 'The operating system to use for the build (linux, macos, or windows).' + required: true + type: choice + options: + - 'linux' + - 'macos' + - 'windows' + +runs: + using: composite + steps: + - name: Configure CMake + shell: bash + working-directory: ${{ inputs.build_dir }} + run: | + echo 'Configuring CMake.' 
+ cmake \ + -G '${{ inputs.os == 'windows' && 'Visual Studio 17 2022' || 'Ninja' }}' \ + -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \ + -DCMAKE_BUILD_TYPE=${{ inputs.build_type }} \ + ${{ inputs.cmake_args }} \ + .. + - name: Build the binary + shell: bash + working-directory: ${{ inputs.build_dir }} + run: | + echo 'Building binary.' + cmake \ + --build . \ + --config ${{ inputs.build_type }} \ + --parallel $(nproc) \ + --target ${{ inputs.cmake_target }} + - name: Check linking + if: ${{ inputs.os == 'linux' }} + shell: bash + working-directory: ${{ inputs.build_dir }} + run: | + echo 'Checking linking.' + ldd ./rippled + if [ "$(ldd ./rippled | grep -E '(libstdc\+\+|libgcc)' | wc -l)" -eq 0 ]; then + echo 'The binary is statically linked.' + else + echo 'The binary is dynamically linked.' + exit 1 + fi + - name: Verify voidstar + if: ${{ contains(inputs.cmake_args, '-Dvoidstar=ON') }} + shell: bash + working-directory: ${{ inputs.build_dir }} + run: | + echo 'Verifying presence of instrumentation.' + ./rippled --version | grep libvoidstar + - name: Test the binary + if: ${{ inputs.cmake_target != 'coverage' }} + shell: bash + working-directory: ${{ inputs.build_dir }}/${{ inputs.os == 'windows' && inputs.build_type || '' }} + run: | + echo 'Testing binary.' + ./rippled --unittest --unittest-jobs $(nproc) + ctest -j $(nproc) --output-on-failure + - name: Upload coverage report + if: ${{ inputs.cmake_target == 'coverage' && inputs.codecov_token }} + uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3 + with: + disable_search: true + disable_telem: true + fail_ci_if_error: true + files: ${{ inputs.build_dir }}/coverage.xml + plugins: noop + token: ${{ inputs.codecov_token }} + verbose: true diff --git a/.github/actions/build/action.yml b/.github/actions/build/action.yml deleted file mode 100644 index 6714369155..0000000000 --- a/.github/actions/build/action.yml +++ /dev/null @@ -1,34 +0,0 @@ -name: build -inputs: - generator: - default: null - configuration: - required: true - cmake-args: - default: null - cmake-target: - default: all -# An implicit input is the environment variable `build_dir`. -runs: - using: composite - steps: - - name: configure - shell: bash - run: | - cd ${build_dir} - cmake \ - ${{ inputs.generator && format('-G "{0}"', inputs.generator) || '' }} \ - -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \ - -DCMAKE_BUILD_TYPE=${{ inputs.configuration }} \ - -Dtests=TRUE \ - -Dxrpld=TRUE \ - ${{ inputs.cmake-args }} \ - .. - - name: build - shell: bash - run: | - cmake \ - --build ${build_dir} \ - --config ${{ inputs.configuration }} \ - --parallel ${NUM_PROCESSORS:-$(nproc)} \ - --target ${{ inputs.cmake-target }} diff --git a/.github/actions/dependencies/action.yml b/.github/actions/dependencies/action.yml deleted file mode 100644 index 0bd28f15dd..0000000000 --- a/.github/actions/dependencies/action.yml +++ /dev/null @@ -1,38 +0,0 @@ -name: dependencies -inputs: - configuration: - required: true -# Implicit inputs are the environment variables `build_dir`, CONAN_REMOTE_URL, -# CONAN_REMOTE_USERNAME, and CONAN_REMOTE_PASSWORD. The latter two are only -# used to upload newly built dependencies to the Conan remote. -runs: - using: composite - steps: - - name: add Conan remote - if: ${{ env.CONAN_REMOTE_URL != '' }} - shell: bash - run: | - echo "Adding Conan remote 'xrplf' at ${{ env.CONAN_REMOTE_URL }}." 
- conan remote add --index 0 --force xrplf ${{ env.CONAN_REMOTE_URL }} - echo "Listing Conan remotes." - conan remote list - - name: install dependencies - shell: bash - run: | - mkdir -p ${{ env.build_dir }} - cd ${{ env.build_dir }} - conan install \ - --output-folder . \ - --build missing \ - --options:host "&:tests=True" \ - --options:host "&:xrpld=True" \ - --settings:all build_type=${{ inputs.configuration }} \ - .. - - name: upload dependencies - if: ${{ env.CONAN_REMOTE_URL != '' && env.CONAN_REMOTE_USERNAME != '' && env.CONAN_REMOTE_PASSWORD != '' && github.ref_type == 'branch' && github.ref_name == github.event.repository.default_branch }} - shell: bash - run: | - echo "Logging into Conan remote 'xrplf' at ${{ env.CONAN_REMOTE_URL }}." - conan remote login xrplf "${{ env.CONAN_REMOTE_USERNAME }}" --password "${{ env.CONAN_REMOTE_PASSWORD }}" - echo "Uploading dependencies." - conan upload '*' --confirm --check --remote xrplf diff --git a/Builds/levelization/README.md b/.github/scripts/levelization/README.md similarity index 95% rename from Builds/levelization/README.md rename to .github/scripts/levelization/README.md index 93aa316b61..ec41a021cc 100644 --- a/Builds/levelization/README.md +++ b/.github/scripts/levelization/README.md @@ -50,7 +50,7 @@ that `test` code should _never_ be included in `ripple` code.) ## Validation -The [levelization.sh](levelization.sh) script takes no parameters, +The [levelization](generate.sh) script takes no parameters, reads no environment variables, and can be run from any directory, as long as it is in the expected location in the rippled repo. It can be run at any time from within a checked out repo, and will @@ -72,15 +72,15 @@ It generates many files of [results](results): desired as described above. In a perfect repo, this file will be empty. This file is committed to the repo, and is used by the [levelization - Github workflow](../../.github/workflows/levelization.yml) to validate + Github workflow](../../workflows/check-levelization.yml) to validate that nothing changed. - [`ordering.txt`](results/ordering.txt): A list showing relationships between modules where there are no loops as they actually exist, as opposed to how they are desired as described above. This file is committed to the repo, and is used by the [levelization - Github workflow](../../.github/workflows/levelization.yml) to validate + Github workflow](../../workflows/check-levelization.yml) to validate that nothing changed. -- [`levelization.yml`](../../.github/workflows/levelization.yml) +- [`levelization.yml`](../../workflows/check-levelization.yml) Github Actions workflow to test that levelization loops haven't changed. Unfortunately, if changes are detected, it can't tell if they are improvements or not, so if you have resolved any issues or @@ -111,4 +111,4 @@ get those details locally. 1. Run `levelization.sh` 2. Grep the modules in `paths.txt`. 
- For example, if a cycle is found `A ~= B`, simply `grep -w -A Builds/levelization/results/paths.txt | grep -w B` + A .github/scripts/levelization/results/paths.txt | grep -w B` diff --git a/Builds/levelization/levelization.sh b/.github/scripts/levelization/generate.sh similarity index 98% rename from Builds/levelization/levelization.sh rename to .github/scripts/levelization/generate.sh index c18ca703f7..775ddf789f 100755 --- a/Builds/levelization/levelization.sh +++ b/.github/scripts/levelization/generate.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Usage: levelization.sh +# Usage: generate.sh # This script takes no parameters, reads no environment variables, # and can be run from any directory, as long as it is in the expected # location in the repo. @@ -19,7 +19,7 @@ export LANG=C rm -rfv results mkdir results includes="$( pwd )/results/rawincludes.txt" -pushd ../.. +pushd ../../.. echo Raw includes: grep -r '^[ ]*#include.*/.*\.h' include src | \ grep -v boost | tee ${includes} diff --git a/Builds/levelization/results/loops.txt b/.github/scripts/levelization/results/loops.txt similarity index 100% rename from Builds/levelization/results/loops.txt rename to .github/scripts/levelization/results/loops.txt diff --git a/Builds/levelization/results/ordering.txt b/.github/scripts/levelization/results/ordering.txt similarity index 100% rename from Builds/levelization/results/ordering.txt rename to .github/scripts/levelization/results/ordering.txt diff --git a/.github/scripts/strategy-matrix/generate.py b/.github/scripts/strategy-matrix/generate.py new file mode 100644 index 0000000000..a5180c942d --- /dev/null +++ b/.github/scripts/strategy-matrix/generate.py @@ -0,0 +1,153 @@ +#!/usr/bin/env python3 +import argparse +import itertools +import json +import re + +''' +Generate a strategy matrix for GitHub Actions CI. + +On each PR commit we will build a selection of Debian, RHEL, Ubuntu, MacOS, and +Windows configurations, while upon merge into the develop, release, or master +branches, we will build all configurations. + +We will further set additional CMake arguments as follows: +- All builds will have the `tests`, `werr`, and `xrpld` options. +- All builds will have the `wextra` option except for GCC 12 and Clang 16. +- All release builds will have the `assert` option. +- Certain Debian Bookworm configurations will change the reference fee, enable + codecov, and enable voidstar in PRs. +''' +def generate_strategy_matrix(all: bool, architecture: list[dict], os: list[dict], build_type: list[str], cmake_args: list[str]) -> dict: + configurations = [] + for architecture, os, build_type, cmake_args in itertools.product(architecture, os, build_type, cmake_args): + # The default CMake target is 'all' for Linux and MacOS and 'install' + # for Windows, but it can get overridden for certain configurations. + cmake_target = 'install' if os["distro_name"] == 'windows' else 'all' + + # Only generate a subset of configurations in PRs. + if not all: + # Debian: + # - Bookworm using GCC 13: Release and Unity on linux/arm64, set + # the reference fee to 500. + # - Bookworm using GCC 15: Debug and no Unity on linux/amd64, enable + # code coverage. + # - Bookworm using Clang 16: Debug and no Unity on linux/arm64, + # enable voidstar. + # - Bookworm using Clang 17: Release and no Unity on linux/amd64, + # set the reference fee to 1000. + # - Bookworm using Clang 20: Debug and Unity on linux/amd64. 
+ if os['distro_name'] == 'debian': + skip = True + if os['distro_version'] == 'bookworm': + if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-13' and build_type == 'Release' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/arm64': + cmake_args = f'{cmake_args} -DUNIT_TEST_REFERENCE_FEE=500' + skip = False + if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-15' and build_type == 'Debug' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/amd64': + cmake_args = f'{cmake_args} -Dcoverage=ON -Dcoverage_format=xml -DCODE_COVERAGE_VERBOSE=ON -DCMAKE_C_FLAGS=-O0 -DCMAKE_CXX_FLAGS=-O0' + cmake_target = 'coverage' + skip = False + if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-16' and build_type == 'Debug' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/arm64': + cmake_args = f'{cmake_args} -Dvoidstar=ON' + skip = False + if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-17' and build_type == 'Release' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/amd64': + cmake_args = f'{cmake_args} -DUNIT_TEST_REFERENCE_FEE=1000' + skip = False + if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-20' and build_type == 'Debug' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/amd64': + skip = False + if skip: + continue + + # RHEL: + # - 9.4 using GCC 12: Debug and Unity on linux/amd64. + # - 9.6 using Clang: Release and no Unity on linux/amd64. + if os['distro_name'] == 'rhel': + skip = True + if os['distro_version'] == '9.4': + if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-12' and build_type == 'Debug' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/amd64': + skip = False + elif os['distro_version'] == '9.6': + if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-any' and build_type == 'Release' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/amd64': + skip = False + if skip: + continue + + # Ubuntu: + # - Jammy using GCC 12: Debug and no Unity on linux/arm64. + # - Noble using GCC 14: Release and Unity on linux/amd64. + # - Noble using Clang 18: Debug and no Unity on linux/amd64. + # - Noble using Clang 19: Release and Unity on linux/arm64. + if os['distro_name'] == 'ubuntu': + skip = True + if os['distro_version'] == 'jammy': + if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-12' and build_type == 'Debug' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/arm64': + skip = False + elif os['distro_version'] == 'noble': + if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-14' and build_type == 'Release' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/amd64': + skip = False + if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-18' and build_type == 'Debug' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/amd64': + skip = False + if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-19' and build_type == 'Release' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/arm64': + skip = False + if skip: + continue + + # MacOS: + # - Debug and no Unity on macos/arm64. + if os['distro_name'] == 'macos' and not (build_type == 'Debug' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'macos/arm64'): + continue + + # Windows: + # - Release and Unity on windows/amd64. 
+ if os['distro_name'] == 'windows' and not (build_type == 'Release' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'windows/amd64'): + continue + + + # Additional CMake arguments. + cmake_args = f'{cmake_args} -Dtests=ON -Dwerr=ON -Dxrpld=ON' + if not f'{os['compiler_name']}-{os['compiler_version']}' in ['gcc-12', 'clang-16']: + cmake_args = f'{cmake_args} -Dwextra=ON' + if build_type == 'Release': + cmake_args = f'{cmake_args} -Dassert=ON' + + # Generate a unique name for the configuration, e.g. macos-arm64-debug + # or debian-bookworm-gcc-12-amd64-release-unity. + config_name = os['distro_name'] + if (n := os['distro_version']) != '': + config_name += f'-{n}' + if (n := os['compiler_name']) != '': + config_name += f'-{n}' + if (n := os['compiler_version']) != '': + config_name += f'-{n}' + config_name += f'-{architecture['platform'][architecture['platform'].find('/')+1:]}' + config_name += f'-{build_type.lower()}' + if '-Dunity=ON' in cmake_args: + config_name += '-unity' + + configurations.append({ + 'architecture': architecture, + 'os': os, + 'build_type': build_type, + 'cmake_args': cmake_args, + 'cmake_target': cmake_target, + 'config_name': config_name, + }) + + return {'include': configurations} + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('-a', '--all', help='Set to generate all configurations (generally used when merging a PR) or leave unset to generate a subset of configurations (generally used when committing to a PR).', action="store_true") + parser.add_argument('-c', '--config', help='Path to the JSON file containing the strategy matrix configurations.', required=True, type=str) + args = parser.parse_args() + + # Load the JSON configuration file. + config = None + with open(args.config, 'r') as f: + config = json.load(f) + if config['architecture'] is None or config['os'] is None or config['build_type'] is None or config['cmake_args'] is None: + raise Exception('Invalid configuration file.') + + # Generate the strategy matrix. 
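+    # For local inspection, the same value can be generated outside CI; a
+    # minimal sketch (assuming the script is run from
+    # .github/scripts/strategy-matrix and that `jq` is installed for pretty
+    # printing):
+    #
+    #   python generate.py --all --config=linux.json | sed 's/^matrix=//' | jq .
+    #
+    # In CI the line below is instead piped (via tee) into "${GITHUB_OUTPUT}",
+    # which is how the generate-matrix job exposes it to the build-test job.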
+ print(f'matrix={json.dumps(generate_strategy_matrix(args.all, config['architecture'], config['os'], config['build_type'], config['cmake_args']))}') diff --git a/.github/scripts/strategy-matrix/linux.json b/.github/scripts/strategy-matrix/linux.json new file mode 100644 index 0000000000..d8f176273d --- /dev/null +++ b/.github/scripts/strategy-matrix/linux.json @@ -0,0 +1,170 @@ +{ + "architecture": [ + { + "platform": "linux/amd64", + "runner": [ + "self-hosted", + "Linux", + "X64", + "heavy" + ] + }, + { + "platform": "linux/arm64", + "runner": [ + "self-hosted", + "Linux", + "ARM64", + "heavy-arm64" + ] + } + ], + "os": [ + { + "distro_name": "debian", + "distro_version": "bookworm", + "compiler_name": "gcc", + "compiler_version": "12" + }, + { + "distro_name": "debian", + "distro_version": "bookworm", + "compiler_name": "gcc", + "compiler_version": "13" + }, + { + "distro_name": "debian", + "distro_version": "bookworm", + "compiler_name": "gcc", + "compiler_version": "14" + }, + { + "distro_name": "debian", + "distro_version": "bookworm", + "compiler_name": "gcc", + "compiler_version": "15" + }, + { + "distro_name": "debian", + "distro_version": "bookworm", + "compiler_name": "clang", + "compiler_version": "16" + }, + { + "distro_name": "debian", + "distro_version": "bookworm", + "compiler_name": "clang", + "compiler_version": "17" + }, + { + "distro_name": "debian", + "distro_version": "bookworm", + "compiler_name": "clang", + "compiler_version": "18" + }, + { + "distro_name": "debian", + "distro_version": "bookworm", + "compiler_name": "clang", + "compiler_version": "19" + }, + { + "distro_name": "debian", + "distro_version": "bookworm", + "compiler_name": "clang", + "compiler_version": "20" + }, + { + "distro_name": "rhel", + "distro_version": "9.4", + "compiler_name": "gcc", + "compiler_version": "12" + }, + { + "distro_name": "rhel", + "distro_version": "9.4", + "compiler_name": "gcc", + "compiler_version": "13" + }, + { + "distro_name": "rhel", + "distro_version": "9.4", + "compiler_name": "gcc", + "compiler_version": "14" + }, + { + "distro_name": "rhel", + "distro_version": "9.6", + "compiler_name": "gcc", + "compiler_version": "13" + }, + { + "distro_name": "rhel", + "distro_version": "9.6", + "compiler_name": "gcc", + "compiler_version": "14" + }, + { + "distro_name": "rhel", + "distro_version": "9.4", + "compiler_name": "clang", + "compiler_version": "any" + }, + { + "distro_name": "rhel", + "distro_version": "9.6", + "compiler_name": "clang", + "compiler_version": "any" + }, + { + "distro_name": "ubuntu", + "distro_version": "jammy", + "compiler_name": "gcc", + "compiler_version": "12" + }, + { + "distro_name": "ubuntu", + "distro_version": "noble", + "compiler_name": "gcc", + "compiler_version": "13" + }, + { + "distro_name": "ubuntu", + "distro_version": "noble", + "compiler_name": "gcc", + "compiler_version": "14" + }, + { + "distro_name": "ubuntu", + "distro_version": "noble", + "compiler_name": "clang", + "compiler_version": "16" + }, + { + "distro_name": "ubuntu", + "distro_version": "noble", + "compiler_name": "clang", + "compiler_version": "17" + }, + { + "distro_name": "ubuntu", + "distro_version": "noble", + "compiler_name": "clang", + "compiler_version": "18" + }, + { + "distro_name": "ubuntu", + "distro_version": "noble", + "compiler_name": "clang", + "compiler_version": "19" + } + ], + "build_type": [ + "Debug", + "Release" + ], + "cmake_args": [ + "-Dunity=OFF", + "-Dunity=ON" + ] +} diff --git a/.github/scripts/strategy-matrix/macos.json 
b/.github/scripts/strategy-matrix/macos.json new file mode 100644 index 0000000000..a6ffdf14b7 --- /dev/null +++ b/.github/scripts/strategy-matrix/macos.json @@ -0,0 +1,29 @@ +{ + "architecture": [ + { + "platform": "macos/arm64", + "runner": [ + "self-hosted", + "macOS", + "ARM64", + "mac-runner-m1" + ] + } + ], + "os": [ + { + "distro_name": "macos", + "distro_version": "", + "compiler_name": "", + "compiler_version": "" + } + ], + "build_type": [ + "Debug", + "Release" + ], + "cmake_args": [ + "-Dunity=OFF -DCMAKE_POLICY_VERSION_MINIMUM=3.5", + "-Dunity=ON -DCMAKE_POLICY_VERSION_MINIMUM=3.5" + ] +} diff --git a/.github/scripts/strategy-matrix/windows.json b/.github/scripts/strategy-matrix/windows.json new file mode 100644 index 0000000000..aaa8c94411 --- /dev/null +++ b/.github/scripts/strategy-matrix/windows.json @@ -0,0 +1,26 @@ +{ + "architecture": [ + { + "platform": "windows/amd64", + "runner": [ + "windows-latest" + ] + } + ], + "os": [ + { + "distro_name": "windows", + "distro_version": "", + "compiler_name": "", + "compiler_version": "" + } + ], + "build_type": [ + "Debug", + "Release" + ], + "cmake_args": [ + "-Dunity=OFF", + "-Dunity=ON" + ] +} diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml new file mode 100644 index 0000000000..f3b3374090 --- /dev/null +++ b/.github/workflows/build-test.yml @@ -0,0 +1,191 @@ +# This workflow builds and tests the binary for various configurations. +name: Build and test + +# This workflow can only be triggered by other workflows. +on: + workflow_call: + inputs: + build_dir: + description: 'The directory where to build.' + required: false + type: string + default: '.build' + conan_remote_name: + description: 'The name of the Conan remote to use.' + required: true + type: string + conan_remote_url: + description: 'The URL of the Conan endpoint to use.' + required: true + type: string + dependencies_force_build: + description: 'Force building of all dependencies.' + required: false + type: boolean + default: false + dependencies_force_upload: + description: 'Force uploading of all dependencies.' + required: false + type: boolean + default: false + os: + description: 'The operating system to use for the build (linux, macos, or windows).' + required: true + type: string + strategy_matrix_all: + description: 'Generate a strategy matrix containing all configurations.' + required: false + type: boolean + default: false + secrets: + codecov_token: + description: 'The Codecov token to use for uploading coverage reports.' + required: false + conan_remote_username: + description: 'The username for logging into the Conan remote. If not provided, the dependencies will not be uploaded.' + required: false + conan_remote_password: + description: 'The password for logging into the Conan remote. If not provided, the dependencies will not be uploaded.' + required: false + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }}-${{ inputs.os }} + cancel-in-progress: true + +defaults: + run: + shell: bash + +jobs: + # Generate the strategy matrix to be used by the following job. 
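+  # The matrix is a JSON object of the form {"include": [...]}; each entry is
+  # expected to look roughly like the following sketch (values illustrative):
+  #
+  #   {
+  #     "architecture": { "platform": "linux/amd64", "runner": [...] },
+  #     "os": { "distro_name": "debian", "distro_version": "bookworm",
+  #             "compiler_name": "gcc", "compiler_version": "13" },
+  #     "build_type": "Release",
+  #     "cmake_args": "-Dunity=ON ... -Dtests=ON -Dwerr=ON -Dxrpld=ON",
+  #     "cmake_target": "all",
+  #     "config_name": "debian-bookworm-gcc-13-amd64-release-unity"
+  #   }
+  #
+  # The build-test job below consumes it via fromJson and uses
+  # matrix.architecture.runner to pick the machine to run on.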
+ generate-matrix: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 + - name: Set up Python + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + with: + python-version: 3.13 + - name: Generate strategy matrix + working-directory: .github/scripts/strategy-matrix + id: generate + run: python generate.py ${{ inputs.strategy_matrix_all && '--all' || '' }} --config=${{ inputs.os }}.json | tee "${GITHUB_OUTPUT}" + outputs: + matrix: ${{ steps.generate.outputs.matrix }} + + # Build and test the binary. + build-test: + needs: + - generate-matrix + strategy: + fail-fast: false + matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }} + runs-on: ${{ matrix.architecture.runner }} + container: ${{ inputs.os == 'linux' && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version) || null }} + steps: + - name: Clean workspace (MacOS) + if: ${{ inputs.os == 'macos' }} + run: | + WORKSPACE=${{ github.workspace }} + echo "Cleaning workspace '${WORKSPACE}'." + if [ -z "${WORKSPACE}" ] || [ "${WORKSPACE}" = "/" ]; then + echo "Invalid working directory '${WORKSPACE}'." + exit 1 + fi + find "${WORKSPACE}" -depth 1 | xargs rm -rfv + - name: Checkout repository + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 + - name: Set up Python (Windows) + if: ${{ inputs.os == 'windows' }} + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + with: + python-version: 3.13 + - name: Install Conan (Windows) + if: ${{ inputs.os == 'windows' }} + run: pip install wheel conan + - name: Check configuration (Windows) + if: ${{ inputs.os == 'windows' }} + run: | + echo 'Checking environment variables.' + set + + echo 'Checking CMake version.' + cmake --version + + echo 'Checking Conan version.' + conan --version + - name: Check configuration (Linux and MacOS) + if: ${{ inputs.os == 'linux' || inputs.os == 'macos' }} + run: | + echo 'Checking path.' + echo ${PATH} | tr ':' '\n' + + echo 'Checking environment variables.' + env | sort + + echo 'Checking CMake version.' + cmake --version + + echo 'Checking compiler version.' + ${{ inputs.os == 'linux' && '${CC}' || 'clang' }} --version + + echo 'Checking Conan version.' + conan --version + + echo 'Checking Ninja version.' + ninja --version + - name: Set up Conan home directory (MacOS) + if: ${{ inputs.os == 'macos' }} + run: | + echo 'Setting up Conan home directory.' + export CONAN_HOME=${{ github.workspace }}/.conan + mkdir -p ${CONAN_HOME} + - name: Set up Conan home directory (Windows) + if: ${{ inputs.os == 'windows' }} + run: | + echo 'Setting up Conan home directory.' + set CONAN_HOME=${{ github.workspace }}\.conan + mkdir -p %CONAN_HOME% + - name: Set up Conan configuration + run: | + echo 'Installing configuration.' + cat conan/global.conf ${{ inputs.os == 'linux' && '>>' || '>' }} $(conan config home)/global.conf + + echo 'Conan configuration:' + conan config show '*' + - name: Set up Conan profile + run: | + echo 'Installing profile.' + conan config install conan/profiles/default -tf $(conan config home)/profiles/ + + echo 'Conan profile:' + conan profile show + - name: Set up Conan remote + shell: bash + run: | + echo "Adding Conan remote '${{ inputs.conan_remote_name }}' at ${{ inputs.conan_remote_url }}." 
+ conan remote add --index 0 --force ${{ inputs.conan_remote_name }} ${{ inputs.conan_remote_url }} + + echo 'Listing Conan remotes.' + conan remote list + - name: Build dependencies + uses: ./.github/actions/build-deps + with: + build_dir: ${{ inputs.build_dir }} + build_type: ${{ matrix.build_type }} + conan_remote_name: ${{ inputs.conan_remote_name }} + conan_remote_url: ${{ inputs.conan_remote_url }} + conan_remote_username: ${{ secrets.conan_remote_username }} + conan_remote_password: ${{ secrets.conan_remote_password }} + force_build: ${{ inputs.dependencies_force_build }} + force_upload: ${{ inputs.dependencies_force_upload }} + - name: Build and test binary + uses: ./.github/actions/build-test + with: + build_dir: ${{ inputs.build_dir }} + build_type: ${{ matrix.build_type }} + cmake_args: ${{ matrix.cmake_args }} + cmake_target: ${{ matrix.cmake_target }} + codecov_token: ${{ secrets.codecov_token }} + os: ${{ inputs.os }} diff --git a/.github/workflows/check-format.yml b/.github/workflows/check-format.yml new file mode 100644 index 0000000000..5e3da10028 --- /dev/null +++ b/.github/workflows/check-format.yml @@ -0,0 +1,112 @@ +# This workflow checks if the code is properly formatted. +name: Check format + +# This workflow can only be triggered by other workflows. +on: workflow_call + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }}-format + cancel-in-progress: true + +defaults: + run: + shell: bash + +jobs: + clang-format: + runs-on: ubuntu-latest + container: ghcr.io/xrplf/ci/tools-rippled-clang-format + steps: + # The $GITHUB_WORKSPACE and ${{ github.workspace }} might not point to the + # same directory for jobs running in containers. The actions/checkout step + # is *supposed* to checkout into $GITHUB_WORKSPACE and then add it to + # safe.directory (see instructions at https://github.com/actions/checkout) + # but that is apparently not happening for some container images. We + # therefore preemptively add both directories to safe.directory. See also + # https://github.com/actions/runner/issues/2058 for more details. + - name: Configure git safe.directory + run: | + git config --global --add safe.directory $GITHUB_WORKSPACE + git config --global --add safe.directory ${{ github.workspace }} + - name: Checkout repository + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 + - name: Check configuration + run: | + echo 'Checking path.' + echo ${PATH} | tr ':' '\n' + + echo 'Checking environment variables.' + env | sort + + echo 'Checking clang-format version.' + clang-format --version + - name: Format code + run: find include src tests -type f \( -name '*.cpp' -o -name '*.hpp' -o -name '*.h' -o -name '*.ipp' \) -exec clang-format -i {} + + - name: Check for differences + env: + MESSAGE: | + One or more files did not conform to the formatting specified in + .clang-format. Maybe you did not run 'git-clang-format' or + 'clang-format' before committing, or your version of clang-format + has an incompatibility with the one used here (see the "Check + configuration" step above). + + Run 'git-clang-format --extensions cpp,h,hpp,ipp develop' in your + repo, and then commit and push the changes. + run: | + DIFF=$(git status --porcelain) + if [ -n "${DIFF}" ]; then + # Print the files that changed to give the contributor a hint about + # what to expect when running git-clang-format on their own machine. 
+ git status + echo "${MESSAGE}" + exit 1 + fi + + prettier: + runs-on: ubuntu-latest + container: ghcr.io/xrplf/ci/tools-rippled-prettier + steps: + - name: Configure git safe.directory + run: | + git config --global --add safe.directory $GITHUB_WORKSPACE + git config --global --add safe.directory ${{ github.workspace }} + - name: Checkout repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + - name: Check configuration + run: | + echo 'Checking path.' + echo ${PATH} | tr ':' '\n' + + echo 'Checking environment variables.' + env | sort + + echo 'Checking NPM version.' + npm --version + + echo 'Checking Node.js version.' + node --version + + echo 'Checking prettier version.' + prettier --version + - name: Format code + run: prettier --check . + - name: Check for differences + env: + MESSAGE: | + One or more files did not conform to the formatting rules specified + by Prettier. Maybe you did not run 'prettier' before committing, or + your version of prettier has an incompatibility with the one used + here (see the "Check configuration" step above). + + Run 'prettier --check .' in your repo, and then commit and push the + changes. + run: | + DIFF=$(git status --porcelain) + if [ -n "${DIFF}" ]; then + # Print the files that changed to give the contributor a hint about + # what to expect when running prettier on their own machine. + git status + echo "${MESSAGE}" + exit 1 + fi diff --git a/.github/workflows/check-levelization.yml b/.github/workflows/check-levelization.yml new file mode 100644 index 0000000000..3430ca28a2 --- /dev/null +++ b/.github/workflows/check-levelization.yml @@ -0,0 +1,46 @@ +# This workflow checks if the dependencies between the modules are correctly +# indexed. +name: Check levelization + +# This workflow can only be triggered by other workflows. +on: workflow_call + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }}-levelization + cancel-in-progress: true + +defaults: + run: + shell: bash + +jobs: + levelization: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 + - name: Check levelization + run: .github/scripts/levelization/generate.sh + - name: Check for differences + env: + MESSAGE: | + + The dependency relationships between the modules in rippled have + changed, which may be an improvement or a regression. + + A rule of thumb is that if your changes caused something to be + removed from loops.txt, it's probably an improvement, while if + something was added, it's probably a regression. + + Run '.github/scripts/levelization/generate.sh' in your repo, commit + and push the changes. See .github/scripts/levelization/README.md for + more info. + run: | + DIFF=$(git status --porcelain) + if [ -n "${DIFF}" ]; then + # Print the differences to give the contributor a hint about what to + # expect when running levelization on their own machine. + git diff + echo "${MESSAGE}" + exit 1 + fi diff --git a/.github/workflows/check-missing-commits.yml b/.github/workflows/check-missing-commits.yml new file mode 100644 index 0000000000..da0e296e70 --- /dev/null +++ b/.github/workflows/check-missing-commits.yml @@ -0,0 +1,62 @@ +# This workflow checks that all commits in the "master" branch are also in the +# "release" and "develop" branches, and that all commits in the "release" branch +# are also in the "develop" branch. +name: Check for missing commits + +# This workflow can only be triggered by other workflows. 
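+# Conceptually, for each pair of adjacent branches the check boils down to a
+# command along these lines (sketch):
+#
+#   git log --oneline --no-merges origin/master ^origin/release
+#
+# which lists commits reachable from "master" but not from "release"; any
+# output means a reverse-merge is needed.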
+on: workflow_call + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }}-missing-commits + cancel-in-progress: true + +defaults: + run: + shell: bash + +jobs: + check: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 + with: + fetch-depth: 0 + - name: Check for missing commits + env: + MESSAGE: | + + If you are reading this, then the commits indicated above are missing + from the "develop" and/or "release" branch. Do a reverse-merge as soon + as possible. See CONTRIBUTING.md for instructions. + run: | + set -o pipefail + # Branches are ordered by how "canonical" they are. Every commit in one + # branch should be in all the branches behind it. + order=(master release develop) + branches=() + for branch in "${order[@]}"; do + # Check that the branches exist so that this job will work on forked + # repos, which don't necessarily have master and release branches. + echo "Checking if ${branch} exists." + if git ls-remote --exit-code --heads origin \ + refs/heads/${branch} > /dev/null; then + branches+=(origin/${branch}) + fi + done + + prior=() + for branch in "${branches[@]}"; do + if [[ ${#prior[@]} -ne 0 ]]; then + echo "Checking ${prior[@]} for commits missing from ${branch}." + git log --oneline --no-merges "${prior[@]}" \ + ^$branch | tee -a "missing-commits.txt" + echo + fi + prior+=("${branch}") + done + + if [[ $(cat missing-commits.txt | wc -l) -ne 0 ]]; then + echo "${MESSAGE}" + exit 1 + fi diff --git a/.github/workflows/clang-format.yml b/.github/workflows/clang-format.yml deleted file mode 100644 index 0d81f87791..0000000000 --- a/.github/workflows/clang-format.yml +++ /dev/null @@ -1,64 +0,0 @@ -name: clang-format - -on: - push: - pull_request: - types: [opened, reopened, synchronize, ready_for_review] - -jobs: - check: - if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }} - runs-on: ubuntu-24.04 - container: ghcr.io/xrplf/ci/tools-rippled-clang-format - steps: - # For jobs running in containers, $GITHUB_WORKSPACE and ${{ github.workspace }} might not be the - # same directory. The actions/checkout step is *supposed* to checkout into $GITHUB_WORKSPACE and - # then add it to safe.directory (see instructions at https://github.com/actions/checkout) - # but that's apparently not happening for some container images. We can't be sure what is actually - # happening, so let's pre-emptively add both directories to safe.directory. There's a - # Github issue opened in 2022 and not resolved in 2025 https://github.com/actions/runner/issues/2058 ¯\_(ツ)_/¯ - - run: | - git config --global --add safe.directory $GITHUB_WORKSPACE - git config --global --add safe.directory ${{ github.workspace }} - - uses: actions/checkout@v4 - - name: Format first-party sources - run: | - clang-format --version - find include src tests -type f \( -name '*.cpp' -o -name '*.hpp' -o -name '*.h' -o -name '*.ipp' \) -exec clang-format -i {} + - - name: Check for differences - id: assert - shell: bash - run: | - set -o pipefail - git diff --exit-code | tee "clang-format.patch" - - name: Upload patch - if: failure() && steps.assert.outcome == 'failure' - uses: actions/upload-artifact@v4 - continue-on-error: true - with: - name: clang-format.patch - if-no-files-found: ignore - path: clang-format.patch - - name: What happened? 
- if: failure() && steps.assert.outcome == 'failure' - env: - PREAMBLE: | - If you are reading this, you are looking at a failed Github Actions - job. That means you pushed one or more files that did not conform - to the formatting specified in .clang-format. That may be because - you neglected to run 'git clang-format' or 'clang-format' before - committing, or that your version of clang-format has an - incompatibility with the one on this - machine, which is: - SUGGESTION: | - - To fix it, you can do one of two things: - 1. Download and apply the patch generated as an artifact of this - job to your repo, commit, and push. - 2. Run 'git-clang-format --extensions cpp,h,hpp,ipp develop' - in your repo, commit, and push. - run: | - echo "${PREAMBLE}" - clang-format --version - echo "${SUGGESTION}" - exit 1 diff --git a/.github/workflows/doxygen.yml b/.github/workflows/doxygen.yml deleted file mode 100644 index 01e04a3f5a..0000000000 --- a/.github/workflows/doxygen.yml +++ /dev/null @@ -1,37 +0,0 @@ -name: Build and publish Doxygen documentation -# To test this workflow, push your changes to your fork's `develop` branch. -on: - push: - branches: - - develop - - doxygen -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - documentation: - runs-on: ubuntu-latest - permissions: - contents: write - container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e - steps: - - name: checkout - uses: actions/checkout@v4 - - name: check environment - run: | - echo ${PATH} | tr ':' '\n' - cmake --version - doxygen --version - env | sort - - name: build - run: | - mkdir build - cd build - cmake -Donly_docs=TRUE .. - cmake --build . --target docs --parallel $(nproc) - - name: publish - uses: peaceiris/actions-gh-pages@v3 - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - publish_dir: build/docs/html diff --git a/.github/workflows/levelization.yml b/.github/workflows/levelization.yml deleted file mode 100644 index 979049d630..0000000000 --- a/.github/workflows/levelization.yml +++ /dev/null @@ -1,53 +0,0 @@ -name: levelization - -on: - push: - pull_request: - types: [opened, reopened, synchronize, ready_for_review] - -jobs: - check: - if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }} - runs-on: ubuntu-latest - env: - CLANG_VERSION: 10 - steps: - - uses: actions/checkout@v4 - - name: Check levelization - run: Builds/levelization/levelization.sh - - name: Check for differences - id: assert - run: | - set -o pipefail - git diff --exit-code | tee "levelization.patch" - - name: Upload patch - if: failure() && steps.assert.outcome == 'failure' - uses: actions/upload-artifact@v4 - continue-on-error: true - with: - name: levelization.patch - if-no-files-found: ignore - path: levelization.patch - - name: What happened? - if: failure() && steps.assert.outcome == 'failure' - env: - MESSAGE: | - If you are reading this, you are looking at a failed Github - Actions job. That means you changed the dependency relationships - between the modules in rippled. That may be an improvement or a - regression. This check doesn't judge. - - A rule of thumb, though, is that if your changes caused - something to be removed from loops.txt, that's probably an - improvement. If something was added, it's probably a regression. - - To fix it, you can do one of two things: - 1. Download and apply the patch generated as an artifact of this - job to your repo, commit, and push. - 2. 
Run './Builds/levelization/levelization.sh' in your repo, - commit, and push. - - See Builds/levelization/README.md for more info. - run: | - echo "${MESSAGE}" - exit 1 diff --git a/.github/workflows/libxrpl.yml b/.github/workflows/libxrpl.yml deleted file mode 100644 index 5880c03d71..0000000000 --- a/.github/workflows/libxrpl.yml +++ /dev/null @@ -1,91 +0,0 @@ -name: Check libXRPL compatibility with Clio -env: - CONAN_REMOTE_URL: https://conan.ripplex.io - CONAN_LOGIN_USERNAME_XRPLF: ${{ secrets.CONAN_REMOTE_USERNAME }} - CONAN_PASSWORD_XRPLF: ${{ secrets.CONAN_REMOTE_PASSWORD }} -on: - pull_request: - paths: - - "src/libxrpl/protocol/BuildInfo.cpp" - - ".github/workflows/libxrpl.yml" - types: [opened, reopened, synchronize, ready_for_review] -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - publish: - if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }} - name: Publish libXRPL - outputs: - outcome: ${{ steps.upload.outputs.outcome }} - version: ${{ steps.version.outputs.version }} - channel: ${{ steps.channel.outputs.channel }} - runs-on: [self-hosted, heavy] - container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e - steps: - - name: Wait for essential checks to succeed - uses: lewagon/wait-on-check-action@v1.3.4 - with: - ref: ${{ github.event.pull_request.head.sha || github.sha }} - running-workflow-name: wait-for-check-regexp - check-regexp: "(dependencies|test).*linux.*" # Ignore windows and mac tests but make sure linux passes - repo-token: ${{ secrets.GITHUB_TOKEN }} - wait-interval: 10 - - name: Checkout - uses: actions/checkout@v4 - - name: Generate channel - id: channel - shell: bash - run: | - echo channel="clio/pr_${{ github.event.pull_request.number }}" | tee ${GITHUB_OUTPUT} - - name: Export new package - shell: bash - run: | - conan export . ${{ steps.channel.outputs.channel }} - - name: Add Conan remote - shell: bash - run: | - echo "Adding Conan remote 'xrplf' at ${{ env.CONAN_REMOTE_URL }}." - conan remote add xrplf ${{ env.CONAN_REMOTE_URL }} --insert 0 --force - echo "Listing Conan remotes." - conan remote list - - name: Parse new version - id: version - shell: bash - run: | - echo version="$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" \ - | awk -F '"' '{print $2}')" | tee ${GITHUB_OUTPUT} - - name: Try to authenticate to Conan remote - id: remote - shell: bash - run: | - # `conan user` implicitly uses the environment variables CONAN_LOGIN_USERNAME_ and CONAN_PASSWORD_. 
- # https://docs.conan.io/1/reference/commands/misc/user.html#using-environment-variables - # https://docs.conan.io/1/reference/env_vars.html#conan-login-username-conan-login-username-remote-name - # https://docs.conan.io/1/reference/env_vars.html#conan-password-conan-password-remote-name - echo outcome=$(conan user --remote xrplf --password >&2 \ - && echo success || echo failure) | tee ${GITHUB_OUTPUT} - - name: Upload new package - id: upload - if: (steps.remote.outputs.outcome == 'success') - shell: bash - run: | - echo "conan upload version ${{ steps.version.outputs.version }} on channel ${{ steps.channel.outputs.channel }}" - echo outcome=$(conan upload xrpl/${{ steps.version.outputs.version }}@${{ steps.channel.outputs.channel }} --remote ripple --confirm >&2 \ - && echo success || echo failure) | tee ${GITHUB_OUTPUT} - notify_clio: - name: Notify Clio - runs-on: ubuntu-latest - needs: publish - env: - GH_TOKEN: ${{ secrets.CLIO_NOTIFY_TOKEN }} - steps: - - name: Notify Clio about new version - if: (needs.publish.outputs.outcome == 'success') - shell: bash - run: | - gh api --method POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \ - /repos/xrplf/clio/dispatches -f "event_type=check_libxrpl" \ - -F "client_payload[version]=${{ needs.publish.outputs.version }}@${{ needs.publish.outputs.channel }}" \ - -F "client_payload[pr]=${{ github.event.pull_request.number }}" diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml deleted file mode 100644 index 73e25c357f..0000000000 --- a/.github/workflows/macos.yml +++ /dev/null @@ -1,112 +0,0 @@ -name: macos -on: - pull_request: - types: [opened, reopened, synchronize, ready_for_review] - push: - # If the branches list is ever changed, be sure to change it on all - # build/test jobs (nix, macos, windows, instrumentation) - branches: - # Always build the package branches - - develop - - release - - master - # Branches that opt-in to running - - "ci/**" -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true -# This part of Conan configuration is specific to this workflow only; we do not want -# to pollute conan/profiles directory with settings which might not work for others -env: - CONAN_REMOTE_URL: https://conan.ripplex.io - CONAN_REMOTE_USERNAME: ${{ secrets.CONAN_REMOTE_USERNAME }} - CONAN_REMOTE_PASSWORD: ${{ secrets.CONAN_REMOTE_PASSWORD }} - # This part of the Conan configuration is specific to this workflow only; we - # do not want to pollute the 'conan/profiles' directory with settings that - # might not work for other workflows. - CONAN_GLOBAL_CONF: | - core.download:parallel={{os.cpu_count()}} - core.upload:parallel={{os.cpu_count()}} - tools.build:jobs={{ (os.cpu_count() * 4/5) | int }} - tools.build:verbosity=verbose - tools.compilation:verbosity=verbose - -jobs: - test: - if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }} - strategy: - matrix: - platform: - - macos - generator: - - Ninja - configuration: - - Release - runs-on: [self-hosted, macOS, mac-runner-m1] - env: - # The `build` action requires these variables. 
- build_dir: .build - NUM_PROCESSORS: 12 - steps: - - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - - name: install Conan - run: | - brew install conan - - name: install Ninja - if: matrix.generator == 'Ninja' - run: brew install ninja - - name: install python - run: | - if which python > /dev/null 2>&1; then - echo "Python executable exists" - else - brew install python@3.13 - ln -s /opt/homebrew/bin/python3 /opt/homebrew/bin/python - fi - - name: install cmake - run: | - if which cmake > /dev/null 2>&1; then - echo "cmake executable exists" - else - brew install cmake - fi - - name: install nproc - run: | - brew install coreutils - - name: check environment - run: | - env | sort - echo ${PATH} | tr ':' '\n' - python --version - conan --version - cmake --version - nproc --version - echo -n "nproc returns: " - nproc - system_profiler SPHardwareDataType - sysctl -n hw.logicalcpu - clang --version - - name: configure Conan - run: | - echo "${CONAN_GLOBAL_CONF}" > $(conan config home)/global.conf - conan config install conan/profiles/ -tf $(conan config home)/profiles/ - conan profile show - - name: build dependencies - uses: ./.github/actions/dependencies - with: - configuration: ${{ matrix.configuration }} - - name: build - uses: ./.github/actions/build - with: - generator: ${{ matrix.generator }} - configuration: ${{ matrix.configuration }} - cmake-args: "-Dassert=TRUE -Dwerr=TRUE ${{ matrix.cmake-args }}" - - name: test - run: | - n=$(nproc) - echo "Using $n test jobs" - - cd ${build_dir} - ./rippled --unittest --unittest-jobs $n - ctest -j $n --output-on-failure diff --git a/.github/workflows/missing-commits.yml b/.github/workflows/missing-commits.yml deleted file mode 100644 index ed478a2327..0000000000 --- a/.github/workflows/missing-commits.yml +++ /dev/null @@ -1,60 +0,0 @@ -name: missing-commits - -on: - push: - branches: - # Only check that the branches are up to date when updating the - # relevant branches. - - develop - - release - -jobs: - up_to_date: - runs-on: ubuntu-24.04 - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - name: Check for missing commits - id: commits - env: - SUGGESTION: | - - If you are reading this, then the commits indicated above are - missing from "develop" and/or "release". Do a reverse-merge - as soon as possible. See CONTRIBUTING.md for instructions. - run: | - set -o pipefail - # Branches ordered by how "canonical" they are. Every commit in - # one branch should be in all the branches behind it - order=( master release develop ) - branches=() - for branch in "${order[@]}" - do - # Check that the branches exist so that this job will work on - # forked repos, which don't necessarily have master and - # release branches. 
- if git ls-remote --exit-code --heads origin \ - refs/heads/${branch} > /dev/null - then - branches+=( origin/${branch} ) - fi - done - - prior=() - for branch in "${branches[@]}" - do - if [[ ${#prior[@]} -ne 0 ]] - then - echo "Checking ${prior[@]} for commits missing from ${branch}" - git log --oneline --no-merges "${prior[@]}" \ - ^$branch | tee -a "missing-commits.txt" - echo - fi - prior+=( "${branch}" ) - done - if [[ $( cat missing-commits.txt | wc -l ) -ne 0 ]] - then - echo "${SUGGESTION}" - exit 1 - fi diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml deleted file mode 100644 index 395bd72b8d..0000000000 --- a/.github/workflows/nix.yml +++ /dev/null @@ -1,422 +0,0 @@ -name: nix -on: - pull_request: - types: [opened, reopened, synchronize, ready_for_review] - push: - # If the branches list is ever changed, be sure to change it on all - # build/test jobs (nix, macos, windows) - branches: - # Always build the package branches - - develop - - release - - master - # Branches that opt-in to running - - "ci/**" -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -env: - CONAN_REMOTE_URL: https://conan.ripplex.io - CONAN_REMOTE_USERNAME: ${{ secrets.CONAN_REMOTE_USERNAME }} - CONAN_REMOTE_PASSWORD: ${{ secrets.CONAN_REMOTE_PASSWORD }} - # This part of the Conan configuration is specific to this workflow only; we - # do not want to pollute the 'conan/profiles' directory with settings that - # might not work for other workflows. - CONAN_GLOBAL_CONF: | - core.download:parallel={{ os.cpu_count() }} - core.upload:parallel={{ os.cpu_count() }} - tools.build:jobs={{ (os.cpu_count() * 4/5) | int }} - tools.build:verbosity=verbose - tools.compilation:verbosity=verbose - -# This workflow has multiple job matrixes. -# They can be considered phases because most of the matrices ("test", -# "coverage", "conan", ) depend on the first ("dependencies"). -# -# The first phase has a job in the matrix for each combination of -# variables that affects dependency ABI: -# platform, compiler, and configuration. -# It creates a GitHub artifact holding the Conan profile, -# and builds and caches binaries for all the dependencies. -# If an Artifactory remote is configured, they are cached there. -# If not, they are added to the GitHub artifact. -# GitHub's "cache" action has a size limit (10 GB) that is too small -# to hold the binaries if they are built locally. -# We must use the "{upload,download}-artifact" actions instead. -# -# The remaining phases have a job in the matrix for each test -# configuration. They install dependency binaries from the cache, -# whichever was used, and build and test rippled. -# -# "instrumentation" is independent, but is included here because it also -# builds on linux in the same "on:" conditions. 
- -jobs: - dependencies: - if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }} - strategy: - fail-fast: false - matrix: - platform: - - linux - compiler: - - gcc - - clang - configuration: - - Debug - - Release - include: - - compiler: gcc - compiler_version: 12 - distro: ubuntu - codename: jammy - - compiler: clang - compiler_version: 16 - distro: debian - codename: bookworm - runs-on: [self-hosted, heavy] - container: ghcr.io/xrplf/ci/${{ matrix.distro }}-${{ matrix.codename }}:${{ matrix.compiler }}-${{ matrix.compiler_version }} - env: - build_dir: .build - steps: - - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - - name: check environment - run: | - echo ${PATH} | tr ':' '\n' - lsb_release -a || true - ${{ matrix.compiler }}-${{ matrix.compiler_version }} --version - conan --version - cmake --version - env | sort - - name: configure Conan - run: | - echo "${CONAN_GLOBAL_CONF}" >> $(conan config home)/global.conf - conan config install conan/profiles/ -tf $(conan config home)/profiles/ - conan profile show - - name: archive profile - # Create this archive before dependencies are added to the local cache. - run: tar -czf conan.tar.gz -C ${CONAN_HOME} . - - name: build dependencies - uses: ./.github/actions/dependencies - with: - configuration: ${{ matrix.configuration }} - - name: upload archive - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 - with: - name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }} - path: conan.tar.gz - if-no-files-found: error - - test: - strategy: - fail-fast: false - matrix: - platform: - - linux - compiler: - - gcc - - clang - configuration: - - Debug - - Release - include: - - compiler: gcc - compiler_version: 12 - distro: ubuntu - codename: jammy - - compiler: clang - compiler_version: 16 - distro: debian - codename: bookworm - cmake-args: - - - - "-Dunity=ON" - needs: dependencies - runs-on: [self-hosted, heavy] - container: ghcr.io/xrplf/ci/${{ matrix.distro }}-${{ matrix.codename }}:${{ matrix.compiler }}-${{ matrix.compiler_version }} - env: - build_dir: .build - steps: - - name: download cache - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 - with: - name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }} - - name: extract cache - run: | - mkdir -p ${CONAN_HOME} - tar -xzf conan.tar.gz -C ${CONAN_HOME} - - name: check environment - run: | - env | sort - echo ${PATH} | tr ':' '\n' - conan --version - cmake --version - - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - - name: dependencies - uses: ./.github/actions/dependencies - with: - configuration: ${{ matrix.configuration }} - - name: build - uses: ./.github/actions/build - with: - generator: Ninja - configuration: ${{ matrix.configuration }} - cmake-args: "-Dassert=TRUE -Dwerr=TRUE ${{ matrix.cmake-args }}" - - name: check linking - run: | - cd ${build_dir} - ldd ./rippled - if [ "$(ldd ./rippled | grep -E '(libstdc\+\+|libgcc)' | wc -l)" -eq 0 ]; then - echo 'The binary is statically linked.' - else - echo 'The binary is dynamically linked.' 
- exit 1 - fi - - name: test - run: | - cd ${build_dir} - ./rippled --unittest --unittest-jobs $(nproc) - ctest -j $(nproc) --output-on-failure - - reference-fee-test: - strategy: - fail-fast: false - matrix: - platform: - - linux - compiler: - - gcc - configuration: - - Debug - cmake-args: - - "-DUNIT_TEST_REFERENCE_FEE=200" - - "-DUNIT_TEST_REFERENCE_FEE=1000" - needs: dependencies - runs-on: [self-hosted, heavy] - container: ghcr.io/xrplf/ci/ubuntu-jammy:gcc-12 - env: - build_dir: .build - steps: - - name: download cache - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 - with: - name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }} - - name: extract cache - run: | - mkdir -p ${CONAN_HOME} - tar -xzf conan.tar.gz -C ${CONAN_HOME} - - name: check environment - run: | - env | sort - echo ${PATH} | tr ':' '\n' - conan --version - cmake --version - - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - - name: dependencies - uses: ./.github/actions/dependencies - with: - configuration: ${{ matrix.configuration }} - - name: build - uses: ./.github/actions/build - with: - generator: Ninja - configuration: ${{ matrix.configuration }} - cmake-args: "-Dassert=TRUE -Dwerr=TRUE ${{ matrix.cmake-args }}" - - name: test - run: | - cd ${build_dir} - ./rippled --unittest --unittest-jobs $(nproc) - ctest -j $(nproc) --output-on-failure - - coverage: - strategy: - fail-fast: false - matrix: - platform: - - linux - compiler: - - gcc - configuration: - - Debug - needs: dependencies - runs-on: [self-hosted, heavy] - container: ghcr.io/xrplf/ci/ubuntu-jammy:gcc-12 - env: - build_dir: .build - steps: - - name: download cache - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 - with: - name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }} - - name: extract cache - run: | - mkdir -p ${CONAN_HOME} - tar -xzf conan.tar.gz -C ${CONAN_HOME} - - name: check environment - run: | - echo ${PATH} | tr ':' '\n' - conan --version - cmake --version - gcovr --version - env | sort - ls ${CONAN_HOME} - - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - - name: dependencies - uses: ./.github/actions/dependencies - with: - configuration: ${{ matrix.configuration }} - - name: build - uses: ./.github/actions/build - with: - generator: Ninja - configuration: ${{ matrix.configuration }} - cmake-args: >- - -Dassert=TRUE - -Dwerr=TRUE - -Dcoverage=ON - -Dcoverage_format=xml - -DCODE_COVERAGE_VERBOSE=ON - -DCMAKE_CXX_FLAGS="-O0" - -DCMAKE_C_FLAGS="-O0" - cmake-target: coverage - - name: move coverage report - shell: bash - run: | - mv "${build_dir}/coverage.xml" ./ - - name: archive coverage report - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 - with: - name: coverage.xml - path: coverage.xml - retention-days: 30 - - name: upload coverage report - uses: wandalen/wretry.action@v1.4.10 - with: - action: codecov/codecov-action@v4.5.0 - with: | - files: coverage.xml - fail_ci_if_error: true - disable_search: true - verbose: true - plugin: noop - token: ${{ secrets.CODECOV_TOKEN }} - attempt_limit: 5 - attempt_delay: 210000 # in milliseconds - - conan: - needs: dependencies - runs-on: [self-hosted, heavy] - container: - image: ghcr.io/xrplf/ci/ubuntu-jammy:gcc-12 - env: - build_dir: .build - platform: linux - compiler: gcc - compiler_version: 12 - configuration: Release - steps: - - name: download cache - uses: 
actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 - with: - name: ${{ env.platform }}-${{ env.compiler }}-${{ env.configuration }} - - name: extract cache - run: | - mkdir -p ${CONAN_HOME} - tar -xzf conan.tar.gz -C ${CONAN_HOME} - - name: check environment - run: | - env | sort - echo ${PATH} | tr ':' '\n' - conan --version - cmake --version - - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - - name: dependencies - uses: ./.github/actions/dependencies - with: - configuration: ${{ env.configuration }} - - name: export - run: | - conan export . --version head - - name: build - run: | - cd tests/conan - mkdir ${build_dir} && cd ${build_dir} - conan install .. \ - --settings:all build_type=${configuration} \ - --output-folder . \ - --build missing - cmake .. \ - -DCMAKE_TOOLCHAIN_FILE:FILEPATH=./build/${configuration}/generators/conan_toolchain.cmake \ - -DCMAKE_BUILD_TYPE=${configuration} - cmake --build . - ./example | grep '^[[:digit:]]\+\.[[:digit:]]\+\.[[:digit:]]\+' - - instrumentation-build: - needs: dependencies - runs-on: [self-hosted, heavy] - container: ghcr.io/xrplf/ci/debian-bookworm:clang-16 - env: - build_dir: .build - steps: - - name: download cache - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 - with: - name: linux-clang-Debug - - - name: extract cache - run: | - mkdir -p ${CONAN_HOME} - tar -xzf conan.tar.gz -C ${CONAN_HOME} - - - name: check environment - run: | - echo ${PATH} | tr ':' '\n' - conan --version - cmake --version - env | sort - ls ${CONAN_HOME} - - - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - - - name: dependencies - uses: ./.github/actions/dependencies - with: - configuration: Debug - - - name: prepare environment - run: | - mkdir -p ${build_dir} - echo "SOURCE_DIR=$(pwd)" >> $GITHUB_ENV - echo "BUILD_DIR=$(pwd)/${build_dir}" >> $GITHUB_ENV - - - name: build with instrumentation - run: | - cd ${BUILD_DIR} - cmake -S ${SOURCE_DIR} -B ${BUILD_DIR} \ - -Dvoidstar=ON \ - -Dtests=ON \ - -Dxrpld=ON \ - -DCMAKE_BUILD_TYPE=Debug \ - -DSECP256K1_BUILD_BENCHMARK=OFF \ - -DSECP256K1_BUILD_TESTS=OFF \ - -DSECP256K1_BUILD_EXHAUSTIVE_TESTS=OFF \ - -DCMAKE_TOOLCHAIN_FILE=${BUILD_DIR}/build/generators/conan_toolchain.cmake - cmake --build . --parallel $(nproc) - - - name: verify instrumentation enabled - run: | - cd ${BUILD_DIR} - ./rippled --version | grep libvoidstar - - - name: run unit tests - run: | - cd ${BUILD_DIR} - ./rippled -u --unittest-jobs $(( $(nproc)/4 )) - ctest -j $(nproc) --output-on-failure diff --git a/.github/workflows/notify-clio.yml b/.github/workflows/notify-clio.yml new file mode 100644 index 0000000000..32ac01d61a --- /dev/null +++ b/.github/workflows/notify-clio.yml @@ -0,0 +1,80 @@ +# This workflow exports the built libxrpl package to the Conan remote on a +# a channel named after the pull request, and notifies the Clio repository about +# the new version so it can check for compatibility. +name: Notify Clio + +# This workflow can only be triggered by other workflows. +on: + workflow_call: + inputs: + conan_remote_name: + description: 'The name of the Conan remote to use.' + required: true + type: string + conan_remote_url: + description: 'The URL of the Conan endpoint to use.' + required: true + type: string + secrets: + clio_notify_token: + description: 'The GitHub token to notify Clio about new versions.' + required: true + conan_remote_username: + description: 'The username for logging into the Conan remote.' 
+ required: true + conan_remote_password: + description: 'The password for logging into the Conan remote.' + required: true + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }}-clio + cancel-in-progress: true + +defaults: + run: + shell: bash + +jobs: + upload: + runs-on: ubuntu-latest + container: ghcr.io/xrplf/ci/ubuntu-noble:gcc-13 + steps: + - name: Checkout repository + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 + - name: Generate outputs + id: generate + run: | + echo 'Generating channel.' + echo channel="clio/pr_${{ github.event.pull_request.number }}" | tee "${GITHUB_OUTPUT}" + echo 'Extracting version.' + echo version="$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" | awk -F '"' '{print $2}')" | tee "${GITHUB_OUTPUT}" + - name: Add Conan remote + run: | + echo "Adding Conan remote '${{ inputs.conan_remote_name }}' at ${{ inputs.conan_remote_url }}." + conan remote add --index 0 --force ${{ inputs.conan_remote_name }} ${{ inputs.conan_remote_url }} + echo 'Listing Conan remotes.' + conan remote list + - name: Log into Conan remote + run: conan remote login ${{ inputs.conan_remote_name }} "${{ secrets.conan_remote_username }}" --password "${{ secrets.conan_remote_password }}" + - name: Upload package + run: | + echo 'Exporting package to channel ${{ steps.generate.outputs.channel }}.' + conan export --channel=${{ steps.generate.outputs.channel }} . + echo 'Uploading package version ${{ steps.generate.outputs.version }} on channel ${{ steps.generate.outputs.channel }}.' + conan upload --confirm --check --remote=${{ inputs.conan_remote_name }} xrpl/${{ steps.generate.outputs.version }}@${{ steps.generate.outputs.channel }} + outputs: + channel: ${{ steps.generate.outputs.channel }} + version: ${{ steps.generate.outputs.version }} + + notify: + needs: upload + runs-on: ubuntu-latest + env: + GH_TOKEN: ${{ secrets.clio_notify_token }} + steps: + - name: Notify Clio + run: | + gh api --method POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \ + /repos/xrplf/clio/dispatches -f "event_type=check_libxrpl" \ + -F "client_payload[version]=${{ needs.upload.outputs.version }}@${{ needs.upload.outputs.channel }}" \ + -F "client_payload[pr]=${{ github.event.pull_request.number }}" diff --git a/.github/workflows/on-pr.yml b/.github/workflows/on-pr.yml new file mode 100644 index 0000000000..10bc61797a --- /dev/null +++ b/.github/workflows/on-pr.yml @@ -0,0 +1,133 @@ +# This workflow runs all workflows to check, build and test the project on +# various Linux flavors, as well as on MacOS and Windows, on every push to a +# user branch. However, it will not run if the pull request is a draft unless it +# has the 'DraftRunCI' label. +name: PR + +on: + pull_request: + paths: + - '.github/actions/build-deps/**' + - '.github/actions/build-test/**' + - '.github/scripts/levelization/**' + - '.github/scripts/strategy-matrix/**' + - '.github/workflows/build-test.yml' + - '.github/workflows/check-format.yml' + - '.github/workflows/check-levelization.yml' + - '.github/workflows/notify-clio.yml' + - '.github/workflows/on-pr.yml' + # Keep the list of paths below in sync with those in the `on-trigger.yml` + # file. 
+ - 'cmake/**' + - 'conan/**' + - 'external/**' + - 'include/**' + - 'src/**' + - 'tests/**' + - '.clang-format' + - '.codecov.yml' + - '.pre-commit-config.yaml' + - 'CMakeLists.txt' + - 'conanfile.py' + types: + - opened + - synchronize + - labeled + - unlabeled + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +defaults: + run: + shell: bash + +env: + CONAN_REMOTE_NAME: xrplf + CONAN_REMOTE_URL: https://conan.ripplex.io + +jobs: + # This job determines whether the workflow should run. It runs when: + # * Opened as a non-draft PR. + # * A commit is added to a non-draft PR or the PR has the 'DraftRunCI' label. + # * A draft PR has the 'DraftRunCI' label added. + # * A non-draft PR has the 'DraftRunCI' label removed. + # These checks are in part to ensure the workflow won't run needlessly while + # also allowing it to be triggered without having to add a no-op commit. A new + # workflow execution can be triggered by adding and then removing the label on + # a non-draft PR, or conversely by removing it and then adding it back on a + # draft PR; this can be useful in certain cases. + should-run: + if: >- + ${{ + (github.event.action == 'opened' && !github.event.pull_request.draft) || + (github.event.action == 'synchronize' && (!github.event.pull_request.draft || contains(github.event.pull_request.labels.*.name, 'DraftRunCI'))) || + (github.event.action == 'labeled' && github.event.pull_request.draft && github.event.label.name == 'DraftRunCI') || + (github.event.action == 'unlabeled' && !github.event.pull_request.draft && github.event.label.name == 'DraftRunCI') + }} + runs-on: ubuntu-latest + steps: + - name: No-op + run: echo '' + + check-clang-format: + needs: should-run + uses: ./.github/workflows/check-format.yml + + check-levelization: + needs: should-run + uses: ./.github/workflows/check-levelization.yml + + # This job works around the limitation that GitHub Actions does not support + # using environment variables as inputs for reusable workflows. 
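+  # The workaround is simply to re-expose the environment values as job
+  # outputs (e.g. conan_remote_name: ${{ env.CONAN_REMOTE_NAME }}), so that
+  # downstream jobs can forward them to the reusable workflows via
+  # ${{ needs.generate-outputs.outputs.conan_remote_name }}.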
+ generate-outputs: + needs: should-run + runs-on: ubuntu-latest + steps: + - name: No-op + run: echo '' + outputs: + conan_remote_name: ${{ env.CONAN_REMOTE_NAME }} + conan_remote_url: ${{ env.CONAN_REMOTE_URL }} + + build-linux: + needs: generate-outputs + uses: ./.github/workflows/build-test.yml + with: + conan_remote_name: ${{ needs.generate-outputs.outputs.conan_remote_name }} + conan_remote_url: ${{ needs.generate-outputs.outputs.conan_remote_url }} + os: 'linux' + secrets: + codecov_token: ${{ secrets.CODECOV_TOKEN }} + + build-macos: + needs: generate-outputs + uses: ./.github/workflows/build-test.yml + with: + conan_remote_name: ${{ needs.generate-outputs.outputs.conan_remote_name }} + conan_remote_url: ${{ needs.generate-outputs.outputs.conan_remote_url }} + os: 'macos' + + build-windows: + needs: generate-outputs + uses: ./.github/workflows/build-test.yml + with: + conan_remote_name: ${{ needs.generate-outputs.outputs.conan_remote_name }} + conan_remote_url: ${{ needs.generate-outputs.outputs.conan_remote_url }} + os: 'windows' + + notify-clio: + needs: + - generate-outputs + - build-linux + - build-macos + - build-windows + uses: ./.github/workflows/notify-clio.yml + with: + conan_remote_name: ${{ needs.generate-outputs.outputs.conan_remote_name }} + conan_remote_url: ${{ needs.generate-outputs.outputs.conan_remote_url }} + secrets: + clio_notify_token: ${{ secrets.CLIO_NOTIFY_TOKEN }} + conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }} + conan_remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }} diff --git a/.github/workflows/on-trigger.yml b/.github/workflows/on-trigger.yml new file mode 100644 index 0000000000..a6dded7fd8 --- /dev/null +++ b/.github/workflows/on-trigger.yml @@ -0,0 +1,140 @@ +# This workflow runs all workflows to build the dependencies required for the +# project on various Linux flavors, as well as on MacOS and Windows, on a +# scheduled basis, on merge into the 'develop', 'release', or 'master' branches, +# or manually. The missing commits check is only run when the code is merged +# into the 'develop' or 'release' branches, and the documentation is built when +# the code is merged into the 'develop' branch. +name: Trigger + +on: + push: + branches: + - develop + - release + - master + paths: + - '.github/actions/build-deps/**' + - '.github/actions/build-test/**' + - '.github/scripts/strategy-matrix/**' + - '.github/workflows/build-test.yml' + - '.github/workflows/check-missing-commits.yml' + - '.github/workflows/on-trigger.yml' + - '.github/workflows/publish-docs.yml' + # Keep the list of paths below in sync with those in `on-pr.yml`. + - 'cmake/**' + - 'conan/**' + - 'external/**' + - 'include/**' + - 'src/**' + - 'tests/**' + - '.clang-format' + - '.codecov.yml' + - '.pre-commit-config.yaml' + - 'CMakeLists.txt' + - 'conanfile.py' + # Run at 06:32 UTC on every day of the week from Monday through Friday. This + # will force all dependencies to be rebuilt, which is useful to verify that + # all dependencies can be built successfully. Only the dependencies that + # are actually missing from the remote will be uploaded. + schedule: + - cron: '32 6 * * 1-5' + # Run when manually triggered via the GitHub UI or API. If `force_upload` is + # true, then the dependencies that were missing (`force_rebuild` is false) or + # rebuilt (`force_rebuild` is true) will be uploaded, overwriting existing + # dependencies if needed. + workflow_dispatch: + inputs: + dependencies_force_build: + description: 'Force building of all dependencies.' 
+ required: false + type: boolean + default: false + dependencies_force_upload: + description: 'Force uploading of all dependencies.' + required: false + type: boolean + default: false + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +defaults: + run: + shell: bash + +env: + CONAN_REMOTE_NAME: xrplf + CONAN_REMOTE_URL: https://conan.ripplex.io + +jobs: + check-missing-commits: + if: ${{ github.event_name == 'push' && github.ref_type == 'branch' && contains(fromJSON('["develop", "release"]'), github.ref_name) }} + uses: ./.github/workflows/check-missing-commits.yml + + # This job works around the limitation that GitHub Actions does not support + # using environment variables as inputs for reusable workflows. It also sets + # outputs that depend on the event that triggered the workflow. + generate-outputs: + runs-on: ubuntu-latest + steps: + - name: Check inputs and set outputs + id: generate + run: | + if [[ "${{ github.event_name }}" == 'push' ]]; then + echo 'dependencies_force_build=false' | tee "${GITHUB_OUTPUT}" + echo 'dependencies_force_upload=false' | tee "${GITHUB_OUTPUT}" + elif [[ "${{ github.event_name }}" == 'schedule' ]]; then + echo 'dependencies_force_build=true' | tee "${GITHUB_OUTPUT}" + echo 'dependencies_force_upload=false' | tee "${GITHUB_OUTPUT}" + else + echo 'dependencies_force_build=${{ inputs.dependencies_force_build }}' | tee "${GITHUB_OUTPUT}" + echo 'dependencies_force_upload=${{ inputs.dependencies_force_upload }}' | tee "${GITHUB_OUTPUT}" + fi + outputs: + conan_remote_name: ${{ env.CONAN_REMOTE_NAME }} + conan_remote_url: ${{ env.CONAN_REMOTE_URL }} + dependencies_force_build: ${{ steps.generate.outputs.dependencies_force_build }} + dependencies_force_upload: ${{ steps.generate.outputs.dependencies_force_upload }} + + build-linux: + needs: generate-outputs + uses: ./.github/workflows/build-test.yml + with: + conan_remote_name: ${{ needs.generate-outputs.outputs.conan_remote_name }} + conan_remote_url: ${{ needs.generate-outputs.outputs.conan_remote_url }} + dependencies_force_build: ${{ needs.generate-outputs.outputs.dependencies_force_build }} + dependencies_force_upload: ${{ needs.generate-outputs.outputs.dependencies_force_upload }} + os: 'linux' + strategy_matrix_all: true + secrets: + conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }} + conan_remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }} + + build-macos: + needs: generate-outputs + uses: ./.github/workflows/build-test.yml + with: + conan_remote_name: ${{ needs.generate-outputs.outputs.conan_remote_name }} + conan_remote_url: ${{ needs.generate-outputs.outputs.conan_remote_url }} + dependencies_force_build: ${{ needs.generate-outputs.outputs.dependencies_force_build }} + dependencies_force_upload: ${{ needs.generate-outputs.outputs.dependencies_force_upload }} + os: 'macos' + strategy_matrix_all: true + secrets: + conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }} + conan_remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }} + + build-windows: + needs: generate-outputs + uses: ./.github/workflows/build-test.yml + with: + conan_remote_name: ${{ needs.generate-outputs.outputs.conan_remote_name }} + conan_remote_url: ${{ needs.generate-outputs.outputs.conan_remote_url }} + dependencies_force_build: ${{ needs.generate-outputs.outputs.dependencies_force_build }} + dependencies_force_upload: ${{ needs.generate-outputs.outputs.dependencies_force_upload }} + os: 'windows' + strategy_matrix_all: true + secrets: + conan_remote_username: ${{ 
secrets.CONAN_REMOTE_USERNAME }} + conan_remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }} diff --git a/.github/workflows/publish-docs.yml b/.github/workflows/publish-docs.yml new file mode 100644 index 0000000000..a2c24bd076 --- /dev/null +++ b/.github/workflows/publish-docs.yml @@ -0,0 +1,58 @@ +# This workflow builds the documentation for the repository, and publishes it to +# GitHub Pages when changes are merged into the default branch. +name: Build and publish documentation + +on: + push: + paths: + - '.github/workflows/publish-docs.yml' + - '*.md' + - '**/*.md' + - 'docs/**' + - 'include/**' + - 'src/libxrpl/**' + - 'src/xrpld/**' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +defaults: + run: + shell: bash + +env: + BUILD_DIR: .build + +jobs: + publish: + runs-on: ubuntu-latest + container: ghcr.io/xrplf/ci/tools-rippled-documentation + steps: + - name: Checkout repository + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 + - name: Check configuration + run: | + echo 'Checking path.' + echo ${PATH} | tr ':' '\n' + + echo 'Checking environment variables.' + env | sort + + echo 'Checking CMake version.' + cmake --version + + echo 'Checking Doxygen version.' + doxygen --version + - name: Build documentation + run: | + mkdir -p ${{ env.BUILD_DIR }} + cd ${{ env.BUILD_DIR }} + cmake -Donly_docs=ON .. + cmake --build . --target docs --parallel $(nproc) + - name: Publish documentation + if: ${{ github.ref_type == 'branch' && github.ref_name == github.event.repository.default_branch }} + uses: peaceiris/actions-gh-pages@4f9cc6602d3f66b9c108549d475ec49e8ef4d45e # v4.0.0 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + publish_dir: ${{ env.BUILD_DIR }}/docs/html diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml deleted file mode 100644 index b81ffc8d3a..0000000000 --- a/.github/workflows/windows.yml +++ /dev/null @@ -1,106 +0,0 @@ -name: windows - -on: - pull_request: - types: [opened, reopened, synchronize, ready_for_review] - push: - # If the branches list is ever changed, be sure to change it on all - # build/test jobs (nix, macos, windows, instrumentation) - branches: - # Always build the package branches - - develop - - release - - master - # Branches that opt-in to running - - "ci/**" - -# https://docs.github.com/en/actions/using-jobs/using-concurrency -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true -env: - CONAN_REMOTE_URL: https://conan.ripplex.io - CONAN_REMOTE_USERNAME: ${{ secrets.CONAN_REMOTE_USERNAME }} - CONAN_REMOTE_PASSWORD: ${{ secrets.CONAN_REMOTE_PASSWORD }} - # This part of the Conan configuration is specific to this workflow only; we - # do not want to pollute the 'conan/profiles' directory with settings that - # might not work for other workflows. 
- CONAN_GLOBAL_CONF: | - core.download:parallel={{os.cpu_count()}} - core.upload:parallel={{os.cpu_count()}} - tools.build:jobs=24 - tools.build:verbosity=verbose - tools.compilation:verbosity=verbose - -jobs: - test: - if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }} - strategy: - fail-fast: false - matrix: - version: - - generator: Visual Studio 17 2022 - runs-on: windows-2022 - configuration: - - type: Release - tests: true - - type: Debug - # Skip running unit tests on debug builds, because they - # take an unreasonable amount of time - tests: false - runtime: d - runs-on: ${{ matrix.version.runs-on }} - env: - build_dir: .build - steps: - - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - - name: choose Python - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 - with: - python-version: 3.13 - - name: learn Python cache directory - id: pip-cache - shell: bash - run: | - python -m pip install --upgrade pip - echo "dir=$(pip cache dir)" | tee ${GITHUB_OUTPUT} - - name: restore Python cache directory - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 - with: - path: ${{ steps.pip-cache.outputs.dir }} - key: ${{ runner.os }}-${{ hashFiles('.github/workflows/windows.yml') }} - - name: install Conan - run: pip install wheel conan - - name: check environment - run: | - dir env: - $env:PATH -split ';' - python --version - conan --version - cmake --version - - name: configure Conan - shell: bash - run: | - echo "${CONAN_GLOBAL_CONF}" > $(conan config home)/global.conf - conan config install conan/profiles/ -tf $(conan config home)/profiles/ - conan profile show - - name: build dependencies - uses: ./.github/actions/dependencies - with: - configuration: ${{ matrix.configuration.type }} - - name: build - uses: ./.github/actions/build - with: - generator: "${{ matrix.version.generator }}" - configuration: ${{ matrix.configuration.type }} - # Hard code for now. Move to the matrix if varied options are needed - cmake-args: "-Dassert=TRUE -Dwerr=TRUE -Dreporting=OFF -Dunity=ON" - cmake-target: install - - name: test - shell: bash - if: ${{ matrix.configuration.tests }} - run: | - cd ${build_dir}/${{ matrix.configuration.type }} - ./rippled --unittest --unittest-jobs $(nproc) - ctest -j $(nproc) --output-on-failure diff --git a/.gitignore b/.gitignore index e5952e0de1..ab54adba74 100644 --- a/.gitignore +++ b/.gitignore @@ -37,10 +37,9 @@ Release/*.* *.gcov # Levelization checking -Builds/levelization/results/rawincludes.txt -Builds/levelization/results/paths.txt -Builds/levelization/results/includes/ -Builds/levelization/results/includedby/ +.github/scripts/levelization/results/* +!.github/scripts/levelization/results/loops.txt +!.github/scripts/levelization/results/ordering.txt # Ignore tmp directory. tmp diff --git a/.prettierignore b/.prettierignore new file mode 100644 index 0000000000..477120ade1 --- /dev/null +++ b/.prettierignore @@ -0,0 +1,2 @@ +external +.* diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b0ae72ae54..a5e0933d00 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -81,7 +81,7 @@ If you create new source files, they must be organized as follows: The source must be formatted according to the style guide below. -Header includes must be [levelized](./Builds/levelization). +Header includes must be [levelized](.github/scripts/levelization). Changes should be usually squashed down into a single commit. 
Some larger or more complicated change sets make more sense, diff --git a/README.md b/README.md index 4fdb89dffa..fe7daa38bc 100644 --- a/README.md +++ b/README.md @@ -42,7 +42,7 @@ If you are interested in running an **API Server** (including a **Full History S Here are some good places to start learning the source code: - Read the markdown files in the source tree: `src/ripple/**/*.md`. -- Read [the levelization document](./Builds/levelization) to get an idea of the internal dependency graph. +- Read [the levelization document](.github/scripts/levelization) to get an idea of the internal dependency graph. - In the big picture, the `main` function constructs an `ApplicationImp` object, which implements the `Application` virtual interface. Almost every component in the application takes an `Application&` parameter in its constructor, typically named `app` and stored as a member variable `app_`. This allows most components to depend on any other component. ### Repository Contents diff --git a/conan/global.conf b/conan/global.conf new file mode 100644 index 0000000000..ae03818232 --- /dev/null +++ b/conan/global.conf @@ -0,0 +1,9 @@ +# Global configuration for Conan. This is used to set the number of parallel +# downloads, uploads, and build jobs. The verbosity is set to verbose to +# provide more information during the build process. +core:non_interactive=True +core.download:parallel={{ os.cpu_count() }} +core.upload:parallel={{ os.cpu_count() }} +tools.build:jobs={{ (os.cpu_count() * 4/5) | int }} +tools.build:verbosity=verbose +tools.compilation:verbosity=verbose diff --git a/external/README.md b/external/README.md index 99ce2c337e..7de1fd25a0 100644 --- a/external/README.md +++ b/external/README.md @@ -1,7 +1,6 @@ # External Conan recipes -The subdirectories in this directory contain copies of external libraries used -by rippled. +The subdirectories in this directory contain external libraries used by rippled. | Folder | Upstream | Description | | :--------------- | :------------------------------------------------------------- | :------------------------------------------------------------------------------------------- | From b04d239926f265f2c7b5e1fd4a86b76d2479f322 Mon Sep 17 00:00:00 2001 From: Bart Date: Mon, 18 Aug 2025 10:49:55 -0400 Subject: [PATCH 126/244] fix: Modify jobs to use '>>' instead of 'tee' for GITHUB_OUTPUT (#5699) --- .github/workflows/build-test.yml | 2 +- .github/workflows/notify-clio.yml | 4 ++-- .github/workflows/on-trigger.yml | 12 ++++++------ 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml index f3b3374090..3fb8f057a2 100644 --- a/.github/workflows/build-test.yml +++ b/.github/workflows/build-test.yml @@ -70,7 +70,7 @@ jobs: - name: Generate strategy matrix working-directory: .github/scripts/strategy-matrix id: generate - run: python generate.py ${{ inputs.strategy_matrix_all && '--all' || '' }} --config=${{ inputs.os }}.json | tee "${GITHUB_OUTPUT}" + run: python generate.py ${{ inputs.strategy_matrix_all && '--all' || '' }} --config=${{ inputs.os }}.json >> "${GITHUB_OUTPUT}" outputs: matrix: ${{ steps.generate.outputs.matrix }} diff --git a/.github/workflows/notify-clio.yml b/.github/workflows/notify-clio.yml index 32ac01d61a..ae95d04db0 100644 --- a/.github/workflows/notify-clio.yml +++ b/.github/workflows/notify-clio.yml @@ -45,9 +45,9 @@ jobs: id: generate run: | echo 'Generating channel.' 
- echo channel="clio/pr_${{ github.event.pull_request.number }}" | tee "${GITHUB_OUTPUT}" + echo channel="clio/pr_${{ github.event.pull_request.number }}" >> "${GITHUB_OUTPUT}" echo 'Extracting version.' - echo version="$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" | awk -F '"' '{print $2}')" | tee "${GITHUB_OUTPUT}" + echo version="$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" | awk -F '"' '{print $2}')" >> "${GITHUB_OUTPUT}" - name: Add Conan remote run: | echo "Adding Conan remote '${{ inputs.conan_remote_name }}' at ${{ inputs.conan_remote_url }}." diff --git a/.github/workflows/on-trigger.yml b/.github/workflows/on-trigger.yml index a6dded7fd8..004e44c1fb 100644 --- a/.github/workflows/on-trigger.yml +++ b/.github/workflows/on-trigger.yml @@ -82,14 +82,14 @@ jobs: id: generate run: | if [[ "${{ github.event_name }}" == 'push' ]]; then - echo 'dependencies_force_build=false' | tee "${GITHUB_OUTPUT}" - echo 'dependencies_force_upload=false' | tee "${GITHUB_OUTPUT}" + echo 'dependencies_force_build=false' >> "${GITHUB_OUTPUT}" + echo 'dependencies_force_upload=false' >> "${GITHUB_OUTPUT}" elif [[ "${{ github.event_name }}" == 'schedule' ]]; then - echo 'dependencies_force_build=true' | tee "${GITHUB_OUTPUT}" - echo 'dependencies_force_upload=false' | tee "${GITHUB_OUTPUT}" + echo 'dependencies_force_build=true' >> "${GITHUB_OUTPUT}" + echo 'dependencies_force_upload=false' >> "${GITHUB_OUTPUT}" else - echo 'dependencies_force_build=${{ inputs.dependencies_force_build }}' | tee "${GITHUB_OUTPUT}" - echo 'dependencies_force_upload=${{ inputs.dependencies_force_upload }}' | tee "${GITHUB_OUTPUT}" + echo 'dependencies_force_build=${{ inputs.dependencies_force_build }}' >> "${GITHUB_OUTPUT}" + echo 'dependencies_force_upload=${{ inputs.dependencies_force_upload }}' >> "${GITHUB_OUTPUT}" fi outputs: conan_remote_name: ${{ env.CONAN_REMOTE_NAME }} From afc05659edbc22d04b13402f157832778556f7b7 Mon Sep 17 00:00:00 2001 From: Bart Date: Tue, 19 Aug 2025 12:46:38 -0400 Subject: [PATCH 127/244] fix: Adjust the CI workflows (#5700) --- .github/actions/build-deps/action.yml | 29 +++++-------- .github/actions/build-test/action.yml | 25 +++++------- .github/scripts/strategy-matrix/generate.py | 16 +++++++- .github/workflows/build-test.yml | 33 ++++++++++----- .github/workflows/notify-clio.yml | 15 ++++--- .github/workflows/on-pr.yml | 29 ++++--------- .github/workflows/on-trigger.yml | 45 +++++---------------- .github/workflows/publish-docs.yml | 2 + 8 files changed, 84 insertions(+), 110 deletions(-) diff --git a/.github/actions/build-deps/action.yml b/.github/actions/build-deps/action.yml index 12d80e859c..272af0f97d 100644 --- a/.github/actions/build-deps/action.yml +++ b/.github/actions/build-deps/action.yml @@ -3,46 +3,37 @@ # provided. name: Build Conan dependencies +# Note that actions do not support 'type' and all inputs are strings, see +# https://docs.github.com/en/actions/reference/workflows-and-actions/metadata-syntax#inputs. inputs: build_dir: description: 'The directory where to build.' required: true - type: string build_type: - description: 'The build type to use.' + description: 'The build type to use ("Debug", "Release").' required: true - type: choice - options: - - 'Debug' - - 'Release' conan_remote_name: description: 'The name of the Conan remote to use.' required: true - type: string conan_remote_url: description: 'The URL of the Conan endpoint to use.' 
required: true - type: string conan_remote_username: description: 'The username for logging into the Conan remote. If not provided, the dependencies will not be uploaded.' required: false - type: string default: '' conan_remote_password: description: 'The password for logging into the Conan remote. If not provided, the dependencies will not be uploaded.' required: false - type: string default: '' force_build: - description: 'Force building of all dependencies.' + description: 'Force building of all dependencies ("true", "false").' required: false - type: boolean - default: false + default: 'false' force_upload: - description: 'Force uploading of all dependencies.' + description: 'Force uploading of all dependencies ("true", "false").' required: false - type: boolean - default: false + default: 'false' runs: using: composite @@ -55,17 +46,17 @@ runs: cd ${{ inputs.build_dir }} conan install \ --output-folder . \ - --build ${{ inputs.force_build && '"*"' || 'missing' }} \ + --build ${{ inputs.force_build == 'true' && '"*"' || 'missing' }} \ --options:host '&:tests=True' \ --options:host '&:xrpld=True' \ --settings:all build_type=${{ inputs.build_type }} \ --format=json .. - name: Upload Conan dependencies - if: ${{ inputs.conan_remote_username && inputs.conan_remote_password }} + if: ${{ inputs.conan_remote_username != '' && inputs.conan_remote_password != '' }} shell: bash working-directory: ${{ inputs.build_dir }} run: | echo "Logging into Conan remote '${{ inputs.conan_remote_name }}' at ${{ inputs.conan_remote_url }}." conan remote login ${{ inputs.conan_remote_name }} "${{ inputs.conan_remote_username }}" --password "${{ inputs.conan_remote_password }}" echo 'Uploading dependencies.' - conan upload '*' --confirm --check ${{ inputs.force_upload && '--force' || '' }} --remote=${{ inputs.conan_remote_name }} + conan upload '*' --confirm --check ${{ inputs.force_upload == 'true' && '--force' || '' }} --remote=${{ inputs.conan_remote_name }} diff --git a/.github/actions/build-test/action.yml b/.github/actions/build-test/action.yml index 30337ddb98..fc01d81091 100644 --- a/.github/actions/build-test/action.yml +++ b/.github/actions/build-test/action.yml @@ -2,40 +2,33 @@ # already been installed (see the build-deps action). name: Build and Test +# Note that actions do not support 'type' and all inputs are strings, see +# https://docs.github.com/en/actions/reference/workflows-and-actions/metadata-syntax#inputs. inputs: build_dir: description: 'The directory where to build.' required: true - type: string + build_only: + description: 'Whether to only build or to build and test the code ("true", "false").' + required: false + default: 'false' build_type: - description: 'The build type to use.' + description: 'The build type to use ("Debug", "Release").' required: true - type: choice - options: - - 'Debug' - - 'Release' cmake_args: description: 'Additional arguments to pass to CMake.' required: false - type: string default: '' cmake_target: description: 'The CMake target to build.' required: true - type: string codecov_token: description: 'The Codecov token to use for uploading coverage reports.' required: false - type: string default: '' os: - description: 'The operating system to use for the build (linux, macos, or windows).' + description: 'The operating system to use for the build ("linux", "macos", "windows").' required: true - type: choice - options: - - 'linux' - - 'macos' - - 'windows' runs: using: composite @@ -82,7 +75,7 @@ runs: echo 'Verifying presence of instrumentation.' 
./rippled --version | grep libvoidstar - name: Test the binary - if: ${{ inputs.cmake_target != 'coverage' }} + if: ${{ inputs.build_only == 'true' }} shell: bash working-directory: ${{ inputs.build_dir }}/${{ inputs.os == 'windows' && inputs.build_type || '' }} run: | diff --git a/.github/scripts/strategy-matrix/generate.py b/.github/scripts/strategy-matrix/generate.py index a5180c942d..42927b5ccd 100644 --- a/.github/scripts/strategy-matrix/generate.py +++ b/.github/scripts/strategy-matrix/generate.py @@ -9,7 +9,7 @@ Generate a strategy matrix for GitHub Actions CI. On each PR commit we will build a selection of Debian, RHEL, Ubuntu, MacOS, and Windows configurations, while upon merge into the develop, release, or master -branches, we will build all configurations. +branches, we will build all configurations, and test most of them. We will further set additional CMake arguments as follows: - All builds will have the `tests`, `werr`, and `xrpld` options. @@ -25,6 +25,13 @@ def generate_strategy_matrix(all: bool, architecture: list[dict], os: list[dict] # for Windows, but it can get overridden for certain configurations. cmake_target = 'install' if os["distro_name"] == 'windows' else 'all' + # We build and test all configurations by default, except for Windows in + # Debug, because it is too slow, as well as when code coverage is + # enabled as that mode already runs the tests. + build_only = False + if os['distro_name'] == 'windows' and build_type == 'Debug': + build_only = True + # Only generate a subset of configurations in PRs. if not all: # Debian: @@ -46,6 +53,7 @@ def generate_strategy_matrix(all: bool, architecture: list[dict], os: list[dict] if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-15' and build_type == 'Debug' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/amd64': cmake_args = f'{cmake_args} -Dcoverage=ON -Dcoverage_format=xml -DCODE_COVERAGE_VERBOSE=ON -DCMAKE_C_FLAGS=-O0 -DCMAKE_CXX_FLAGS=-O0' cmake_target = 'coverage' + build_only = True skip = False if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-16' and build_type == 'Debug' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/arm64': cmake_args = f'{cmake_args} -Dvoidstar=ON' @@ -110,6 +118,11 @@ def generate_strategy_matrix(all: bool, architecture: list[dict], os: list[dict] if build_type == 'Release': cmake_args = f'{cmake_args} -Dassert=ON' + # We skip all RHEL on arm64 due to a build failure that needs further + # investigation. + if os['distro_name'] == 'rhel' and architecture['platform'] == 'linux/arm64': + continue + # Generate a unique name for the configuration, e.g. macos-arm64-debug # or debian-bookworm-gcc-12-amd64-release-unity. config_name = os['distro_name'] @@ -128,6 +141,7 @@ def generate_strategy_matrix(all: bool, architecture: list[dict], os: list[dict] 'architecture': architecture, 'os': os, 'build_type': build_type, + 'build_only': 'true' if build_only else 'false', 'cmake_args': cmake_args, 'cmake_target': cmake_target, 'config_name': config_name, diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml index 3fb8f057a2..a330f5decb 100644 --- a/.github/workflows/build-test.yml +++ b/.github/workflows/build-test.yml @@ -1,7 +1,10 @@ # This workflow builds and tests the binary for various configurations. name: Build and test -# This workflow can only be triggered by other workflows. +# This workflow can only be triggered by other workflows. 
Note that the +# workflow_call event does not support the 'choice' input type, see +# https://docs.github.com/en/actions/reference/workflows-and-actions/workflow-syntax#onworkflow_callinputsinput_idtype, +# so we use 'string' instead. on: workflow_call: inputs: @@ -29,14 +32,15 @@ on: type: boolean default: false os: - description: 'The operating system to use for the build (linux, macos, or windows).' + description: 'The operating system to use for the build ("linux", "macos", "windows").' required: true type: string - strategy_matrix_all: - description: 'Generate a strategy matrix containing all configurations.' + strategy_matrix: + # TODO: Support additional strategies, e.g. "ubuntu" for generating all Ubuntu configurations. + description: 'The strategy matrix to use for generating the configurations ("minimal", "all").' required: false - type: boolean - default: false + type: string + default: 'minimal' secrets: codecov_token: description: 'The Codecov token to use for uploading coverage reports.' @@ -70,7 +74,7 @@ jobs: - name: Generate strategy matrix working-directory: .github/scripts/strategy-matrix id: generate - run: python generate.py ${{ inputs.strategy_matrix_all && '--all' || '' }} --config=${{ inputs.os }}.json >> "${GITHUB_OUTPUT}" + run: python generate.py ${{ inputs.strategy_matrix == 'all' && '--all' || '' }} --config=${{ inputs.os }}.json >> "${GITHUB_OUTPUT}" outputs: matrix: ${{ steps.generate.outputs.matrix }} @@ -101,9 +105,11 @@ jobs: uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: python-version: 3.13 - - name: Install Conan (Windows) + - name: Install build tools (Windows) if: ${{ inputs.os == 'windows' }} - run: pip install wheel conan + run: | + echo 'Installing build tools.' + pip install wheel conan - name: Check configuration (Windows) if: ${{ inputs.os == 'windows' }} run: | @@ -115,6 +121,11 @@ jobs: echo 'Checking Conan version.' conan --version + - name: Install build tools (MacOS) + if: ${{ inputs.os == 'macos' }} + run: | + echo 'Installing build tools.' + brew install cmake conan ninja coreutils - name: Check configuration (Linux and MacOS) if: ${{ inputs.os == 'linux' || inputs.os == 'macos' }} run: | @@ -135,6 +146,9 @@ jobs: echo 'Checking Ninja version.' ninja --version + + echo 'Checking nproc version.' + nproc --version - name: Set up Conan home directory (MacOS) if: ${{ inputs.os == 'macos' }} run: | @@ -184,6 +198,7 @@ jobs: uses: ./.github/actions/build-test with: build_dir: ${{ inputs.build_dir }} + build_only: ${{ matrix.build_only }} build_type: ${{ matrix.build_type }} cmake_args: ${{ matrix.cmake_args }} cmake_target: ${{ matrix.cmake_target }} diff --git a/.github/workflows/notify-clio.yml b/.github/workflows/notify-clio.yml index ae95d04db0..6ccf527ea6 100644 --- a/.github/workflows/notify-clio.yml +++ b/.github/workflows/notify-clio.yml @@ -44,10 +44,11 @@ jobs: - name: Generate outputs id: generate run: | - echo 'Generating channel.' - echo channel="clio/pr_${{ github.event.pull_request.number }}" >> "${GITHUB_OUTPUT}" + echo 'Generating user and channel.' + echo "user=clio" >> "${GITHUB_OUTPUT}" + echo "channel=pr_${{ github.event.pull_request.number }}" >> "${GITHUB_OUTPUT}" echo 'Extracting version.' 
- echo version="$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" | awk -F '"' '{print $2}')" >> "${GITHUB_OUTPUT}" + echo "version=$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" | awk -F '"' '{print $2}')" >> "${GITHUB_OUTPUT}" - name: Add Conan remote run: | echo "Adding Conan remote '${{ inputs.conan_remote_name }}' at ${{ inputs.conan_remote_url }}." @@ -58,10 +59,8 @@ jobs: run: conan remote login ${{ inputs.conan_remote_name }} "${{ secrets.conan_remote_username }}" --password "${{ secrets.conan_remote_password }}" - name: Upload package run: | - echo 'Exporting package to channel ${{ steps.generate.outputs.channel }}.' - conan export --channel=${{ steps.generate.outputs.channel }} . - echo 'Uploading package version ${{ steps.generate.outputs.version }} on channel ${{ steps.generate.outputs.channel }}.' - conan upload --confirm --check --remote=${{ inputs.conan_remote_name }} xrpl/${{ steps.generate.outputs.version }}@${{ steps.generate.outputs.channel }} + conan export --user=${{ steps.generate.outputs.user }} --channel=${{ steps.generate.outputs.channel }} . + conan upload --confirm --check --remote=${{ inputs.conan_remote_name }} xrpl/${{ steps.generate.outputs.version }}@${{ steps.generate.outputs.user }}/${{ steps.generate.outputs.channel }} outputs: channel: ${{ steps.generate.outputs.channel }} version: ${{ steps.generate.outputs.version }} @@ -76,5 +75,5 @@ jobs: run: | gh api --method POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \ /repos/xrplf/clio/dispatches -f "event_type=check_libxrpl" \ - -F "client_payload[version]=${{ needs.upload.outputs.version }}@${{ needs.upload.outputs.channel }}" \ + -F "client_payload[version]=${{ needs.upload.outputs.version }}@${{ needs.upload.outputs.user }}/${{ needs.upload.outputs.channel }}" \ -F "client_payload[pr]=${{ github.event.pull_request.number }}" diff --git a/.github/workflows/on-pr.yml b/.github/workflows/on-pr.yml index 10bc61797a..9d7bbbf89c 100644 --- a/.github/workflows/on-pr.yml +++ b/.github/workflows/on-pr.yml @@ -71,7 +71,7 @@ jobs: - name: No-op run: echo '' - check-clang-format: + check-format: needs: should-run uses: ./.github/workflows/check-format.yml @@ -91,38 +91,23 @@ jobs: conan_remote_name: ${{ env.CONAN_REMOTE_NAME }} conan_remote_url: ${{ env.CONAN_REMOTE_URL }} - build-linux: + build-test: needs: generate-outputs uses: ./.github/workflows/build-test.yml + strategy: + matrix: + os: [linux, macos, windows] with: conan_remote_name: ${{ needs.generate-outputs.outputs.conan_remote_name }} conan_remote_url: ${{ needs.generate-outputs.outputs.conan_remote_url }} - os: 'linux' + os: ${{ matrix.os }} secrets: codecov_token: ${{ secrets.CODECOV_TOKEN }} - build-macos: - needs: generate-outputs - uses: ./.github/workflows/build-test.yml - with: - conan_remote_name: ${{ needs.generate-outputs.outputs.conan_remote_name }} - conan_remote_url: ${{ needs.generate-outputs.outputs.conan_remote_url }} - os: 'macos' - - build-windows: - needs: generate-outputs - uses: ./.github/workflows/build-test.yml - with: - conan_remote_name: ${{ needs.generate-outputs.outputs.conan_remote_name }} - conan_remote_url: ${{ needs.generate-outputs.outputs.conan_remote_url }} - os: 'windows' - notify-clio: needs: - generate-outputs - - build-linux - - build-macos - - build-windows + - build-test uses: ./.github/workflows/notify-clio.yml with: conan_remote_name: ${{ needs.generate-outputs.outputs.conan_remote_name }} diff --git a/.github/workflows/on-trigger.yml 
b/.github/workflows/on-trigger.yml index 004e44c1fb..ed9a794985 100644 --- a/.github/workflows/on-trigger.yml +++ b/.github/workflows/on-trigger.yml @@ -81,10 +81,10 @@ jobs: - name: Check inputs and set outputs id: generate run: | - if [[ "${{ github.event_name }}" == 'push' ]]; then + if [[ '${{ github.event_name }}' == 'push' ]]; then echo 'dependencies_force_build=false' >> "${GITHUB_OUTPUT}" echo 'dependencies_force_upload=false' >> "${GITHUB_OUTPUT}" - elif [[ "${{ github.event_name }}" == 'schedule' ]]; then + elif [[ '${{ github.event_name }}' == 'schedule' ]]; then echo 'dependencies_force_build=true' >> "${GITHUB_OUTPUT}" echo 'dependencies_force_upload=false' >> "${GITHUB_OUTPUT}" else @@ -97,44 +97,19 @@ jobs: dependencies_force_build: ${{ steps.generate.outputs.dependencies_force_build }} dependencies_force_upload: ${{ steps.generate.outputs.dependencies_force_upload }} - build-linux: + build-test: needs: generate-outputs uses: ./.github/workflows/build-test.yml + strategy: + matrix: + os: [linux, macos, windows] with: conan_remote_name: ${{ needs.generate-outputs.outputs.conan_remote_name }} conan_remote_url: ${{ needs.generate-outputs.outputs.conan_remote_url }} - dependencies_force_build: ${{ needs.generate-outputs.outputs.dependencies_force_build }} - dependencies_force_upload: ${{ needs.generate-outputs.outputs.dependencies_force_upload }} - os: 'linux' - strategy_matrix_all: true - secrets: - conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }} - conan_remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }} - - build-macos: - needs: generate-outputs - uses: ./.github/workflows/build-test.yml - with: - conan_remote_name: ${{ needs.generate-outputs.outputs.conan_remote_name }} - conan_remote_url: ${{ needs.generate-outputs.outputs.conan_remote_url }} - dependencies_force_build: ${{ needs.generate-outputs.outputs.dependencies_force_build }} - dependencies_force_upload: ${{ needs.generate-outputs.outputs.dependencies_force_upload }} - os: 'macos' - strategy_matrix_all: true - secrets: - conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }} - conan_remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }} - - build-windows: - needs: generate-outputs - uses: ./.github/workflows/build-test.yml - with: - conan_remote_name: ${{ needs.generate-outputs.outputs.conan_remote_name }} - conan_remote_url: ${{ needs.generate-outputs.outputs.conan_remote_url }} - dependencies_force_build: ${{ needs.generate-outputs.outputs.dependencies_force_build }} - dependencies_force_upload: ${{ needs.generate-outputs.outputs.dependencies_force_upload }} - os: 'windows' - strategy_matrix_all: true + dependencies_force_build: ${{ needs.generate-outputs.outputs.dependencies_force_build == 'true' }} + dependencies_force_upload: ${{ needs.generate-outputs.outputs.dependencies_force_upload == 'true' }} + os: ${{ matrix.os }} + strategy_matrix: 'all' secrets: conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }} conan_remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }} diff --git a/.github/workflows/publish-docs.yml b/.github/workflows/publish-docs.yml index a2c24bd076..509644e6b5 100644 --- a/.github/workflows/publish-docs.yml +++ b/.github/workflows/publish-docs.yml @@ -28,6 +28,8 @@ jobs: publish: runs-on: ubuntu-latest container: ghcr.io/xrplf/ci/tools-rippled-documentation + permissions: + contents: write steps: - name: Checkout repository uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 From 56c1e078f2367da8d5c42b6ef1b7ffe0d0da6bec Mon Sep 17 00:00:00 2001 From: 
Bart Date: Wed, 20 Aug 2025 19:25:40 -0400 Subject: [PATCH 128/244] fix: Correctly check for build_only when deciding whether to run tests (#5708) This change modifies the `build_only` check used to determine whether to run tests. For easier debugging in the future it also prints out the contents of the strategy matrix. --- .github/actions/build-test/action.yml | 2 +- .github/workflows/build-test.yml | 13 +++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/.github/actions/build-test/action.yml b/.github/actions/build-test/action.yml index fc01d81091..44292e7318 100644 --- a/.github/actions/build-test/action.yml +++ b/.github/actions/build-test/action.yml @@ -75,7 +75,7 @@ runs: echo 'Verifying presence of instrumentation.' ./rippled --version | grep libvoidstar - name: Test the binary - if: ${{ inputs.build_only == 'true' }} + if: ${{ inputs.build_only == 'false' }} shell: bash working-directory: ${{ inputs.build_dir }}/${{ inputs.os == 'windows' && inputs.build_type || '' }} run: | diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml index a330f5decb..2fa557f671 100644 --- a/.github/workflows/build-test.yml +++ b/.github/workflows/build-test.yml @@ -88,6 +88,19 @@ jobs: runs-on: ${{ matrix.architecture.runner }} container: ${{ inputs.os == 'linux' && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version) || null }} steps: + - name: Check strategy matrix + run: | + echo 'Operating system distro name: ${{ matrix.os.distro_name }}' + echo 'Operating system distro version: ${{ matrix.os.distro_version }}' + echo 'Operating system compiler name: ${{ matrix.os.compiler_name }}' + echo 'Operating system compiler version: ${{ matrix.os.compiler_version }}' + echo 'Architecture platform: ${{ matrix.architecture.platform }}' + echo 'Architecture runner: ${{ toJson(matrix.architecture.runner) }}' + echo 'Build type: ${{ matrix.build_type }}' + echo 'Build only: ${{ matrix.build_only }}' + echo 'CMake arguments: ${{ matrix.cmake_args }}' + echo 'CMake target: ${{ matrix.cmake_target }}' + echo 'Config name: ${{ matrix.config_name }}' - name: Clean workspace (MacOS) if: ${{ inputs.os == 'macos' }} run: | From f847e3287cf50a6d31d6b31e8e7db3e57bf3aa8d Mon Sep 17 00:00:00 2001 From: Bart Date: Thu, 21 Aug 2025 07:41:00 -0400 Subject: [PATCH 129/244] Update Conan dependencies: OpenSSL (#5617) This change updates OpenSSL from 1.1.1w to 3.5.2. The code works as-is, but many functions have been marked as deprecated and thus will need to be rewritten. For now we explicitly add the `-DOPENSSL_SUPPRESS_DEPRECATED` to give us time to do so, while providing us with the benefits of the updated version. 
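To illustrate the kind of rewrite the deprecations will eventually require (this sketch is not part of the patch and is not taken from the rippled codebase), a typical migration replaces a low-level OpenSSL 1.x digest call with the EVP interface; until that is done, `-DOPENSSL_SUPPRESS_DEPRECATED` only silences the deprecation warnings and does not change behavior.

```cpp
// Minimal illustrative sketch of migrating a deprecated OpenSSL 1.x digest
// call to the EVP API recommended in OpenSSL 3.x. Not from the rippled
// codebase; function names here are examples only.
#include <openssl/evp.h>
#include <openssl/sha.h>

#include <array>
#include <string>

// Old style: SHA256() is one of the low-level digest functions deprecated in
// OpenSSL 3.0. It still compiles when OPENSSL_SUPPRESS_DEPRECATED is defined.
std::array<unsigned char, SHA256_DIGEST_LENGTH>
sha256_deprecated(std::string const& msg)
{
    std::array<unsigned char, SHA256_DIGEST_LENGTH> out{};
    SHA256(
        reinterpret_cast<unsigned char const*>(msg.data()),
        msg.size(),
        out.data());
    return out;
}

// New style: the EVP one-shot interface, which is the supported replacement.
std::array<unsigned char, SHA256_DIGEST_LENGTH>
sha256_evp(std::string const& msg)
{
    std::array<unsigned char, SHA256_DIGEST_LENGTH> out{};
    unsigned int len = 0;
    EVP_Digest(msg.data(), msg.size(), out.data(), &len, EVP_sha256(), nullptr);
    return out;
}
```

Because the deprecated calls keep compiling under the suppression macro, such a migration can be carried out incrementally, one call site at a time.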
--- cmake/RippledCompiler.cmake | 17 ++++++++++------- conanfile.py | 2 +- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/cmake/RippledCompiler.cmake b/cmake/RippledCompiler.cmake index bc3a62a48c..4d16222cbe 100644 --- a/cmake/RippledCompiler.cmake +++ b/cmake/RippledCompiler.cmake @@ -16,13 +16,16 @@ set(CMAKE_CXX_EXTENSIONS OFF) target_compile_definitions (common INTERFACE $<$:DEBUG _DEBUG> - $<$,$>>:NDEBUG>) - # ^^^^ NOTE: CMAKE release builds already have NDEBUG - # defined, so no need to add it explicitly except for - # this special case of (profile ON) and (assert OFF) - # -- presumably this is because we don't want profile - # builds asserting unless asserts were specifically - # requested + #[===[ + NOTE: CMAKE release builds already have NDEBUG defined, so no need to add it + explicitly except for the special case of (profile ON) and (assert OFF). + Presumably this is because we don't want profile builds asserting unless + asserts were specifically requested. + ]===] + $<$,$>>:NDEBUG> + # TODO: Remove once we have migrated functions from OpenSSL 1.x to 3.x. + OPENSSL_SUPPRESS_DEPRECATED +) if (MSVC) # remove existing exception flag since we set it to -EHa diff --git a/conanfile.py b/conanfile.py index ab4657277c..da99836157 100644 --- a/conanfile.py +++ b/conanfile.py @@ -27,7 +27,7 @@ class Xrpl(ConanFile): 'grpc/1.50.1', 'libarchive/3.8.1', 'nudb/2.0.9', - 'openssl/1.1.1w', + 'openssl/3.5.2', 'soci/4.0.3', 'zlib/1.3.1', ] From b13370ac0d207217354f1fc1c29aef87769fb8a1 Mon Sep 17 00:00:00 2001 From: Bart Date: Thu, 21 Aug 2025 15:22:25 -0400 Subject: [PATCH 130/244] chore: Reverts formatting changes to external files, adds formatting changes to proto files (#5711) This change reverts the formatting applied to external files and adds formatting of proto files. As clang-format will complain if a proto file is modified or moved, since the .clang-format file does not explicitly contain a section for proto files, the change has been included in this PR as well. 
--- .clang-format | 5 + external/{antithesis-sdk => }/.clang-format | 2 +- external/ed25519-donna/README.md | 97 +- external/ed25519-donna/fuzz/README.md | 99 +- external/secp256k1/CHANGELOG.md | 144 +- external/secp256k1/CMakePresets.json | 6 +- external/secp256k1/CONTRIBUTING.md | 74 +- external/secp256k1/README.md | 130 +- external/secp256k1/SECURITY.md | 10 +- external/secp256k1/doc/ellswift.md | 410 +- external/secp256k1/doc/musig.md | 3 +- external/secp256k1/doc/release-process.md | 40 +- .../secp256k1/doc/safegcd_implementation.md | 301 +- .../ecdsa_secp256k1_sha256_bitcoin_test.json | 8086 ++++++++--------- .../proto/org/xrpl/rpc/v1/get_ledger.proto | 114 +- .../org/xrpl/rpc/v1/get_ledger_data.proto | 63 +- .../org/xrpl/rpc/v1/get_ledger_diff.proto | 30 +- .../org/xrpl/rpc/v1/get_ledger_entry.proto | 32 +- .../xrpl/proto/org/xrpl/rpc/v1/ledger.proto | 88 +- .../proto/org/xrpl/rpc/v1/xrp_ledger.proto | 3 - include/xrpl/proto/ripple.proto | 500 +- 21 files changed, 5056 insertions(+), 5181 deletions(-) rename external/{antithesis-sdk => }/.clang-format (54%) diff --git a/.clang-format b/.clang-format index cfd991e64b..9c3820a6bf 100644 --- a/.clang-format +++ b/.clang-format @@ -100,3 +100,8 @@ Language: JavaScript --- Language: Json IndentWidth: 2 +--- +Language: Proto +BasedOnStyle: Google +ColumnLimit: 0 +IndentWidth: 2 diff --git a/external/antithesis-sdk/.clang-format b/external/.clang-format similarity index 54% rename from external/antithesis-sdk/.clang-format rename to external/.clang-format index e871ed18b4..a5121ff074 100644 --- a/external/antithesis-sdk/.clang-format +++ b/external/.clang-format @@ -1,3 +1,3 @@ --- DisableFormat: true -SortIncludes: false +SortIncludes: Never diff --git a/external/ed25519-donna/README.md b/external/ed25519-donna/README.md index 31b2431632..e09fc27e31 100644 --- a/external/ed25519-donna/README.md +++ b/external/ed25519-donna/README.md @@ -1,12 +1,12 @@ -[ed25519](http://ed25519.cr.yp.to/) is an -[Elliptic Curve Digital Signature Algortithm](http://en.wikipedia.org/wiki/Elliptic_Curve_DSA), -developed by [Dan Bernstein](http://cr.yp.to/djb.html), -[Niels Duif](http://www.nielsduif.nl/), -[Tanja Lange](http://hyperelliptic.org/tanja), -[Peter Schwabe](http://www.cryptojedi.org/users/peter/), +[ed25519](http://ed25519.cr.yp.to/) is an +[Elliptic Curve Digital Signature Algortithm](http://en.wikipedia.org/wiki/Elliptic_Curve_DSA), +developed by [Dan Bernstein](http://cr.yp.to/djb.html), +[Niels Duif](http://www.nielsduif.nl/), +[Tanja Lange](http://hyperelliptic.org/tanja), +[Peter Schwabe](http://www.cryptojedi.org/users/peter/), and [Bo-Yin Yang](http://www.iis.sinica.edu.tw/pages/byyang/). -This project provides performant, portable 32-bit & 64-bit implementations. All implementations are +This project provides performant, portable 32-bit & 64-bit implementations. All implementations are of course constant time in regard to secret data. #### Performance @@ -52,35 +52,35 @@ are made. #### Compilation -No configuration is needed **if you are compiling against OpenSSL**. +No configuration is needed **if you are compiling against OpenSSL**. ##### Hash Options If you are not compiling aginst OpenSSL, you will need a hash function. -To use a simple/**slow** implementation of SHA-512, use `-DED25519_REFHASH` when compiling `ed25519.c`. +To use a simple/**slow** implementation of SHA-512, use `-DED25519_REFHASH` when compiling `ed25519.c`. This should never be used except to verify the code works when OpenSSL is not available. 
-To use a custom hash function, use `-DED25519_CUSTOMHASH` when compiling `ed25519.c` and put your +To use a custom hash function, use `-DED25519_CUSTOMHASH` when compiling `ed25519.c` and put your custom hash implementation in ed25519-hash-custom.h. The hash must have a 512bit digest and implement - struct ed25519_hash_context; + struct ed25519_hash_context; - void ed25519_hash_init(ed25519_hash_context *ctx); - void ed25519_hash_update(ed25519_hash_context *ctx, const uint8_t *in, size_t inlen); - void ed25519_hash_final(ed25519_hash_context *ctx, uint8_t *hash); - void ed25519_hash(uint8_t *hash, const uint8_t *in, size_t inlen); + void ed25519_hash_init(ed25519_hash_context *ctx); + void ed25519_hash_update(ed25519_hash_context *ctx, const uint8_t *in, size_t inlen); + void ed25519_hash_final(ed25519_hash_context *ctx, uint8_t *hash); + void ed25519_hash(uint8_t *hash, const uint8_t *in, size_t inlen); ##### Random Options If you are not compiling aginst OpenSSL, you will need a random function for batch verification. -To use a custom random function, use `-DED25519_CUSTOMRANDOM` when compiling `ed25519.c` and put your +To use a custom random function, use `-DED25519_CUSTOMRANDOM` when compiling `ed25519.c` and put your custom hash implementation in ed25519-randombytes-custom.h. The random function must implement: - void ED25519_FN(ed25519_randombytes_unsafe) (void *p, size_t len); + void ED25519_FN(ed25519_randombytes_unsafe) (void *p, size_t len); -Use `-DED25519_TEST` when compiling `ed25519.c` to use a deterministically seeded, non-thread safe CSPRNG +Use `-DED25519_TEST` when compiling `ed25519.c` to use a deterministically seeded, non-thread safe CSPRNG variant of Bob Jenkins [ISAAC](http://en.wikipedia.org/wiki/ISAAC_%28cipher%29) ##### Minor options @@ -91,79 +91,80 @@ Use `-DED25519_FORCE_32BIT` to force the use of 32 bit routines even when compil ##### 32-bit - gcc ed25519.c -m32 -O3 -c + gcc ed25519.c -m32 -O3 -c ##### 64-bit - gcc ed25519.c -m64 -O3 -c + gcc ed25519.c -m64 -O3 -c ##### SSE2 - gcc ed25519.c -m32 -O3 -c -DED25519_SSE2 -msse2 - gcc ed25519.c -m64 -O3 -c -DED25519_SSE2 + gcc ed25519.c -m32 -O3 -c -DED25519_SSE2 -msse2 + gcc ed25519.c -m64 -O3 -c -DED25519_SSE2 clang and icc are also supported + #### Usage To use the code, link against `ed25519.o -mbits` and: - #include "ed25519.h" + #include "ed25519.h" Add `-lssl -lcrypto` when using OpenSSL (Some systems don't need -lcrypto? It might be trial and error). 
To generate a private key, simply generate 32 bytes from a secure cryptographic source: - ed25519_secret_key sk; - randombytes(sk, sizeof(ed25519_secret_key)); + ed25519_secret_key sk; + randombytes(sk, sizeof(ed25519_secret_key)); To generate a public key: - ed25519_public_key pk; - ed25519_publickey(sk, pk); + ed25519_public_key pk; + ed25519_publickey(sk, pk); To sign a message: - ed25519_signature sig; - ed25519_sign(message, message_len, sk, pk, signature); + ed25519_signature sig; + ed25519_sign(message, message_len, sk, pk, signature); To verify a signature: - int valid = ed25519_sign_open(message, message_len, pk, signature) == 0; + int valid = ed25519_sign_open(message, message_len, pk, signature) == 0; To batch verify signatures: - const unsigned char *mp[num] = {message1, message2..} - size_t ml[num] = {message_len1, message_len2..} - const unsigned char *pkp[num] = {pk1, pk2..} - const unsigned char *sigp[num] = {signature1, signature2..} - int valid[num] + const unsigned char *mp[num] = {message1, message2..} + size_t ml[num] = {message_len1, message_len2..} + const unsigned char *pkp[num] = {pk1, pk2..} + const unsigned char *sigp[num] = {signature1, signature2..} + int valid[num] - /* valid[i] will be set to 1 if the individual signature was valid, 0 otherwise */ - int all_valid = ed25519_sign_open_batch(mp, ml, pkp, sigp, num, valid) == 0; + /* valid[i] will be set to 1 if the individual signature was valid, 0 otherwise */ + int all_valid = ed25519_sign_open_batch(mp, ml, pkp, sigp, num, valid) == 0; -**Note**: Batch verification uses `ed25519_randombytes_unsafe`, implemented in -`ed25519-randombytes.h`, to generate random scalars for the verification code. +**Note**: Batch verification uses `ed25519_randombytes_unsafe`, implemented in +`ed25519-randombytes.h`, to generate random scalars for the verification code. The default implementation now uses OpenSSLs `RAND_bytes`. Unlike the [SUPERCOP](http://bench.cr.yp.to/supercop.html) version, signatures are -not appended to messages, and there is no need for padding in front of messages. -Additionally, the secret key does not contain a copy of the public key, so it is +not appended to messages, and there is no need for padding in front of messages. +Additionally, the secret key does not contain a copy of the public key, so it is 32 bytes instead of 64 bytes, and the public key must be provided to the signing function. ##### Curve25519 -Curve25519 public keys can be generated thanks to -[Adam Langley](http://www.imperialviolet.org/2013/05/10/fastercurve25519.html) +Curve25519 public keys can be generated thanks to +[Adam Langley](http://www.imperialviolet.org/2013/05/10/fastercurve25519.html) leveraging Ed25519's precomputed basepoint scalar multiplication. - curved25519_key sk, pk; - randombytes(sk, sizeof(curved25519_key)); - curved25519_scalarmult_basepoint(pk, sk); + curved25519_key sk, pk; + randombytes(sk, sizeof(curved25519_key)); + curved25519_scalarmult_basepoint(pk, sk); -Note the name is curved25519, a combination of curve and ed25519, to prevent +Note the name is curved25519, a combination of curve and ed25519, to prevent name clashes. Performance is slightly faster than short message ed25519 signing due to both using the same code for the scalar multiply. @@ -179,4 +180,4 @@ with extreme values to ensure they function correctly. SSE2 is now supported. 
#### Papers -[Available on the Ed25519 website](http://ed25519.cr.yp.to/papers.html) +[Available on the Ed25519 website](http://ed25519.cr.yp.to/papers.html) \ No newline at end of file diff --git a/external/ed25519-donna/fuzz/README.md b/external/ed25519-donna/fuzz/README.md index 0a5cd49177..306ddfe08c 100644 --- a/external/ed25519-donna/fuzz/README.md +++ b/external/ed25519-donna/fuzz/README.md @@ -1,78 +1,78 @@ This code fuzzes ed25519-donna (and optionally ed25519-donna-sse2) against the ref10 implementations of -[curve25519](https://github.com/floodyberry/supercop/tree/master/crypto_scalarmult/curve25519/ref10) and +[curve25519](https://github.com/floodyberry/supercop/tree/master/crypto_scalarmult/curve25519/ref10) and [ed25519](https://github.com/floodyberry/supercop/tree/master/crypto_sign/ed25519/ref10). Curve25519 tests that generating a public key from a secret key # Building -## \*nix + PHP +## *nix + PHP `php build-nix.php (required parameters) (optional parameters)` Required parameters: -- `--function=[curve25519,ed25519]` -- `--bits=[32,64]` +* `--function=[curve25519,ed25519]` +* `--bits=[32,64]` Optional parameters: -- `--with-sse2` +* `--with-sse2` - Also fuzz against ed25519-donna-sse2 + Also fuzz against ed25519-donna-sse2 +* `--with-openssl` -- `--with-openssl` + Build with OpenSSL's SHA-512. - Build with OpenSSL's SHA-512. + Default: Reference SHA-512 implementation (slow!) - Default: Reference SHA-512 implementation (slow!) +* `--compiler=[gcc,clang,icc]` -- `--compiler=[gcc,clang,icc]` + Default: gcc - Default: gcc +* `--no-asm` -- `--no-asm` + Do not use platform specific assembler - Do not use platform specific assembler example: - - php build-nix.php --bits=64 --function=ed25519 --with-sse2 --compiler=icc + + php build-nix.php --bits=64 --function=ed25519 --with-sse2 --compiler=icc ## Windows Create a project with access to the ed25519 files. -If you are not using OpenSSL, add the `ED25519_REFHASH` define to the projects +If you are not using OpenSSL, add the `ED25519_REFHASH` define to the projects "Properties/Preprocessor/Preprocessor Definitions" option Add the following files to the project: -- `fuzz/curve25519-ref10.c` -- `fuzz/ed25519-ref10.c` -- `fuzz/ed25519-donna.c` -- `fuzz/ed25519-donna-sse2.c` (optional) -- `fuzz-[curve25519/ed25519].c` (depending on which you want to fuzz) +* `fuzz/curve25519-ref10.c` +* `fuzz/ed25519-ref10.c` +* `fuzz/ed25519-donna.c` +* `fuzz/ed25519-donna-sse2.c` (optional) +* `fuzz-[curve25519/ed25519].c` (depending on which you want to fuzz) -If you are also fuzzing against ed25519-donna-sse2, add the `ED25519_SSE2` define for `fuzz-[curve25519/ed25519].c` under +If you are also fuzzing against ed25519-donna-sse2, add the `ED25519_SSE2` define for `fuzz-[curve25519/ed25519].c` under its "Properties/Preprocessor/Preprocessor Definitions" option. # Running -If everything agrees, the program will only output occasional status dots (every 0x1000 passes) +If everything agrees, the program will only output occasional status dots (every 0x1000 passes) and a 64bit progress count (every 0x20000 passes): fuzzing: ref10 curved25519 curved25519-sse2 - + ................................ [0000000000020000] ................................ [0000000000040000] ................................ [0000000000060000] ................................ [0000000000080000] ................................ [00000000000a0000] ................................ 
[00000000000c0000] - + If any of the implementations do not agree with the ref10 implementation, the program will dump -the random data that was used, the data generated by the ref10 implementation, and diffs of the +the random data that was used, the data generated by the ref10 implementation, and diffs of the ed25519-donna data against the ref10 data. ## Example errors @@ -83,21 +83,21 @@ These are example error dumps (with intentionally introduced errors). Random data: -- sk, or Secret Key -- m, or Message +* sk, or Secret Key +* m, or Message Generated data: -- pk, or Public Key -- sig, or Signature -- valid, or if the signature of the message is valid with the public key +* pk, or Public Key +* sig, or Signature +* valid, or if the signature of the message is valid with the public key Dump: sk: 0x3b,0xb7,0x17,0x7a,0x66,0xdc,0xb7,0x9a,0x90,0x25,0x07,0x99,0x96,0xf3,0x92,0xef, 0x78,0xf8,0xad,0x6c,0x35,0x87,0x81,0x67,0x03,0xe6,0x95,0xba,0x06,0x18,0x7c,0x9c, - + m: 0x7c,0x8d,0x3d,0xe1,0x92,0xee,0x7a,0xb8,0x4d,0xc9,0xfb,0x02,0x34,0x1e,0x5a,0x91, 0xee,0x01,0xa6,0xb8,0xab,0x37,0x3f,0x3d,0x6d,0xa2,0x47,0xe3,0x27,0x93,0x7c,0xb7, @@ -107,66 +107,67 @@ Dump: 0x63,0x14,0xe0,0x81,0x52,0xec,0xcd,0xcf,0x70,0x54,0x7d,0xa3,0x49,0x8b,0xf0,0x89, 0x70,0x07,0x12,0x2a,0xd9,0xaa,0x16,0x01,0xb2,0x16,0x3a,0xbb,0xfc,0xfa,0x13,0x5b, 0x69,0x83,0x92,0x70,0x95,0x76,0xa0,0x8e,0x16,0x79,0xcc,0xaa,0xb5,0x7c,0xf8,0x7a, - + ref10: pk: 0x71,0xb0,0x5e,0x62,0x1b,0xe3,0xe7,0x36,0x91,0x8b,0xc0,0x13,0x36,0x0c,0xc9,0x04, 0x16,0xf5,0xff,0x48,0x0c,0x83,0x6b,0x88,0x53,0xa2,0xc6,0x0f,0xf7,0xac,0x42,0x04, - + sig: 0x3e,0x05,0xc5,0x37,0x16,0x0b,0x29,0x30,0x89,0xa3,0xe7,0x83,0x08,0x16,0xdd,0x96, 0x02,0xfa,0x0d,0x44,0x2c,0x43,0xaa,0x80,0x93,0x04,0x58,0x22,0x09,0xbf,0x11,0xa5, 0xcc,0xa5,0x3c,0x9f,0xa0,0xa4,0x64,0x5a,0x4a,0xdb,0x20,0xfb,0xc7,0x9b,0xfd,0x3f, 0x08,0xae,0xc4,0x3c,0x1e,0xd8,0xb6,0xb4,0xd2,0x6d,0x80,0x92,0xcb,0x71,0xf3,0x02, - + valid: yes - + ed25519-donna: pk diff: ____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____, ____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____, - + sig diff: 0x2c,0xb9,0x25,0x14,0xd0,0x94,0xeb,0xfe,0x46,0x02,0xc2,0xe8,0xa3,0xeb,0xbf,0xb5, 0x72,0x84,0xbf,0xc1,0x8a,0x32,0x30,0x99,0xf7,0x58,0xfe,0x06,0xa8,0xdc,0xdc,0xab, 0xb5,0x57,0x03,0x33,0x87,0xce,0x54,0x55,0x6a,0x69,0x8a,0xc4,0xb7,0x2a,0xed,0x97, 0xb4,0x68,0xe7,0x52,0x7a,0x07,0x55,0x3b,0xa2,0x94,0xd6,0x5e,0xa1,0x61,0x80,0x08, - + valid: no -In this case, the generated public key matches, but the generated signature is completely +In this case, the generated public key matches, but the generated signature is completely different and does not validate. 
### Curve25519 Random data: -- sk, or Secret Key +* sk, or Secret Key Generated data: -- pk, or Public Key +* pk, or Public Key Dump: sk: 0x44,0xec,0x0b,0x0e,0xa2,0x0e,0x9c,0x5b,0x8c,0xce,0x7b,0x1d,0x68,0xae,0x0f,0x9e, 0x81,0xe2,0x04,0x76,0xda,0x87,0xa4,0x9e,0xc9,0x4f,0x3b,0xf9,0xc3,0x89,0x63,0x70, - - + + ref10: 0x24,0x55,0x55,0xc0,0xf9,0x80,0xaf,0x02,0x43,0xee,0x8c,0x7f,0xc1,0xad,0x90,0x95, 0x57,0x91,0x14,0x2e,0xf2,0x14,0x22,0x80,0xdd,0x4e,0x3c,0x85,0x71,0x84,0x8c,0x62, - - + + curved25519 diff: 0x12,0xd1,0x61,0x2b,0x16,0xb3,0xd8,0x29,0xf8,0xa3,0xba,0x70,0x4e,0x49,0x4f,0x43, 0xa1,0x3c,0x6b,0x42,0x11,0x61,0xcc,0x30,0x87,0x73,0x46,0xfb,0x85,0xc7,0x9a,0x35, - - + + curved25519-sse2 diff: ____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____, ____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____, -In this case, curved25519 is totally wrong, while curved25519-sse2 matches the reference -implementation. + +In this case, curved25519 is totally wrong, while curved25519-sse2 matches the reference +implementation. \ No newline at end of file diff --git a/external/secp256k1/CHANGELOG.md b/external/secp256k1/CHANGELOG.md index a000672887..ee447c0c1c 100644 --- a/external/secp256k1/CHANGELOG.md +++ b/external/secp256k1/CHANGELOG.md @@ -8,189 +8,153 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [0.6.0] - 2024-11-04 #### Added - -- New module `musig` implements the MuSig2 multisignature scheme according to the [BIP 327 specification](https://github.com/bitcoin/bips/blob/master/bip-0327.mediawiki). See: - - Header file `include/secp256k1_musig.h` which defines the new API. - - Document `doc/musig.md` for further notes on API usage. - - Usage example `examples/musig.c`. -- New CMake variable `SECP256K1_APPEND_LDFLAGS` for appending linker flags to the build command. + - New module `musig` implements the MuSig2 multisignature scheme according to the [BIP 327 specification](https://github.com/bitcoin/bips/blob/master/bip-0327.mediawiki). See: + - Header file `include/secp256k1_musig.h` which defines the new API. + - Document `doc/musig.md` for further notes on API usage. + - Usage example `examples/musig.c`. + - New CMake variable `SECP256K1_APPEND_LDFLAGS` for appending linker flags to the build command. #### Changed - -- API functions now use a significantly more robust method to clear secrets from the stack before returning. However, secret clearing remains a best-effort security measure and cannot guarantee complete removal. -- Any type `secp256k1_foo` can now be forward-declared using `typedef struct secp256k1_foo secp256k1_foo;` (or also `struct secp256k1_foo;` in C++). -- Organized CMake build artifacts into dedicated directories (`bin/` for executables, `lib/` for libraries) to improve build output structure and Windows shared library compatibility. + - API functions now use a significantly more robust method to clear secrets from the stack before returning. However, secret clearing remains a best-effort security measure and cannot guarantee complete removal. + - Any type `secp256k1_foo` can now be forward-declared using `typedef struct secp256k1_foo secp256k1_foo;` (or also `struct secp256k1_foo;` in C++). + - Organized CMake build artifacts into dedicated directories (`bin/` for executables, `lib/` for libraries) to improve build output structure and Windows shared library compatibility. 
#### Removed - -- Removed the `secp256k1_scratch_space` struct and its associated functions `secp256k1_scratch_space_create` and `secp256k1_scratch_space_destroy` because the scratch space was unused in the API. + - Removed the `secp256k1_scratch_space` struct and its associated functions `secp256k1_scratch_space_create` and `secp256k1_scratch_space_destroy` because the scratch space was unused in the API. #### ABI Compatibility - The symbols `secp256k1_scratch_space_create` and `secp256k1_scratch_space_destroy` were removed. Otherwise, the library maintains backward compatibility with versions 0.3.x through 0.5.x. ## [0.5.1] - 2024-08-01 #### Added - -- Added usage example for an ElligatorSwift key exchange. + - Added usage example for an ElligatorSwift key exchange. #### Changed - -- The default size of the precomputed table for signing was changed from 22 KiB to 86 KiB. The size can be changed with the configure option `--ecmult-gen-kb` (`SECP256K1_ECMULT_GEN_KB` for CMake). -- "auto" is no longer an accepted value for the `--with-ecmult-window` and `--with-ecmult-gen-kb` configure options (this also applies to `SECP256K1_ECMULT_WINDOW_SIZE` and `SECP256K1_ECMULT_GEN_KB` in CMake). To achieve the same configuration as previously provided by the "auto" value, omit setting the configure option explicitly. + - The default size of the precomputed table for signing was changed from 22 KiB to 86 KiB. The size can be changed with the configure option `--ecmult-gen-kb` (`SECP256K1_ECMULT_GEN_KB` for CMake). + - "auto" is no longer an accepted value for the `--with-ecmult-window` and `--with-ecmult-gen-kb` configure options (this also applies to `SECP256K1_ECMULT_WINDOW_SIZE` and `SECP256K1_ECMULT_GEN_KB` in CMake). To achieve the same configuration as previously provided by the "auto" value, omit setting the configure option explicitly. #### Fixed - -- Fixed compilation when the extrakeys module is disabled. + - Fixed compilation when the extrakeys module is disabled. #### ABI Compatibility - The ABI is backward compatible with versions 0.5.0, 0.4.x and 0.3.x. ## [0.5.0] - 2024-05-06 #### Added - -- New function `secp256k1_ec_pubkey_sort` that sorts public keys using lexicographic (of compressed serialization) order. + - New function `secp256k1_ec_pubkey_sort` that sorts public keys using lexicographic (of compressed serialization) order. #### Changed - -- The implementation of the point multiplication algorithm used for signing and public key generation was changed, resulting in improved performance for those operations. - - The related configure option `--ecmult-gen-precision` was replaced with `--ecmult-gen-kb` (`SECP256K1_ECMULT_GEN_KB` for CMake). - - This changes the supported precomputed table sizes for these operations. The new supported sizes are 2 KiB, 22 KiB, or 86 KiB (while the old supported sizes were 32 KiB, 64 KiB, or 512 KiB). + - The implementation of the point multiplication algorithm used for signing and public key generation was changed, resulting in improved performance for those operations. + - The related configure option `--ecmult-gen-precision` was replaced with `--ecmult-gen-kb` (`SECP256K1_ECMULT_GEN_KB` for CMake). + - This changes the supported precomputed table sizes for these operations. The new supported sizes are 2 KiB, 22 KiB, or 86 KiB (while the old supported sizes were 32 KiB, 64 KiB, or 512 KiB). #### ABI Compatibility - The ABI is backward compatible with versions 0.4.x and 0.3.x. 
## [0.4.1] - 2023-12-21 #### Changed - -- The point multiplication algorithm used for ECDH operations (module `ecdh`) was replaced with a slightly faster one. -- Optional handwritten x86_64 assembly for field operations was removed because modern C compilers are able to output more efficient assembly. This change results in a significant speedup of some library functions when handwritten x86_64 assembly is enabled (`--with-asm=x86_64` in GNU Autotools, `-DSECP256K1_ASM=x86_64` in CMake), which is the default on x86_64. Benchmarks with GCC 10.5.0 show a 10% speedup for `secp256k1_ecdsa_verify` and `secp256k1_schnorrsig_verify`. + - The point multiplication algorithm used for ECDH operations (module `ecdh`) was replaced with a slightly faster one. + - Optional handwritten x86_64 assembly for field operations was removed because modern C compilers are able to output more efficient assembly. This change results in a significant speedup of some library functions when handwritten x86_64 assembly is enabled (`--with-asm=x86_64` in GNU Autotools, `-DSECP256K1_ASM=x86_64` in CMake), which is the default on x86_64. Benchmarks with GCC 10.5.0 show a 10% speedup for `secp256k1_ecdsa_verify` and `secp256k1_schnorrsig_verify`. #### ABI Compatibility - The ABI is backward compatible with versions 0.4.0 and 0.3.x. ## [0.4.0] - 2023-09-04 #### Added - -- New module `ellswift` implements ElligatorSwift encoding for public keys and x-only Diffie-Hellman key exchange for them. - ElligatorSwift permits representing secp256k1 public keys as 64-byte arrays which cannot be distinguished from uniformly random. See: - - Header file `include/secp256k1_ellswift.h` which defines the new API. - - Document `doc/ellswift.md` which explains the mathematical background of the scheme. - - The [paper](https://eprint.iacr.org/2022/759) on which the scheme is based. -- We now test the library with unreleased development snapshots of GCC and Clang. This gives us an early chance to catch miscompilations and constant-time issues introduced by the compiler (such as those that led to the previous two releases). + - New module `ellswift` implements ElligatorSwift encoding for public keys and x-only Diffie-Hellman key exchange for them. + ElligatorSwift permits representing secp256k1 public keys as 64-byte arrays which cannot be distinguished from uniformly random. See: + - Header file `include/secp256k1_ellswift.h` which defines the new API. + - Document `doc/ellswift.md` which explains the mathematical background of the scheme. + - The [paper](https://eprint.iacr.org/2022/759) on which the scheme is based. + - We now test the library with unreleased development snapshots of GCC and Clang. This gives us an early chance to catch miscompilations and constant-time issues introduced by the compiler (such as those that led to the previous two releases). #### Fixed - -- Fixed symbol visibility in Windows DLL builds, where three internal library symbols were wrongly exported. + - Fixed symbol visibility in Windows DLL builds, where three internal library symbols were wrongly exported. #### Changed - -- When consuming libsecp256k1 as a static library on Windows, the user must now define the `SECP256K1_STATIC` macro before including `secp256k1.h`. + - When consuming libsecp256k1 as a static library on Windows, the user must now define the `SECP256K1_STATIC` macro before including `secp256k1.h`. #### ABI Compatibility - This release is backward compatible with the ABI of 0.3.0, 0.3.1, and 0.3.2. 
Symbol visibility is now believed to be handled properly on supported platforms and is now considered to be part of the ABI. Please report any improperly exported symbols as a bug. ## [0.3.2] - 2023-05-13 - We strongly recommend updating to 0.3.2 if you use or plan to use GCC >=13 to compile libsecp256k1. When in doubt, check the GCC version using `gcc -v`. #### Security - -- Module `ecdh`: Fix "constant-timeness" issue with GCC 13.1 (and potentially future versions of GCC) that could leave applications using libsecp256k1's ECDH module vulnerable to a timing side-channel attack. The fix avoids secret-dependent control flow during ECDH computations when libsecp256k1 is compiled with GCC 13.1. + - Module `ecdh`: Fix "constant-timeness" issue with GCC 13.1 (and potentially future versions of GCC) that could leave applications using libsecp256k1's ECDH module vulnerable to a timing side-channel attack. The fix avoids secret-dependent control flow during ECDH computations when libsecp256k1 is compiled with GCC 13.1. #### Fixed - -- Fixed an old bug that permitted compilers to potentially output bad assembly code on x86_64. In theory, it could lead to a crash or a read of unrelated memory, but this has never been observed on any compilers so far. + - Fixed an old bug that permitted compilers to potentially output bad assembly code on x86_64. In theory, it could lead to a crash or a read of unrelated memory, but this has never been observed on any compilers so far. #### Changed - -- Various improvements and changes to CMake builds. CMake builds remain experimental. - - Made API versioning consistent with GNU Autotools builds. - - Switched to `BUILD_SHARED_LIBS` variable for controlling whether to build a static or a shared library. - - Added `SECP256K1_INSTALL` variable for the controlling whether to install the build artefacts. -- Renamed asm build option `arm` to `arm32`. Use `--with-asm=arm32` instead of `--with-asm=arm` (GNU Autotools), and `-DSECP256K1_ASM=arm32` instead of `-DSECP256K1_ASM=arm` (CMake). + - Various improvements and changes to CMake builds. CMake builds remain experimental. + - Made API versioning consistent with GNU Autotools builds. + - Switched to `BUILD_SHARED_LIBS` variable for controlling whether to build a static or a shared library. + - Added `SECP256K1_INSTALL` variable for the controlling whether to install the build artefacts. + - Renamed asm build option `arm` to `arm32`. Use `--with-asm=arm32` instead of `--with-asm=arm` (GNU Autotools), and `-DSECP256K1_ASM=arm32` instead of `-DSECP256K1_ASM=arm` (CMake). #### ABI Compatibility - The ABI is compatible with versions 0.3.0 and 0.3.1. ## [0.3.1] - 2023-04-10 - We strongly recommend updating to 0.3.1 if you use or plan to use Clang >=14 to compile libsecp256k1, e.g., Xcode >=14 on macOS has Clang >=14. When in doubt, check the Clang version using `clang -v`. #### Security - -- Fix "constant-timeness" issue with Clang >=14 that could leave applications using libsecp256k1 vulnerable to a timing side-channel attack. The fix avoids secret-dependent control flow and secret-dependent memory accesses in conditional moves of memory objects when libsecp256k1 is compiled with Clang >=14. + - Fix "constant-timeness" issue with Clang >=14 that could leave applications using libsecp256k1 vulnerable to a timing side-channel attack. The fix avoids secret-dependent control flow and secret-dependent memory accesses in conditional moves of memory objects when libsecp256k1 is compiled with Clang >=14. 
#### Added - -- Added tests against [Project Wycheproof's](https://github.com/google/wycheproof/) set of ECDSA test vectors (Bitcoin "low-S" variant), a fixed set of test cases designed to trigger various edge cases. + - Added tests against [Project Wycheproof's](https://github.com/google/wycheproof/) set of ECDSA test vectors (Bitcoin "low-S" variant), a fixed set of test cases designed to trigger various edge cases. #### Changed - -- Increased minimum required CMake version to 3.13. CMake builds remain experimental. + - Increased minimum required CMake version to 3.13. CMake builds remain experimental. #### ABI Compatibility - The ABI is compatible with version 0.3.0. ## [0.3.0] - 2023-03-08 #### Added - -- Added experimental support for CMake builds. Traditional GNU Autotools builds (`./configure` and `make`) remain fully supported. -- Usage examples: Added a recommended method for securely clearing sensitive data, e.g., secret keys, from memory. -- Tests: Added a new test binary `noverify_tests`. This binary runs the tests without some additional checks present in the ordinary `tests` binary and is thereby closer to production binaries. The `noverify_tests` binary is automatically run as part of the `make check` target. + - Added experimental support for CMake builds. Traditional GNU Autotools builds (`./configure` and `make`) remain fully supported. + - Usage examples: Added a recommended method for securely clearing sensitive data, e.g., secret keys, from memory. + - Tests: Added a new test binary `noverify_tests`. This binary runs the tests without some additional checks present in the ordinary `tests` binary and is thereby closer to production binaries. The `noverify_tests` binary is automatically run as part of the `make check` target. #### Fixed - -- Fixed declarations of API variables for MSVC (`__declspec(dllimport)`). This fixes MSVC builds of programs which link against a libsecp256k1 DLL dynamically and use API variables (and not only API functions). Unfortunately, the MSVC linker now will emit warning `LNK4217` when trying to link against libsecp256k1 statically. Pass `/ignore:4217` to the linker to suppress this warning. + - Fixed declarations of API variables for MSVC (`__declspec(dllimport)`). This fixes MSVC builds of programs which link against a libsecp256k1 DLL dynamically and use API variables (and not only API functions). Unfortunately, the MSVC linker now will emit warning `LNK4217` when trying to link against libsecp256k1 statically. Pass `/ignore:4217` to the linker to suppress this warning. #### Changed - -- Forbade cloning or destroying `secp256k1_context_static`. Create a new context instead of cloning the static context. (If this change breaks your code, your code is probably wrong.) -- Forbade randomizing (copies of) `secp256k1_context_static`. Randomizing a copy of `secp256k1_context_static` did not have any effect and did not provide defense-in-depth protection against side-channel attacks. Create a new context if you want to benefit from randomization. + - Forbade cloning or destroying `secp256k1_context_static`. Create a new context instead of cloning the static context. (If this change breaks your code, your code is probably wrong.) + - Forbade randomizing (copies of) `secp256k1_context_static`. Randomizing a copy of `secp256k1_context_static` did not have any effect and did not provide defense-in-depth protection against side-channel attacks. Create a new context if you want to benefit from randomization. 
#### Removed - -- Removed the configuration header `src/libsecp256k1-config.h`. We recommend passing flags to `./configure` or `cmake` to set configuration options (see `./configure --help` or `cmake -LH`). If you cannot or do not want to use one of the supported build systems, pass configuration flags such as `-DSECP256K1_ENABLE_MODULE_SCHNORRSIG` manually to the compiler (see the file `configure.ac` for supported flags). + - Removed the configuration header `src/libsecp256k1-config.h`. We recommend passing flags to `./configure` or `cmake` to set configuration options (see `./configure --help` or `cmake -LH`). If you cannot or do not want to use one of the supported build systems, pass configuration flags such as `-DSECP256K1_ENABLE_MODULE_SCHNORRSIG` manually to the compiler (see the file `configure.ac` for supported flags). #### ABI Compatibility - -Due to changes in the API regarding `secp256k1_context_static` described above, the ABI is _not_ compatible with previous versions. +Due to changes in the API regarding `secp256k1_context_static` described above, the ABI is *not* compatible with previous versions. ## [0.2.0] - 2022-12-12 #### Added - -- Added usage examples for common use cases in a new `examples/` directory. -- Added `secp256k1_selftest`, to be used in conjunction with `secp256k1_context_static`. -- Added support for 128-bit wide multiplication on MSVC for x86_64 and arm64, giving roughly a 20% speedup on those platforms. + - Added usage examples for common use cases in a new `examples/` directory. + - Added `secp256k1_selftest`, to be used in conjunction with `secp256k1_context_static`. + - Added support for 128-bit wide multiplication on MSVC for x86_64 and arm64, giving roughly a 20% speedup on those platforms. #### Changed - -- Enabled modules `schnorrsig`, `extrakeys` and `ecdh` by default in `./configure`. -- The `secp256k1_nonce_function_rfc6979` nonce function, used by default by `secp256k1_ecdsa_sign`, now reduces the message hash modulo the group order to match the specification. This only affects improper use of ECDSA signing API. + - Enabled modules `schnorrsig`, `extrakeys` and `ecdh` by default in `./configure`. + - The `secp256k1_nonce_function_rfc6979` nonce function, used by default by `secp256k1_ecdsa_sign`, now reduces the message hash modulo the group order to match the specification. This only affects improper use of ECDSA signing API. #### Deprecated - -- Deprecated context flags `SECP256K1_CONTEXT_VERIFY` and `SECP256K1_CONTEXT_SIGN`. Use `SECP256K1_CONTEXT_NONE` instead. -- Renamed `secp256k1_context_no_precomp` to `secp256k1_context_static`. -- Module `schnorrsig`: renamed `secp256k1_schnorrsig_sign` to `secp256k1_schnorrsig_sign32`. + - Deprecated context flags `SECP256K1_CONTEXT_VERIFY` and `SECP256K1_CONTEXT_SIGN`. Use `SECP256K1_CONTEXT_NONE` instead. + - Renamed `secp256k1_context_no_precomp` to `secp256k1_context_static`. + - Module `schnorrsig`: renamed `secp256k1_schnorrsig_sign` to `secp256k1_schnorrsig_sign32`. #### ABI Compatibility - Since this is the first release, we do not compare application binary interfaces. -However, there are earlier unreleased versions of libsecp256k1 that are _not_ ABI compatible with this version. +However, there are earlier unreleased versions of libsecp256k1 that are *not* ABI compatible with this version. 
## [0.1.0] - 2013-03-05 to 2021-12-25 diff --git a/external/secp256k1/CMakePresets.json b/external/secp256k1/CMakePresets.json index 60138c16bf..b35cd80579 100644 --- a/external/secp256k1/CMakePresets.json +++ b/external/secp256k1/CMakePresets.json @@ -1,9 +1,5 @@ { - "cmakeMinimumRequired": { - "major": 3, - "minor": 21, - "patch": 0 - }, + "cmakeMinimumRequired": {"major": 3, "minor": 21, "patch": 0}, "version": 3, "configurePresets": [ { diff --git a/external/secp256k1/CONTRIBUTING.md b/external/secp256k1/CONTRIBUTING.md index 88c22af02b..a366d38b0e 100644 --- a/external/secp256k1/CONTRIBUTING.md +++ b/external/secp256k1/CONTRIBUTING.md @@ -12,15 +12,15 @@ The libsecp256k1 project welcomes contributions in the form of new functionality It is the responsibility of the contributors to convince the maintainers that the proposed functionality is within the project's scope, high-quality and maintainable. Contributors are recommended to provide the following in addition to the new code: -- **Specification:** - A specification can help significantly in reviewing the new code as it provides documentation and context. - It may justify various design decisions, give a motivation and outline security goals. - If the specification contains pseudocode, a reference implementation or test vectors, these can be used to compare with the proposed libsecp256k1 code. -- **Security Arguments:** - In addition to a defining the security goals, it should be argued that the new functionality meets these goals. - Depending on the nature of the new functionality, a wide range of security arguments are acceptable, ranging from being "obviously secure" to rigorous proofs of security. -- **Relevance Arguments:** - The relevance of the new functionality for the Bitcoin ecosystem should be argued by outlining clear use cases. +* **Specification:** + A specification can help significantly in reviewing the new code as it provides documentation and context. + It may justify various design decisions, give a motivation and outline security goals. + If the specification contains pseudocode, a reference implementation or test vectors, these can be used to compare with the proposed libsecp256k1 code. +* **Security Arguments:** + In addition to a defining the security goals, it should be argued that the new functionality meets these goals. + Depending on the nature of the new functionality, a wide range of security arguments are acceptable, ranging from being "obviously secure" to rigorous proofs of security. +* **Relevance Arguments:** + The relevance of the new functionality for the Bitcoin ecosystem should be argued by outlining clear use cases. These are not the only factors taken into account when considering to add new functionality. The proposed new libsecp256k1 code must be of high quality, including API documentation and tests, as well as featuring a misuse-resistant API design. @@ -44,36 +44,36 @@ The Contributor Workflow & Peer Review in libsecp256k1 are similar to Bitcoin Co In addition, libsecp256k1 tries to maintain the following coding conventions: -- No runtime heap allocation (e.g., no `malloc`) unless explicitly requested by the caller (via `secp256k1_context_create` or `secp256k1_scratch_space_create`, for example). Moreover, it should be possible to use the library without any heap allocations. -- The tests should cover all lines and branches of the library (see [Test coverage](#coverage)). 
-- Operations involving secret data should be tested for being constant time with respect to the secrets (see [src/ctime_tests.c](src/ctime_tests.c)). -- Local variables containing secret data should be cleared explicitly to try to delete secrets from memory. -- Use `secp256k1_memcmp_var` instead of `memcmp` (see [#823](https://github.com/bitcoin-core/secp256k1/issues/823)). -- As a rule of thumb, the default values for configuration options should target standard desktop machines and align with Bitcoin Core's defaults, and the tests should mostly exercise the default configuration (see [#1549](https://github.com/bitcoin-core/secp256k1/issues/1549#issuecomment-2200559257)). +* No runtime heap allocation (e.g., no `malloc`) unless explicitly requested by the caller (via `secp256k1_context_create` or `secp256k1_scratch_space_create`, for example). Moreover, it should be possible to use the library without any heap allocations. +* The tests should cover all lines and branches of the library (see [Test coverage](#coverage)). +* Operations involving secret data should be tested for being constant time with respect to the secrets (see [src/ctime_tests.c](src/ctime_tests.c)). +* Local variables containing secret data should be cleared explicitly to try to delete secrets from memory. +* Use `secp256k1_memcmp_var` instead of `memcmp` (see [#823](https://github.com/bitcoin-core/secp256k1/issues/823)). +* As a rule of thumb, the default values for configuration options should target standard desktop machines and align with Bitcoin Core's defaults, and the tests should mostly exercise the default configuration (see [#1549](https://github.com/bitcoin-core/secp256k1/issues/1549#issuecomment-2200559257)). #### Style conventions -- Commits should be atomic and diffs should be easy to read. For this reason, do not mix any formatting fixes or code moves with actual code changes. Make sure each individual commit is hygienic: that it builds successfully on its own without warnings, errors, regressions, or test failures. -- New code should adhere to the style of existing, in particular surrounding, code. Other than that, we do not enforce strict rules for code formatting. -- The code conforms to C89. Most notably, that means that only `/* ... */` comments are allowed (no `//` line comments). Moreover, any declarations in a `{ ... }` block (e.g., a function) must appear at the beginning of the block before any statements. When you would like to declare a variable in the middle of a block, you can open a new block: - ```C - void secp256k_foo(void) { - unsigned int x; /* declaration */ - int y = 2*x; /* declaration */ - x = 17; /* statement */ - { - int a, b; /* declaration */ - a = x + y; /* statement */ - secp256k_bar(x, &b); /* statement */ - } - } - ``` -- Use `unsigned int` instead of just `unsigned`. -- Use `void *ptr` instead of `void* ptr`. -- Arguments of the publicly-facing API must have a specific order defined in [include/secp256k1.h](include/secp256k1.h). -- User-facing comment lines in headers should be limited to 80 chars if possible. -- All identifiers in file scope should start with `secp256k1_`. -- Avoid trailing whitespace. +* Commits should be atomic and diffs should be easy to read. For this reason, do not mix any formatting fixes or code moves with actual code changes. Make sure each individual commit is hygienic: that it builds successfully on its own without warnings, errors, regressions, or test failures. +* New code should adhere to the style of existing, in particular surrounding, code. 
Other than that, we do not enforce strict rules for code formatting. +* The code conforms to C89. Most notably, that means that only `/* ... */` comments are allowed (no `//` line comments). Moreover, any declarations in a `{ ... }` block (e.g., a function) must appear at the beginning of the block before any statements. When you would like to declare a variable in the middle of a block, you can open a new block: + ```C + void secp256k_foo(void) { + unsigned int x; /* declaration */ + int y = 2*x; /* declaration */ + x = 17; /* statement */ + { + int a, b; /* declaration */ + a = x + y; /* statement */ + secp256k_bar(x, &b); /* statement */ + } + } + ``` +* Use `unsigned int` instead of just `unsigned`. +* Use `void *ptr` instead of `void* ptr`. +* Arguments of the publicly-facing API must have a specific order defined in [include/secp256k1.h](include/secp256k1.h). +* User-facing comment lines in headers should be limited to 80 chars if possible. +* All identifiers in file scope should start with `secp256k1_`. +* Avoid trailing whitespace. ### Tests @@ -101,7 +101,7 @@ To create a HTML report with coloured and annotated source code: #### Exhaustive tests There are tests of several functions in which a small group replaces secp256k1. -These tests are _exhaustive_ since they provide all elements and scalars of the small group as input arguments (see [src/tests_exhaustive.c](src/tests_exhaustive.c)). +These tests are *exhaustive* since they provide all elements and scalars of the small group as input arguments (see [src/tests_exhaustive.c](src/tests_exhaustive.c)). ### Benchmarks diff --git a/external/secp256k1/README.md b/external/secp256k1/README.md index 4cd64c7fee..222e5fb768 100644 --- a/external/secp256k1/README.md +++ b/external/secp256k1/README.md @@ -1,4 +1,5 @@ -# libsecp256k1 +libsecp256k1 +============ ![Dependencies: None](https://img.shields.io/badge/dependencies-none-success) [![irc.libera.chat #secp256k1](https://img.shields.io/badge/irc.libera.chat-%23secp256k1-success)](https://web.libera.chat/#secp256k1) @@ -8,59 +9,60 @@ High-performance high-assurance C library for digital signatures and other crypt This library is intended to be the highest quality publicly available library for cryptography on the secp256k1 curve. However, the primary focus of its development has been for usage in the Bitcoin system and usage unlike Bitcoin's may be less well tested, verified, or suffer from a less well thought out interface. Correct usage requires some care and consideration that the library is fit for your application's purpose. Features: +* secp256k1 ECDSA signing/verification and key generation. +* Additive and multiplicative tweaking of secret/public keys. +* Serialization/parsing of secret keys, public keys, signatures. +* Constant time, constant memory access signing and public key generation. +* Derandomized ECDSA (via RFC6979 or with a caller provided function.) +* Very efficient implementation. +* Suitable for embedded systems. +* No runtime dependencies. +* Optional module for public key recovery. +* Optional module for ECDH key exchange. +* Optional module for Schnorr signatures according to [BIP-340](https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki). +* Optional module for ElligatorSwift key exchange according to [BIP-324](https://github.com/bitcoin/bips/blob/master/bip-0324.mediawiki). +* Optional module for MuSig2 Schnorr multi-signatures according to [BIP-327](https://github.com/bitcoin/bips/blob/master/bip-0327.mediawiki). 
-- secp256k1 ECDSA signing/verification and key generation. -- Additive and multiplicative tweaking of secret/public keys. -- Serialization/parsing of secret keys, public keys, signatures. -- Constant time, constant memory access signing and public key generation. -- Derandomized ECDSA (via RFC6979 or with a caller provided function.) -- Very efficient implementation. -- Suitable for embedded systems. -- No runtime dependencies. -- Optional module for public key recovery. -- Optional module for ECDH key exchange. -- Optional module for Schnorr signatures according to [BIP-340](https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki). -- Optional module for ElligatorSwift key exchange according to [BIP-324](https://github.com/bitcoin/bips/blob/master/bip-0324.mediawiki). -- Optional module for MuSig2 Schnorr multi-signatures according to [BIP-327](https://github.com/bitcoin/bips/blob/master/bip-0327.mediawiki). +Implementation details +---------------------- -## Implementation details +* General + * No runtime heap allocation. + * Extensive testing infrastructure. + * Structured to facilitate review and analysis. + * Intended to be portable to any system with a C89 compiler and uint64_t support. + * No use of floating types. + * Expose only higher level interfaces to minimize the API surface and improve application security. ("Be difficult to use insecurely.") +* Field operations + * Optimized implementation of arithmetic modulo the curve's field size (2^256 - 0x1000003D1). + * Using 5 52-bit limbs + * Using 10 26-bit limbs (including hand-optimized assembly for 32-bit ARM, by Wladimir J. van der Laan). + * This is an experimental feature that has not received enough scrutiny to satisfy the standard of quality of this library but is made available for testing and review by the community. +* Scalar operations + * Optimized implementation without data-dependent branches of arithmetic modulo the curve's order. + * Using 4 64-bit limbs (relying on __int128 support in the compiler). + * Using 8 32-bit limbs. +* Modular inverses (both field elements and scalars) based on [safegcd](https://gcd.cr.yp.to/index.html) with some modifications, and a variable-time variant (by Peter Dettman). +* Group operations + * Point addition formula specifically simplified for the curve equation (y^2 = x^3 + 7). + * Use addition between points in Jacobian and affine coordinates where possible. + * Use a unified addition/doubling formula where necessary to avoid data-dependent branches. + * Point/x comparison without a field inversion by comparison in the Jacobian coordinate space. +* Point multiplication for verification (a*P + b*G). + * Use wNAF notation for point multiplicands. + * Use a much larger window for multiples of G, using precomputed multiples. + * Use Shamir's trick to do the multiplication with the public key and the generator simultaneously. + * Use secp256k1's efficiently-computable endomorphism to split the P multiplicand into 2 half-sized ones. +* Point multiplication for signing + * Use a precomputed table of multiples of powers of 16 multiplied with the generator, so general multiplication becomes a series of additions. + * Intended to be completely free of timing sidechannels for secret-key operations (on reasonable hardware/toolchains) + * Access the table with branch-free conditional moves so memory access is uniform. + * No data-dependent branches + * Optional runtime blinding which attempts to frustrate differential power analysis. 
+ * The precomputed tables add and eventually subtract points for which no known scalar (secret key) is known, preventing even an attacker with control over the secret key used to control the data internally. -- General - - No runtime heap allocation. - - Extensive testing infrastructure. - - Structured to facilitate review and analysis. - - Intended to be portable to any system with a C89 compiler and uint64_t support. - - No use of floating types. - - Expose only higher level interfaces to minimize the API surface and improve application security. ("Be difficult to use insecurely.") -- Field operations - - Optimized implementation of arithmetic modulo the curve's field size (2^256 - 0x1000003D1). - - Using 5 52-bit limbs - - Using 10 26-bit limbs (including hand-optimized assembly for 32-bit ARM, by Wladimir J. van der Laan). - - This is an experimental feature that has not received enough scrutiny to satisfy the standard of quality of this library but is made available for testing and review by the community. -- Scalar operations - - Optimized implementation without data-dependent branches of arithmetic modulo the curve's order. - - Using 4 64-bit limbs (relying on \_\_int128 support in the compiler). - - Using 8 32-bit limbs. -- Modular inverses (both field elements and scalars) based on [safegcd](https://gcd.cr.yp.to/index.html) with some modifications, and a variable-time variant (by Peter Dettman). -- Group operations - - Point addition formula specifically simplified for the curve equation (y^2 = x^3 + 7). - - Use addition between points in Jacobian and affine coordinates where possible. - - Use a unified addition/doubling formula where necessary to avoid data-dependent branches. - - Point/x comparison without a field inversion by comparison in the Jacobian coordinate space. -- Point multiplication for verification (a*P + b*G). - - Use wNAF notation for point multiplicands. - - Use a much larger window for multiples of G, using precomputed multiples. - - Use Shamir's trick to do the multiplication with the public key and the generator simultaneously. - - Use secp256k1's efficiently-computable endomorphism to split the P multiplicand into 2 half-sized ones. -- Point multiplication for signing - - Use a precomputed table of multiples of powers of 16 multiplied with the generator, so general multiplication becomes a series of additions. - - Intended to be completely free of timing sidechannels for secret-key operations (on reasonable hardware/toolchains) - - Access the table with branch-free conditional moves so memory access is uniform. - - No data-dependent branches - - Optional runtime blinding which attempts to frustrate differential power analysis. - - The precomputed tables add and eventually subtract points for which no known scalar (secret key) is known, preventing even an attacker with control over the secret key used to control the data internally. - -## Building with Autotools +Building with Autotools +----------------------- $ ./autogen.sh $ ./configure @@ -70,7 +72,8 @@ Features: To compile optional modules (such as Schnorr signatures), you need to run `./configure` with additional flags (such as `--enable-module-schnorrsig`). Run `./configure --help` to see the full list of available flags. -## Building with CMake (experimental) +Building with CMake (experimental) +---------------------------------- To maintain a pristine source tree, CMake encourages to perform an out-of-source build by using a separate dedicated build tree. 
@@ -106,19 +109,18 @@ In "Developer Command Prompt for VS 2022": >cmake -G "Visual Studio 17 2022" -A x64 -S . -B build >cmake --build build --config RelWithDebInfo -## Usage examples - +Usage examples +----------- Usage examples can be found in the [examples](examples) directory. To compile them you need to configure with `--enable-examples`. - -- [ECDSA example](examples/ecdsa.c) -- [Schnorr signatures example](examples/schnorr.c) -- [Deriving a shared secret (ECDH) example](examples/ecdh.c) -- [ElligatorSwift key exchange example](examples/ellswift.c) + * [ECDSA example](examples/ecdsa.c) + * [Schnorr signatures example](examples/schnorr.c) + * [Deriving a shared secret (ECDH) example](examples/ecdh.c) + * [ElligatorSwift key exchange example](examples/ellswift.c) To compile the Schnorr signature and ECDH examples, you also need to configure with `--enable-module-schnorrsig` and `--enable-module-ecdh`. -## Benchmark - +Benchmark +------------ If configured with `--enable-benchmark` (which is the default), binaries for benchmarking the libsecp256k1 functions will be present in the root directory after the build. To print the benchmark result to the command line: @@ -129,10 +131,12 @@ To create a CSV file for the benchmark result : $ ./bench_name | sed '2d;s/ \{1,\}//g' > bench_name.csv -## Reporting a vulnerability +Reporting a vulnerability +------------ See [SECURITY.md](SECURITY.md) -## Contributing to libsecp256k1 +Contributing to libsecp256k1 +------------ See [CONTRIBUTING.md](CONTRIBUTING.md) diff --git a/external/secp256k1/SECURITY.md b/external/secp256k1/SECURITY.md index cb438707ce..b515cc1c8e 100644 --- a/external/secp256k1/SECURITY.md +++ b/external/secp256k1/SECURITY.md @@ -6,10 +6,10 @@ To report security issues send an email to secp256k1-security@bitcoincore.org (n The following keys may be used to communicate sensitive information to developers: -| Name | Fingerprint | -| ------------- | ------------------------------------------------- | -| Pieter Wuille | 133E AC17 9436 F14A 5CF1 B794 860F EB80 4E66 9320 | -| Jonas Nick | 36C7 1A37 C9D9 88BD E825 08D9 B1A7 0E4F 8DCD 0366 | -| Tim Ruffing | 09E0 3F87 1092 E40E 106E 902B 33BC 86AB 80FF 5516 | +| Name | Fingerprint | +|------|-------------| +| Pieter Wuille | 133E AC17 9436 F14A 5CF1 B794 860F EB80 4E66 9320 | +| Jonas Nick | 36C7 1A37 C9D9 88BD E825 08D9 B1A7 0E4F 8DCD 0366 | +| Tim Ruffing | 09E0 3F87 1092 E40E 106E 902B 33BC 86AB 80FF 5516 | You can import a key by running the following command with that individual’s fingerprint: `gpg --keyserver hkps://keys.openpgp.org --recv-keys ""` Ensure that you put quotes around fingerprints containing spaces. diff --git a/external/secp256k1/doc/ellswift.md b/external/secp256k1/doc/ellswift.md index ffbe9d02ac..9d60e6be0b 100644 --- a/external/secp256k1/doc/ellswift.md +++ b/external/secp256k1/doc/ellswift.md @@ -5,17 +5,17 @@ construction in the ["SwiftEC: Shallue–van de Woestijne Indifferentiable Function To Elliptic Curves"](https://eprint.iacr.org/2022/759) paper by Jorge Chávez-Saab, Francisco Rodríguez-Henríquez, and Mehdi Tibouchi. -- [1. Introduction](#1-introduction) -- [2. The decoding function](#2-the-decoding-function) - - [2.1 Decoding for `secp256k1`](#21-decoding-for-secp256k1) -- [3. 
The encoding function](#3-the-encoding-function) - - [3.1 Switching to _v, w_ coordinates](#31-switching-to-v-w-coordinates) - - [3.2 Avoiding computing all inverses](#32-avoiding-computing-all-inverses) - - [3.3 Finding the inverse](#33-finding-the-inverse) - - [3.4 Dealing with special cases](#34-dealing-with-special-cases) - - [3.5 Encoding for `secp256k1`](#35-encoding-for-secp256k1) -- [4. Encoding and decoding full _(x, y)_ coordinates](#4-encoding-and-decoding-full-x-y-coordinates) - - [4.1 Full _(x, y)_ coordinates for `secp256k1`](#41-full-x-y-coordinates-for-secp256k1) +* [1. Introduction](#1-introduction) +* [2. The decoding function](#2-the-decoding-function) + + [2.1 Decoding for `secp256k1`](#21-decoding-for-secp256k1) +* [3. The encoding function](#3-the-encoding-function) + + [3.1 Switching to *v, w* coordinates](#31-switching-to-v-w-coordinates) + + [3.2 Avoiding computing all inverses](#32-avoiding-computing-all-inverses) + + [3.3 Finding the inverse](#33-finding-the-inverse) + + [3.4 Dealing with special cases](#34-dealing-with-special-cases) + + [3.5 Encoding for `secp256k1`](#35-encoding-for-secp256k1) +* [4. Encoding and decoding full *(x, y)* coordinates](#4-encoding-and-decoding-full-x-y-coordinates) + + [4.1 Full *(x, y)* coordinates for `secp256k1`](#41-full-x-y-coordinates-for-secp256k1) ## 1. Introduction @@ -34,14 +34,13 @@ are taken modulo $p$), and then evaluating $F_u(t)$, which for every $u$ and $t$ x-coordinate on the curve. The functions $F_u$ will be defined in [Section 2](#2-the-decoding-function). **Encoding** a given $x$ coordinate is conceptually done as follows: +* Loop: + * Pick a uniformly random field element $u.$ + * Compute the set $L = F_u^{-1}(x)$ of $t$ values for which $F_u(t) = x$, which may have up to *8* elements. + * With probability $1 - \dfrac{\\#L}{8}$, restart the loop. + * Select a uniformly random $t \in L$ and return $(u, t).$ -- Loop: - - Pick a uniformly random field element $u.$ - - Compute the set $L = F_u^{-1}(x)$ of $t$ values for which $F_u(t) = x$, which may have up to _8_ elements. - - With probability $1 - \dfrac{\\#L}{8}$, restart the loop. - - Select a uniformly random $t \in L$ and return $(u, t).$ - -This is the _ElligatorSwift_ algorithm, here given for just x-coordinates. An extension to full +This is the *ElligatorSwift* algorithm, here given for just x-coordinates. An extension to full $(x, y)$ points will be given in [Section 4](#4-encoding-and-decoding-full-x-y-coordinates). The algorithm finds a uniformly random $(u, t)$ among (almost all) those for which $F_u(t) = x.$ Section 3.2 in the paper proves that the number of such encodings for @@ -51,40 +50,37 @@ almost all x-coordinates on the curve (all but at most 39) is close to two times ## 2. The decoding function First some definitions: - -- $\mathbb{F}$ is the finite field of size $q$, of characteristic 5 or more, and $q \equiv 1 \mod 3.$ - - For `secp256k1`, $q = 2^{256} - 2^{32} - 977$, which satisfies that requirement. -- Let $E$ be the elliptic curve of points $(x, y) \in \mathbb{F}^2$ for which $y^2 = x^3 + ax + b$, with $a$ and $b$ +* $\mathbb{F}$ is the finite field of size $q$, of characteristic 5 or more, and $q \equiv 1 \mod 3.$ + * For `secp256k1`, $q = 2^{256} - 2^{32} - 977$, which satisfies that requirement. 
+* Let $E$ be the elliptic curve of points $(x, y) \in \mathbb{F}^2$ for which $y^2 = x^3 + ax + b$, with $a$ and $b$ public constants, for which $\Delta_E = -16(4a^3 + 27b^2)$ is a square, and at least one of $(-b \pm \sqrt{-3 \Delta_E} / 36)/2$ is a square. - This implies that the order of $E$ is either odd, or a multiple of _4_. + This implies that the order of $E$ is either odd, or a multiple of *4*. If $a=0$, this condition is always fulfilled. - - For `secp256k1`, $a=0$ and $b=7.$ -- Let the function $g(x) = x^3 + ax + b$, so the $E$ curve equation is also $y^2 = g(x).$ -- Let the function $h(x) = 3x^3 + 4a.$ -- Define $V$ as the set of solutions $(x_1, x_2, x_3, z)$ to $z^2 = g(x_1)g(x_2)g(x_3).$ -- Define $S_u$ as the set of solutions $(X, Y)$ to $X^2 + h(u)Y^2 = -g(u)$ and $Y \neq 0.$ -- $P_u$ is a function from $\mathbb{F}$ to $S_u$ that will be defined below. -- $\psi_u$ is a function from $S_u$ to $V$ that will be defined below. + * For `secp256k1`, $a=0$ and $b=7.$ +* Let the function $g(x) = x^3 + ax + b$, so the $E$ curve equation is also $y^2 = g(x).$ +* Let the function $h(x) = 3x^3 + 4a.$ +* Define $V$ as the set of solutions $(x_1, x_2, x_3, z)$ to $z^2 = g(x_1)g(x_2)g(x_3).$ +* Define $S_u$ as the set of solutions $(X, Y)$ to $X^2 + h(u)Y^2 = -g(u)$ and $Y \neq 0.$ +* $P_u$ is a function from $\mathbb{F}$ to $S_u$ that will be defined below. +* $\psi_u$ is a function from $S_u$ to $V$ that will be defined below. **Note**: In the paper: - -- $F_u$ corresponds to $F_{0,u}$ there. -- $P_u(t)$ is called $P$ there. -- All $S_u$ sets together correspond to $S$ there. -- All $\psi_u$ functions together (operating on elements of $S$) correspond to $\psi$ there. +* $F_u$ corresponds to $F_{0,u}$ there. +* $P_u(t)$ is called $P$ there. +* All $S_u$ sets together correspond to $S$ there. +* All $\psi_u$ functions together (operating on elements of $S$) correspond to $\psi$ there. Note that for $V$, the left hand side of the equation $z^2$ is square, and thus the right hand must also be square. As multiplying non-squares results in a square in $\mathbb{F}$, out of the three right-hand side factors an even number must be non-squares. -This implies that exactly _1_ or exactly _3_ out of +This implies that exactly *1* or exactly *3* out of $\\{g(x_1), g(x_2), g(x_3)\\}$ must be square, and thus that for any $(x_1,x_2,x_3,z) \in V$, at least one of $\\{x_1, x_2, x_3\\}$ must be a valid x-coordinate on $E.$ There is one exception to this, namely when $z=0$, but even then one of the three values is a valid x-coordinate. **Define** the decoding function $F_u(t)$ as: - -- Let $(x_1, x_2, x_3, z) = \psi_u(P_u(t)).$ -- Return the first element $x$ of $(x_3, x_2, x_1)$ which is a valid x-coordinate on $E$ (i.e., $g(x)$ is square). +* Let $(x_1, x_2, x_3, z) = \psi_u(P_u(t)).$ +* Return the first element $x$ of $(x_3, x_2, x_1)$ which is a valid x-coordinate on $E$ (i.e., $g(x)$ is square). $P_u(t) = (X(u, t), Y(u, t))$, where: @@ -102,13 +98,12 @@ Y(u, t) & = & \left\\{\begin{array}{ll} $$ $P_u(t)$ is defined: - -- For $a=0$, unless: - - $u = 0$ or $t = 0$ (division by zero) - - $g(u) = -t^2$ (would give $Y=0$). -- For $a \neq 0$, unless: - - $X_0(u) = 0$ or $h(u)t^2 = -1$ (division by zero) - - $Y_0(u) (1 - h(u)t^2) = 2X_0(u)t$ (would give $Y=0$). +* For $a=0$, unless: + * $u = 0$ or $t = 0$ (division by zero) + * $g(u) = -t^2$ (would give $Y=0$). +* For $a \neq 0$, unless: + * $X_0(u) = 0$ or $h(u)t^2 = -1$ (division by zero) + * $Y_0(u) (1 - h(u)t^2) = 2X_0(u)t$ (would give $Y=0$). 
The functions $X_0(u)$ and $Y_0(u)$ are defined in Appendix A of the paper, and depend on various properties of $E.$ @@ -128,22 +123,20 @@ $$ Put together and specialized for $a=0$ curves, decoding $(u, t)$ to an x-coordinate is: **Define** $F_u(t)$ as: - -- Let $X = \dfrac{u^3 + b - t^2}{2t}.$ -- Let $Y = \dfrac{X + t}{u\sqrt{-3}}.$ -- Return the first $x$ in $(u + 4Y^2, \dfrac{-X}{2Y} - \dfrac{u}{2}, \dfrac{X}{2Y} - \dfrac{u}{2})$ for which $g(x)$ is square. +* Let $X = \dfrac{u^3 + b - t^2}{2t}.$ +* Let $Y = \dfrac{X + t}{u\sqrt{-3}}.$ +* Return the first $x$ in $(u + 4Y^2, \dfrac{-X}{2Y} - \dfrac{u}{2}, \dfrac{X}{2Y} - \dfrac{u}{2})$ for which $g(x)$ is square. To make sure that every input decodes to a valid x-coordinate, we remap the inputs in case $P_u$ is not defined (when $u=0$, $t=0$, or $g(u) = -t^2$): **Define** $F_u(t)$ as: - -- Let $u'=u$ if $u \neq 0$; $1$ otherwise (guaranteeing $u' \neq 0$). -- Let $t'=t$ if $t \neq 0$; $1$ otherwise (guaranteeing $t' \neq 0$). -- Let $t''=t'$ if $g(u') \neq -t'^2$; $2t'$ otherwise (guaranteeing $t'' \neq 0$ and $g(u') \neq -t''^2$). -- Let $X = \dfrac{u'^3 + b - t''^2}{2t''}.$ -- Let $Y = \dfrac{X + t''}{u'\sqrt{-3}}.$ -- Return the first $x$ in $(u' + 4Y^2, \dfrac{-X}{2Y} - \dfrac{u'}{2}, \dfrac{X}{2Y} - \dfrac{u'}{2})$ for which $x^3 + b$ is square. +* Let $u'=u$ if $u \neq 0$; $1$ otherwise (guaranteeing $u' \neq 0$). +* Let $t'=t$ if $t \neq 0$; $1$ otherwise (guaranteeing $t' \neq 0$). +* Let $t''=t'$ if $g(u') \neq -t'^2$; $2t'$ otherwise (guaranteeing $t'' \neq 0$ and $g(u') \neq -t''^2$). +* Let $X = \dfrac{u'^3 + b - t''^2}{2t''}.$ +* Let $Y = \dfrac{X + t''}{u'\sqrt{-3}}.$ +* Return the first $x$ in $(u' + 4Y^2, \dfrac{-X}{2Y} - \dfrac{u'}{2}, \dfrac{X}{2Y} - \dfrac{u'}{2})$ for which $x^3 + b$ is square. The choices here are not strictly necessary. Just returning a fixed constant in any of the undefined cases would suffice, but the approach here is simple enough and gives fairly uniform output even in these cases. @@ -157,11 +150,10 @@ in `secp256k1_ellswift_xswiftec_var` (which outputs the actual x-coordinate). ## 3. The encoding function To implement $F_u^{-1}(x)$, the function to find the set of inverses $t$ for which $F_u(t) = x$, we have to reverse the process: - -- Find all the $(X, Y) \in S_u$ that could have given rise to $x$, through the $x_1$, $x_2$, or $x_3$ formulas in $\psi_u.$ -- Map those $(X, Y)$ solutions to $t$ values using $P_u^{-1}(X, Y).$ -- For each of the found $t$ values, verify that $F_u(t) = x.$ -- Return the remaining $t$ values. +* Find all the $(X, Y) \in S_u$ that could have given rise to $x$, through the $x_1$, $x_2$, or $x_3$ formulas in $\psi_u.$ +* Map those $(X, Y)$ solutions to $t$ values using $P_u^{-1}(X, Y).$ +* For each of the found $t$ values, verify that $F_u(t) = x.$ +* Return the remaining $t$ values. The function $P_u^{-1}$, which finds $t$ given $(X, Y) \in S_u$, is significantly simpler than $P_u:$ @@ -193,14 +185,13 @@ precedence over both. Because of this, the $g(-u-x)$ being square test for $x_1$ values round-trip back to the input $x$ correctly. This is the reason for choosing the $(x_3, x_2, x_1)$ precedence order in the decoder; any order which does not place $x_3$ first requires more complicated round-trip checks in the encoder. 
-### 3.1 Switching to _v, w_ coordinates +### 3.1 Switching to *v, w* coordinates Before working out the formulas for all this, we switch to different variables for $S_u.$ Let $v = (X/Y - u)/2$, and $w = 2Y.$ Or in the other direction, $X = w(u/2 + v)$ and $Y = w/2:$ - -- $S_u'$ becomes the set of $(v, w)$ for which $w^2 (u^2 + uv + v^2 + a) = -g(u)$ and $w \neq 0.$ -- For $a=0$ curves, $P_u^{-1}$ can be stated for $(v,w)$ as $P_u^{'-1}(v, w) = w\left(\frac{\sqrt{-3}-1}{2}u - v\right).$ -- $\psi_u$ can be stated for $(v, w)$ as $\psi_u'(v, w) = (x_1, x_2, x_3, z)$, where +* $S_u'$ becomes the set of $(v, w)$ for which $w^2 (u^2 + uv + v^2 + a) = -g(u)$ and $w \neq 0.$ +* For $a=0$ curves, $P_u^{-1}$ can be stated for $(v,w)$ as $P_u^{'-1}(v, w) = w\left(\frac{\sqrt{-3}-1}{2}u - v\right).$ +* $\psi_u$ can be stated for $(v, w)$ as $\psi_u'(v, w) = (x_1, x_2, x_3, z)$, where $$ \begin{array}{lcl} @@ -213,37 +204,34 @@ $$ We can now write the expressions for finding $(v, w)$ given $x$ explicitly, by solving each of the $\\{x_1, x_2, x_3\\}$ expressions for $v$ or $w$, and using the $S_u'$ equation to find the other variable: - -- Assuming $x = x_1$, we find $v = x$ and $w = \pm\sqrt{-g(u)/(u^2 + uv + v^2 + a)}$ (two solutions). -- Assuming $x = x_2$, we find $v = -u-x$ and $w = \pm\sqrt{-g(u)/(u^2 + uv + v^2 + a)}$ (two solutions). -- Assuming $x = x_3$, we find $w = \pm\sqrt{x-u}$ and $v = -u/2 \pm \sqrt{-w^2(4g(u) + w^2h(u))}/(2w^2)$ (four solutions). +* Assuming $x = x_1$, we find $v = x$ and $w = \pm\sqrt{-g(u)/(u^2 + uv + v^2 + a)}$ (two solutions). +* Assuming $x = x_2$, we find $v = -u-x$ and $w = \pm\sqrt{-g(u)/(u^2 + uv + v^2 + a)}$ (two solutions). +* Assuming $x = x_3$, we find $w = \pm\sqrt{x-u}$ and $v = -u/2 \pm \sqrt{-w^2(4g(u) + w^2h(u))}/(2w^2)$ (four solutions). ### 3.2 Avoiding computing all inverses -The _ElligatorSwift_ algorithm as stated in Section 1 requires the computation of $L = F_u^{-1}(x)$ (the +The *ElligatorSwift* algorithm as stated in Section 1 requires the computation of $L = F_u^{-1}(x)$ (the set of all $t$ such that $(u, t)$ decode to $x$) in full. This is unnecessary. Observe that the procedure of restarting with probability $(1 - \frac{\\#L}{8})$ and otherwise returning a uniformly random element from $L$ is actually equivalent to always padding $L$ with $\bot$ values up to length 8, picking a uniformly random element from that, restarting whenever $\bot$ is picked: -**Define** _ElligatorSwift(x)_ as: - -- Loop: - - Pick a uniformly random field element $u.$ - - Compute the set $L = F_u^{-1}(x).$ - - Let $T$ be the 8-element vector consisting of the elements of $L$, plus $8 - \\#L$ times $\\{\bot\\}.$ - - Select a uniformly random $t \in T.$ - - If $t \neq \bot$, return $(u, t)$; restart loop otherwise. +**Define** *ElligatorSwift(x)* as: +* Loop: + * Pick a uniformly random field element $u.$ + * Compute the set $L = F_u^{-1}(x).$ + * Let $T$ be the 8-element vector consisting of the elements of $L$, plus $8 - \\#L$ times $\\{\bot\\}.$ + * Select a uniformly random $t \in T.$ + * If $t \neq \bot$, return $(u, t)$; restart loop otherwise. Now notice that the order of elements in $T$ does not matter, as all we do is pick a uniformly random element in it, so we do not need to have all $\bot$ values at the end. 
As we have 8 distinct formulas for finding $(v, w)$ (taking the variants due to $\pm$ into account), we can associate every index in $T$ with exactly one of those formulas, making sure that: - -- Formulas that yield no solutions (due to division by zero or non-existing square roots) or invalid solutions are made to return $\bot.$ -- For the $x_1$ and $x_2$ cases, if $g(-u-x)$ is a square, $\bot$ is returned instead (the round-trip check). -- In case multiple formulas would return the same non- $\bot$ result, all but one of those must be turned into $\bot$ to avoid biasing those. +* Formulas that yield no solutions (due to division by zero or non-existing square roots) or invalid solutions are made to return $\bot.$ +* For the $x_1$ and $x_2$ cases, if $g(-u-x)$ is a square, $\bot$ is returned instead (the round-trip check). +* In case multiple formulas would return the same non- $\bot$ result, all but one of those must be turned into $\bot$ to avoid biasing those. The last condition above only occurs with negligible probability for cryptographically-sized curves, but is interesting to take into account as it allows exhaustive testing in small groups. See [Section 3.4](#34-dealing-with-special-cases) @@ -252,13 +240,12 @@ for an analysis of all the negligible cases. If we define $T = (G_{0,u}(x), G_{1,u}(x), \ldots, G_{7,u}(x))$, with each $G_{i,u}$ matching one of the formulas, the loop can be simplified to only compute one of the inverses instead of all of them: -**Define** _ElligatorSwift(x)_ as: - -- Loop: - - Pick a uniformly random field element $u.$ - - Pick a uniformly random integer $c$ in $[0,8).$ - - Let $t = G_{c,u}(x).$ - - If $t \neq \bot$, return $(u, t)$; restart loop otherwise. +**Define** *ElligatorSwift(x)* as: +* Loop: + * Pick a uniformly random field element $u.$ + * Pick a uniformly random integer $c$ in $[0,8).$ + * Let $t = G_{c,u}(x).$ + * If $t \neq \bot$, return $(u, t)$; restart loop otherwise. This is implemented in `secp256k1_ellswift_xelligatorswift_var`. @@ -269,19 +256,18 @@ Those are then repeated as $c=4$ through $c=7$ for the other sign of $w$ (noting Ignoring the negligible cases, we get: **Define** $G_{c,u}(x)$ as: - -- If $c \in \\{0, 1, 4, 5\\}$ (for $x_1$ and $x_2$ formulas): - - If $g(-u-x)$ is square, return $\bot$ (as $x_3$ would be valid and take precedence). - - If $c \in \\{0, 4\\}$ (the $x_1$ formula) let $v = x$, otherwise let $v = -u-x$ (the $x_2$ formula) - - Let $s = -g(u)/(u^2 + uv + v^2 + a)$ (using $s = w^2$ in what follows). -- Otherwise, when $c \in \\{2, 3, 6, 7\\}$ (for $x_3$ formulas): - - Let $s = x-u.$ - - Let $r = \sqrt{-s(4g(u) + sh(u))}.$ - - Let $v = (r/s - u)/2$ if $c \in \\{3, 7\\}$; $(-r/s - u)/2$ otherwise. -- Let $w = \sqrt{s}.$ -- Depending on $c:$ - - If $c \in \\{0, 1, 2, 3\\}:$ return $P_u^{'-1}(v, w).$ - - If $c \in \\{4, 5, 6, 7\\}:$ return $P_u^{'-1}(v, -w).$ +* If $c \in \\{0, 1, 4, 5\\}$ (for $x_1$ and $x_2$ formulas): + * If $g(-u-x)$ is square, return $\bot$ (as $x_3$ would be valid and take precedence). + * If $c \in \\{0, 4\\}$ (the $x_1$ formula) let $v = x$, otherwise let $v = -u-x$ (the $x_2$ formula) + * Let $s = -g(u)/(u^2 + uv + v^2 + a)$ (using $s = w^2$ in what follows). +* Otherwise, when $c \in \\{2, 3, 6, 7\\}$ (for $x_3$ formulas): + * Let $s = x-u.$ + * Let $r = \sqrt{-s(4g(u) + sh(u))}.$ + * Let $v = (r/s - u)/2$ if $c \in \\{3, 7\\}$; $(-r/s - u)/2$ otherwise. 
+* Let $w = \sqrt{s}.$ +* Depending on $c:$ + * If $c \in \\{0, 1, 2, 3\\}:$ return $P_u^{'-1}(v, w).$ + * If $c \in \\{4, 5, 6, 7\\}:$ return $P_u^{'-1}(v, -w).$ Whenever a square root of a non-square is taken, $\bot$ is returned; for both square roots this happens with roughly 50% on random inputs. Similarly, when a division by 0 would occur, $\bot$ is returned as well; this will only happen @@ -298,21 +284,20 @@ transformation. Furthermore, that transformation has no effect on $s$ in the fir as $u^2 + ux + x^2 + a = u^2 + u(-u-x) + (-u-x)^2 + a.$ Thus we can extract it out and move it down: **Define** $G_{c,u}(x)$ as: - -- If $c \in \\{0, 1, 4, 5\\}:$ - - If $g(-u-x)$ is square, return $\bot.$ - - Let $s = -g(u)/(u^2 + ux + x^2 + a).$ - - Let $v = x.$ -- Otherwise, when $c \in \\{2, 3, 6, 7\\}:$ - - Let $s = x-u.$ - - Let $r = \sqrt{-s(4g(u) + sh(u))}.$ - - Let $v = (r/s - u)/2.$ -- Let $w = \sqrt{s}.$ -- Depending on $c:$ - - If $c \in \\{0, 2\\}:$ return $P_u^{'-1}(v, w).$ - - If $c \in \\{1, 3\\}:$ return $P_u^{'-1}(-u-v, w).$ - - If $c \in \\{4, 6\\}:$ return $P_u^{'-1}(v, -w).$ - - If $c \in \\{5, 7\\}:$ return $P_u^{'-1}(-u-v, -w).$ +* If $c \in \\{0, 1, 4, 5\\}:$ + * If $g(-u-x)$ is square, return $\bot.$ + * Let $s = -g(u)/(u^2 + ux + x^2 + a).$ + * Let $v = x.$ +* Otherwise, when $c \in \\{2, 3, 6, 7\\}:$ + * Let $s = x-u.$ + * Let $r = \sqrt{-s(4g(u) + sh(u))}.$ + * Let $v = (r/s - u)/2.$ +* Let $w = \sqrt{s}.$ +* Depending on $c:$ + * If $c \in \\{0, 2\\}:$ return $P_u^{'-1}(v, w).$ + * If $c \in \\{1, 3\\}:$ return $P_u^{'-1}(-u-v, w).$ + * If $c \in \\{4, 6\\}:$ return $P_u^{'-1}(v, -w).$ + * If $c \in \\{5, 7\\}:$ return $P_u^{'-1}(-u-v, -w).$ This shows there will always be exactly 0, 4, or 8 $t$ values for a given $(u, x)$ input. There can be 0, 1, or 2 $(v, w)$ pairs before invoking $P_u^{'-1}$, and each results in 4 distinct $t$ values. @@ -325,60 +310,58 @@ we analyse them here. They generally fall into two categories: cases in which th do not decode back to $x$ (or at least cannot guarantee that they do), and cases in which the encoder might produce the same $t$ value for multiple $c$ inputs (thereby biasing that encoding): -- In the branch for $x_1$ and $x_2$ (where $c \in \\{0, 1, 4, 5\\}$): - - When $g(u) = 0$, we would have $s=w=Y=0$, which is not on $S_u.$ This is only possible on even-ordered curves. +* In the branch for $x_1$ and $x_2$ (where $c \in \\{0, 1, 4, 5\\}$): + * When $g(u) = 0$, we would have $s=w=Y=0$, which is not on $S_u.$ This is only possible on even-ordered curves. Excluding this also removes the one condition under which the simplified check for $x_3$ on the curve fails (namely when $g(x_1)=g(x_2)=0$ but $g(x_3)$ is not square). This does exclude some valid encodings: when both $g(u)=0$ and $u^2+ux+x^2+a=0$ (also implying $g(x)=0$), the $S_u'$ equation degenerates to $0 = 0$, and many valid $t$ values may exist. Yet, these cannot be targeted uniformly by the encoder anyway as there will generally be more than 8. - - When $g(x) = 0$, the same $t$ would be produced as in the $x_3$ branch (where $c \in \\{2, 3, 6, 7\\}$) which we give precedence + * When $g(x) = 0$, the same $t$ would be produced as in the $x_3$ branch (where $c \in \\{2, 3, 6, 7\\}$) which we give precedence as it can deal with $g(u)=0$. This is again only possible on even-ordered curves. -- In the branch for $x_3$ (where $c \in \\{2, 3, 6, 7\\}$): - - When $s=0$, a division by zero would occur. 
- - When $v = -u-v$ and $c \in \\{3, 7\\}$, the same $t$ would be returned as in the $c \in \\{2, 6\\}$ cases. +* In the branch for $x_3$ (where $c \in \\{2, 3, 6, 7\\}$): + * When $s=0$, a division by zero would occur. + * When $v = -u-v$ and $c \in \\{3, 7\\}$, the same $t$ would be returned as in the $c \in \\{2, 6\\}$ cases. It is equivalent to checking whether $r=0$. This cannot occur in the $x_1$ or $x_2$ branches, as it would trigger the $g(-u-x)$ is square condition. A similar concern for $w = -w$ does not exist, as $w=0$ is already impossible in both branches: in the first it requires $g(u)=0$ which is already outlawed on even-ordered curves and impossible on others; in the second it would trigger division by zero. -- Curve-specific special cases also exist that need to be rejected, because they result in $(u,t)$ which is invalid to the decoder, or because of division by zero in the encoder: - - For $a=0$ curves, when $u=0$ or when $t=0$. The latter can only be reached by the encoder when $g(u)=0$, which requires an even-ordered curve. - - For $a \neq 0$ curves, when $X_0(u)=0$, when $h(u)t^2 = -1$, or when $w(u + 2v) = 2X_0(u)$ while also either $w \neq 2Y_0(u)$ or $h(u)=0$. +* Curve-specific special cases also exist that need to be rejected, because they result in $(u,t)$ which is invalid to the decoder, or because of division by zero in the encoder: + * For $a=0$ curves, when $u=0$ or when $t=0$. The latter can only be reached by the encoder when $g(u)=0$, which requires an even-ordered curve. + * For $a \neq 0$ curves, when $X_0(u)=0$, when $h(u)t^2 = -1$, or when $w(u + 2v) = 2X_0(u)$ while also either $w \neq 2Y_0(u)$ or $h(u)=0$. **Define** a version of $G_{c,u}(x)$ which deals with all these cases: - -- If $a=0$ and $u=0$, return $\bot.$ -- If $a \neq 0$ and $X_0(u)=0$, return $\bot.$ -- If $c \in \\{0, 1, 4, 5\\}:$ - - If $g(u) = 0$ or $g(x) = 0$, return $\bot$ (even curves only). - - If $g(-u-x)$ is square, return $\bot.$ - - Let $s = -g(u)/(u^2 + ux + x^2 + a)$ (cannot cause division by zero). - - Let $v = x.$ -- Otherwise, when $c \in \\{2, 3, 6, 7\\}:$ - - Let $s = x-u.$ - - Let $r = \sqrt{-s(4g(u) + sh(u))}$; return $\bot$ if not square. - - If $c \in \\{3, 7\\}$ and $r=0$, return $\bot.$ - - If $s = 0$, return $\bot.$ - - Let $v = (r/s - u)/2.$ -- Let $w = \sqrt{s}$; return $\bot$ if not square. -- If $a \neq 0$ and $w(u+2v) = 2X_0(u)$ and either $w \neq 2Y_0(u)$ or $h(u) = 0$, return $\bot.$ -- Depending on $c:$ - - If $c \in \\{0, 2\\}$, let $t = P_u^{'-1}(v, w).$ - - If $c \in \\{1, 3\\}$, let $t = P_u^{'-1}(-u-v, w).$ - - If $c \in \\{4, 6\\}$, let $t = P_u^{'-1}(v, -w).$ - - If $c \in \\{5, 7\\}$, let $t = P_u^{'-1}(-u-v, -w).$ -- If $a=0$ and $t=0$, return $\bot$ (even curves only). -- If $a \neq 0$ and $h(u)t^2 = -1$, return $\bot.$ -- Return $t.$ +* If $a=0$ and $u=0$, return $\bot.$ +* If $a \neq 0$ and $X_0(u)=0$, return $\bot.$ +* If $c \in \\{0, 1, 4, 5\\}:$ + * If $g(u) = 0$ or $g(x) = 0$, return $\bot$ (even curves only). + * If $g(-u-x)$ is square, return $\bot.$ + * Let $s = -g(u)/(u^2 + ux + x^2 + a)$ (cannot cause division by zero). + * Let $v = x.$ +* Otherwise, when $c \in \\{2, 3, 6, 7\\}:$ + * Let $s = x-u.$ + * Let $r = \sqrt{-s(4g(u) + sh(u))}$; return $\bot$ if not square. + * If $c \in \\{3, 7\\}$ and $r=0$, return $\bot.$ + * If $s = 0$, return $\bot.$ + * Let $v = (r/s - u)/2.$ +* Let $w = \sqrt{s}$; return $\bot$ if not square. 
+* If $a \neq 0$ and $w(u+2v) = 2X_0(u)$ and either $w \neq 2Y_0(u)$ or $h(u) = 0$, return $\bot.$ +* Depending on $c:$ + * If $c \in \\{0, 2\\}$, let $t = P_u^{'-1}(v, w).$ + * If $c \in \\{1, 3\\}$, let $t = P_u^{'-1}(-u-v, w).$ + * If $c \in \\{4, 6\\}$, let $t = P_u^{'-1}(v, -w).$ + * If $c \in \\{5, 7\\}$, let $t = P_u^{'-1}(-u-v, -w).$ +* If $a=0$ and $t=0$, return $\bot$ (even curves only). +* If $a \neq 0$ and $h(u)t^2 = -1$, return $\bot.$ +* Return $t.$ Given any $u$, using this algorithm over all $x$ and $c$ values, every $t$ value will be reached exactly once, for an $x$ for which $F_u(t) = x$ holds, except for these cases that will not be reached: - -- All cases where $P_u(t)$ is not defined: - - For $a=0$ curves, when $u=0$, $t=0$, or $g(u) = -t^2.$ - - For $a \neq 0$ curves, when $h(u)t^2 = -1$, $X_0(u) = 0$, or $Y_0(u) (1 - h(u) t^2) = 2X_0(u)t.$ -- When $g(u)=0$, the potentially many $t$ values that decode to an $x$ satisfying $g(x)=0$ using the $x_2$ formula. These were excluded by the $g(u)=0$ condition in the $c \in \\{0, 1, 4, 5\\}$ branch. +* All cases where $P_u(t)$ is not defined: + * For $a=0$ curves, when $u=0$, $t=0$, or $g(u) = -t^2.$ + * For $a \neq 0$ curves, when $h(u)t^2 = -1$, $X_0(u) = 0$, or $Y_0(u) (1 - h(u) t^2) = 2X_0(u)t.$ +* When $g(u)=0$, the potentially many $t$ values that decode to an $x$ satisfying $g(x)=0$ using the $x_2$ formula. These were excluded by the $g(u)=0$ condition in the $c \in \\{0, 1, 4, 5\\}$ branch. These cases form a negligible subset of all $(u, t)$ for cryptographically sized curves. @@ -387,42 +370,40 @@ These cases form a negligible subset of all $(u, t)$ for cryptographically sized Specialized for odd-ordered $a=0$ curves: **Define** $G_{c,u}(x)$ as: - -- If $u=0$, return $\bot.$ -- If $c \in \\{0, 1, 4, 5\\}:$ - - If $(-u-x)^3 + b$ is square, return $\bot$ - - Let $s = -(u^3 + b)/(u^2 + ux + x^2)$ (cannot cause division by 0). - - Let $v = x.$ -- Otherwise, when $c \in \\{2, 3, 6, 7\\}:$ - - Let $s = x-u.$ - - Let $r = \sqrt{-s(4(u^3 + b) + 3su^2)}$; return $\bot$ if not square. - - If $c \in \\{3, 7\\}$ and $r=0$, return $\bot.$ - - If $s = 0$, return $\bot.$ - - Let $v = (r/s - u)/2.$ -- Let $w = \sqrt{s}$; return $\bot$ if not square. -- Depending on $c:$ - - If $c \in \\{0, 2\\}:$ return $w(\frac{\sqrt{-3}-1}{2}u - v).$ - - If $c \in \\{1, 3\\}:$ return $w(\frac{\sqrt{-3}+1}{2}u + v).$ - - If $c \in \\{4, 6\\}:$ return $w(\frac{-\sqrt{-3}+1}{2}u + v).$ - - If $c \in \\{5, 7\\}:$ return $w(\frac{-\sqrt{-3}-1}{2}u - v).$ +* If $u=0$, return $\bot.$ +* If $c \in \\{0, 1, 4, 5\\}:$ + * If $(-u-x)^3 + b$ is square, return $\bot$ + * Let $s = -(u^3 + b)/(u^2 + ux + x^2)$ (cannot cause division by 0). + * Let $v = x.$ +* Otherwise, when $c \in \\{2, 3, 6, 7\\}:$ + * Let $s = x-u.$ + * Let $r = \sqrt{-s(4(u^3 + b) + 3su^2)}$; return $\bot$ if not square. + * If $c \in \\{3, 7\\}$ and $r=0$, return $\bot.$ + * If $s = 0$, return $\bot.$ + * Let $v = (r/s - u)/2.$ +* Let $w = \sqrt{s}$; return $\bot$ if not square. +* Depending on $c:$ + * If $c \in \\{0, 2\\}:$ return $w(\frac{\sqrt{-3}-1}{2}u - v).$ + * If $c \in \\{1, 3\\}:$ return $w(\frac{\sqrt{-3}+1}{2}u + v).$ + * If $c \in \\{4, 6\\}:$ return $w(\frac{-\sqrt{-3}+1}{2}u + v).$ + * If $c \in \\{5, 7\\}:$ return $w(\frac{-\sqrt{-3}-1}{2}u - v).$ This is implemented in `secp256k1_ellswift_xswiftec_inv_var`. 
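
To make the case analysis above easier to follow, here is a small illustrative Python sketch of this specialized $G_{c,u}(x)$ over the secp256k1 field. It is only a sketch, not the library's `secp256k1_ellswift_xswiftec_inv_var` C implementation; the helper names (`is_square`, `sqrt_mod`, `g_c_u`) and the use of `None` in place of $\bot$ are choices made for this example.

```python
P = 2**256 - 2**32 - 977      # secp256k1 field prime; P % 4 == 3
B = 7                         # curve constant in y^2 = x^3 + B

def is_square(a):
    # Euler's criterion: a^((P-1)/2) mod P is 0 or 1 exactly for squares.
    return pow(a % P, (P - 1) // 2, P) <= 1

def sqrt_mod(a):
    # Square root mod P (works because P % 4 == 3); None if a is not a square.
    a %= P
    s = pow(a, (P + 1) // 4, P)
    return s if s * s % P == a else None

SQRT_M3 = sqrt_mod(P - 3)     # sqrt(-3) mod P, used by the return formulas above

def g_c_u(x, u, c):
    # Candidate t with F_u(t) = x for branch c in [0,8); None plays the role of "bottom".
    # x and u are integers in [0, P); the steps mirror the bullet list above.
    if u == 0:
        return None
    if c & 2 == 0:            # c in {0, 1, 4, 5}: the x1/x2 formulas
        if is_square(pow(-u - x, 3, P) + B):
            return None       # x3 would also be valid and takes precedence
        s = -(pow(u, 3, P) + B) * pow(u*u + u*x + x*x, P - 2, P) % P
        v = x
    else:                     # c in {2, 3, 6, 7}: the x3 formulas
        s = (x - u) % P
        r = sqrt_mod(-s * (4 * (pow(u, 3, P) + B) + 3 * s * u * u))
        if r is None or (c & 1 and r == 0) or s == 0:
            return None
        v = (r * pow(s, P - 2, P) - u) * pow(2, P - 2, P) % P
    w = sqrt_mod(s)
    if w is None:
        return None
    if c & 1:
        v = (-u - v) % P      # the -u-v variant (c in {1, 3, 5, 7})
    if c & 4:
        w = (-w) % P          # the -w variant (c in {4, 5, 6, 7})
    # One expression covering the four "return w(...)" lines above, after the
    # v and w adjustments made for the chosen branch.
    return w * ((SQRT_M3 - 1) * u * pow(2, P - 2, P) - v) % P
```

For instance, repeatedly sampling a random `u` and a random `c` in `[0, 8)` and keeping the first non-`None` result reproduces the x-only encoder loop restated next.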
And the x-only ElligatorSwift encoding algorithm is still: -**Define** _ElligatorSwift(x)_ as: - -- Loop: - - Pick a uniformly random field element $u.$ - - Pick a uniformly random integer $c$ in $[0,8).$ - - Let $t = G_{c,u}(x).$ - - If $t \neq \bot$, return $(u, t)$; restart loop otherwise. +**Define** *ElligatorSwift(x)* as: +* Loop: + * Pick a uniformly random field element $u.$ + * Pick a uniformly random integer $c$ in $[0,8).$ + * Let $t = G_{c,u}(x).$ + * If $t \neq \bot$, return $(u, t)$; restart loop otherwise. Note that this logic does not take the remapped $u=0$, $t=0$, and $g(u) = -t^2$ cases into account; it just avoids them. While it is not impossible to make the encoder target them, this would increase the maximum number of $t$ values for a given $(u, x)$ combination beyond 8, and thereby slow down the ElligatorSwift loop proportionally, for a negligible gain in uniformity. -## 4. Encoding and decoding full _(x, y)_ coordinates +## 4. Encoding and decoding full *(x, y)* coordinates So far we have only addressed encoding and decoding x-coordinates, but in some cases an encoding for full points with $(x, y)$ coordinates is desirable. It is possible to encode this information @@ -441,32 +422,30 @@ four distinct $P_u^{'-1}$ calls in the definition of $G_{u,c}.$ To encode the sign of $y$ in the sign of $Y:$ -**Define** _Decode(u, t)_ for full $(x, y)$ as: - -- Let $(X, Y) = P_u(t).$ -- Let $x$ be the first value in $(u + 4Y^2, \frac{-X}{2Y} - \frac{u}{2}, \frac{X}{2Y} - \frac{u}{2})$ for which $g(x)$ is square. -- Let $y = \sqrt{g(x)}.$ -- If $sign(y) = sign(Y)$, return $(x, y)$; otherwise return $(x, -y).$ +**Define** *Decode(u, t)* for full $(x, y)$ as: +* Let $(X, Y) = P_u(t).$ +* Let $x$ be the first value in $(u + 4Y^2, \frac{-X}{2Y} - \frac{u}{2}, \frac{X}{2Y} - \frac{u}{2})$ for which $g(x)$ is square. +* Let $y = \sqrt{g(x)}.$ +* If $sign(y) = sign(Y)$, return $(x, y)$; otherwise return $(x, -y).$ And encoding would be done using a $G_{c,u}(x, y)$ function defined as: **Define** $G_{c,u}(x, y)$ as: - -- If $c \in \\{0, 1\\}:$ - - If $g(u) = 0$ or $g(x) = 0$, return $\bot$ (even curves only). - - If $g(-u-x)$ is square, return $\bot.$ - - Let $s = -g(u)/(u^2 + ux + x^2 + a)$ (cannot cause division by zero). - - Let $v = x.$ -- Otherwise, when $c \in \\{2, 3\\}:$ - - Let $s = x-u.$ - - Let $r = \sqrt{-s(4g(u) + sh(u))}$; return $\bot$ if not square. - - If $c = 3$ and $r = 0$, return $\bot.$ - - Let $v = (r/s - u)/2.$ -- Let $w = \sqrt{s}$; return $\bot$ if not square. -- Let $w' = w$ if $sign(w/2) = sign(y)$; $-w$ otherwise. -- Depending on $c:$ - - If $c \in \\{0, 2\\}:$ return $P_u^{'-1}(v, w').$ - - If $c \in \\{1, 3\\}:$ return $P_u^{'-1}(-u-v, w').$ +* If $c \in \\{0, 1\\}:$ + * If $g(u) = 0$ or $g(x) = 0$, return $\bot$ (even curves only). + * If $g(-u-x)$ is square, return $\bot.$ + * Let $s = -g(u)/(u^2 + ux + x^2 + a)$ (cannot cause division by zero). + * Let $v = x.$ +* Otherwise, when $c \in \\{2, 3\\}:$ + * Let $s = x-u.$ + * Let $r = \sqrt{-s(4g(u) + sh(u))}$; return $\bot$ if not square. + * If $c = 3$ and $r = 0$, return $\bot.$ + * Let $v = (r/s - u)/2.$ +* Let $w = \sqrt{s}$; return $\bot$ if not square. +* Let $w' = w$ if $sign(w/2) = sign(y)$; $-w$ otherwise. 
+* Depending on $c:$ + * If $c \in \\{0, 2\\}:$ return $P_u^{'-1}(v, w').$ + * If $c \in \\{1, 3\\}:$ return $P_u^{'-1}(-u-v, w').$ Note that $c$ now only ranges $[0,4)$, as the sign of $w'$ is decided based on that of $y$, rather than on $c.$ This change makes some valid encodings unreachable: when $y = 0$ and $sign(Y) \neq sign(0)$. @@ -475,23 +454,22 @@ In the above logic, $sign$ can be implemented in several ways, such as parity of of the input field element (for prime-sized fields) or the quadratic residuosity (for fields where $-1$ is not square). The choice does not matter, as long as it only takes on two possible values, and for $x \neq 0$ it holds that $sign(x) \neq sign(-x)$. -### 4.1 Full _(x, y)_ coordinates for `secp256k1` +### 4.1 Full *(x, y)* coordinates for `secp256k1` For $a=0$ curves, there is another option. Note that for those, the $P_u(t)$ function translates negations of $t$ to negations of (both) $X$ and $Y.$ Thus, we can use $sign(t)$ to encode the y-coordinate directly. Combined with the earlier remapping to guarantee all inputs land on the curve, we get as decoder: -**Define** _Decode(u, t)_ as: - -- Let $u'=u$ if $u \neq 0$; $1$ otherwise. -- Let $t'=t$ if $t \neq 0$; $1$ otherwise. -- Let $t''=t'$ if $u'^3 + b + t'^2 \neq 0$; $2t'$ otherwise. -- Let $X = \dfrac{u'^3 + b - t''^2}{2t''}.$ -- Let $Y = \dfrac{X + t''}{u'\sqrt{-3}}.$ -- Let $x$ be the first element of $(u' + 4Y^2, \frac{-X}{2Y} - \frac{u'}{2}, \frac{X}{2Y} - \frac{u'}{2})$ for which $g(x)$ is square. -- Let $y = \sqrt{g(x)}.$ -- Return $(x, y)$ if $sign(y) = sign(t)$; $(x, -y)$ otherwise. +**Define** *Decode(u, t)* as: +* Let $u'=u$ if $u \neq 0$; $1$ otherwise. +* Let $t'=t$ if $t \neq 0$; $1$ otherwise. +* Let $t''=t'$ if $u'^3 + b + t'^2 \neq 0$; $2t'$ otherwise. +* Let $X = \dfrac{u'^3 + b - t''^2}{2t''}.$ +* Let $Y = \dfrac{X + t''}{u'\sqrt{-3}}.$ +* Let $x$ be the first element of $(u' + 4Y^2, \frac{-X}{2Y} - \frac{u'}{2}, \frac{X}{2Y} - \frac{u'}{2})$ for which $g(x)$ is square. +* Let $y = \sqrt{g(x)}.$ +* Return $(x, y)$ if $sign(y) = sign(t)$; $(x, -y)$ otherwise. This is implemented in `secp256k1_ellswift_swiftec_var`. The used $sign(x)$ function is the parity of $x$ when represented as in integer in $[0,q).$ diff --git a/external/secp256k1/doc/musig.md b/external/secp256k1/doc/musig.md index 176b131da6..ae21f9b131 100644 --- a/external/secp256k1/doc/musig.md +++ b/external/secp256k1/doc/musig.md @@ -1,4 +1,5 @@ -# Notes on the musig module API +Notes on the musig module API +=========================== The following sections contain additional notes on the API of the musig module (`include/secp256k1_musig.h`). A usage example can be found in `examples/musig.c`. diff --git a/external/secp256k1/doc/release-process.md b/external/secp256k1/doc/release-process.md index 4ac9ca0d23..a64bae0f0d 100644 --- a/external/secp256k1/doc/release-process.md +++ b/external/secp256k1/doc/release-process.md @@ -2,7 +2,7 @@ This document outlines the process for releasing versions of the form `$MAJOR.$MINOR.$PATCH`. -We distinguish between two types of releases: _regular_ and _maintenance_ releases. +We distinguish between two types of releases: *regular* and *maintenance* releases. Regular releases are releases of a new major or minor version as well as patches of the most recent release. Maintenance releases, on the other hand, are required for patches of older releases. 
@@ -15,7 +15,6 @@ This process also assumes that there will be no minor releases for old major rel We aim to cut a regular release every 3-4 months, approximately twice as frequent as major Bitcoin Core releases. Every second release should be published one month before the feature freeze of the next major Bitcoin Core release, allowing sufficient time to update the library in Core. ## Sanity checks - Perform these checks when reviewing the release PR (see below): 1. Ensure `make distcheck` doesn't fail. @@ -43,15 +42,15 @@ Perform these checks when reviewing the release PR (see below): ## Regular release 1. Open a PR to the master branch with a commit (using message `"release: prepare for $MAJOR.$MINOR.$PATCH"`, for example) that - - finalizes the release notes in [CHANGELOG.md](../CHANGELOG.md) by - - adding a section for the release (make sure that the version number is a link to a diff between the previous and new version), - - removing the `[Unreleased]` section header, - - ensuring that the release notes are not missing entries (check the `needs-changelog` label on github), and - - including an entry for `### ABI Compatibility` if it doesn't exist, - - sets `_PKG_VERSION_IS_RELEASE` to `true` in `configure.ac`, and, - - if this is not a patch release, - - updates `_PKG_VERSION_*` and `_LIB_VERSION_*` in `configure.ac`, and - - updates `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_*` in `CMakeLists.txt`. + * finalizes the release notes in [CHANGELOG.md](../CHANGELOG.md) by + * adding a section for the release (make sure that the version number is a link to a diff between the previous and new version), + * removing the `[Unreleased]` section header, + * ensuring that the release notes are not missing entries (check the `needs-changelog` label on github), and + * including an entry for `### ABI Compatibility` if it doesn't exist, + * sets `_PKG_VERSION_IS_RELEASE` to `true` in `configure.ac`, and, + * if this is not a patch release, + * updates `_PKG_VERSION_*` and `_LIB_VERSION_*` in `configure.ac`, and + * updates `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_*` in `CMakeLists.txt`. 2. Perform the [sanity checks](#sanity-checks) on the PR branch. 3. After the PR is merged, tag the commit, and push the tag: ``` @@ -60,12 +59,11 @@ Perform these checks when reviewing the release PR (see below): git push git@github.com:bitcoin-core/secp256k1.git v$MAJOR.$MINOR.$PATCH ``` 4. Open a PR to the master branch with a commit (using message `"release cleanup: bump version after $MAJOR.$MINOR.$PATCH"`, for example) that - - sets `_PKG_VERSION_IS_RELEASE` to `false` and increments `_PKG_VERSION_PATCH` and `_LIB_VERSION_REVISION` in `configure.ac`, - - increments the `$PATCH` component of `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_REVISION` in `CMakeLists.txt`, and - - adds an `[Unreleased]` section header to the [CHANGELOG.md](../CHANGELOG.md). + * sets `_PKG_VERSION_IS_RELEASE` to `false` and increments `_PKG_VERSION_PATCH` and `_LIB_VERSION_REVISION` in `configure.ac`, + * increments the `$PATCH` component of `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_REVISION` in `CMakeLists.txt`, and + * adds an `[Unreleased]` section header to the [CHANGELOG.md](../CHANGELOG.md). If other maintainers are not present to approve the PR, it can be merged without ACKs. - 5. Create a new GitHub release with a link to the corresponding entry in [CHANGELOG.md](../CHANGELOG.md). 6. 
Send an announcement email to the bitcoin-dev mailing list. @@ -79,9 +77,9 @@ Note that bug fixes need to be backported only to releases for which no compatib git push git@github.com:bitcoin-core/secp256k1.git $MAJOR.$MINOR ``` 2. Open a pull request to the `$MAJOR.$MINOR` branch that - - includes the bug fixes, - - finalizes the release notes similar to a regular release, - - increments `_PKG_VERSION_PATCH` and `_LIB_VERSION_REVISION` in `configure.ac` + * includes the bug fixes, + * finalizes the release notes similar to a regular release, + * increments `_PKG_VERSION_PATCH` and `_LIB_VERSION_REVISION` in `configure.ac` and the `$PATCH` component of `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_REVISION` in `CMakeLists.txt` (with commit message `"release: bump versions for $MAJOR.$MINOR.$PATCH"`, for example). 3. Perform the [sanity checks](#sanity-checks) on the PR branch. @@ -91,6 +89,6 @@ Note that bug fixes need to be backported only to releases for which no compatib git tag -s v$MAJOR.$MINOR.$PATCH -m "libsecp256k1 $MAJOR.$MINOR.$PATCH" git push git@github.com:bitcoin-core/secp256k1.git v$MAJOR.$MINOR.$PATCH ``` -5. Create a new GitHub release with a link to the corresponding entry in [CHANGELOG.md](../CHANGELOG.md). -6. Send an announcement email to the bitcoin-dev mailing list. -7. Open PR to the master branch that includes a commit (with commit message `"release notes: add $MAJOR.$MINOR.$PATCH"`, for example) that adds release notes to [CHANGELOG.md](../CHANGELOG.md). +6. Create a new GitHub release with a link to the corresponding entry in [CHANGELOG.md](../CHANGELOG.md). +7. Send an announcement email to the bitcoin-dev mailing list. +8. Open PR to the master branch that includes a commit (with commit message `"release notes: add $MAJOR.$MINOR.$PATCH"`, for example) that adds release notes to [CHANGELOG.md](../CHANGELOG.md). diff --git a/external/secp256k1/doc/safegcd_implementation.md b/external/secp256k1/doc/safegcd_implementation.md index 72d99daad3..5dbbb7bbd2 100644 --- a/external/secp256k1/doc/safegcd_implementation.md +++ b/external/secp256k1/doc/safegcd_implementation.md @@ -29,67 +29,65 @@ def gcd(f, g): return abs(f) ``` -It computes the greatest common divisor of an odd integer _f_ and any integer _g_. Its inner loop -keeps rewriting the variables _f_ and _g_ alongside a state variable _δ_ that starts at _1_, until -_g=0_ is reached. At that point, _|f|_ gives the GCD. Each of the transitions in the loop is called a +It computes the greatest common divisor of an odd integer *f* and any integer *g*. Its inner loop +keeps rewriting the variables *f* and *g* alongside a state variable *δ* that starts at *1*, until +*g=0* is reached. At that point, *|f|* gives the GCD. Each of the transitions in the loop is called a "division step" (referred to as divstep in what follows). -For example, _gcd(21, 14)_ would be computed as: - -- Start with _δ=1 f=21 g=14_ -- Take the third branch: _δ=2 f=21 g=7_ -- Take the first branch: _δ=-1 f=7 g=-7_ -- Take the second branch: _δ=0 f=7 g=0_ -- The answer _|f| = 7_. +For example, *gcd(21, 14)* would be computed as: +- Start with *δ=1 f=21 g=14* +- Take the third branch: *δ=2 f=21 g=7* +- Take the first branch: *δ=-1 f=7 g=-7* +- Take the second branch: *δ=0 f=7 g=0* +- The answer *|f| = 7*. Why it works: - - Divsteps can be decomposed into two steps (see paragraph 8.2 in the paper): - - (a) If _g_ is odd, replace _(f,g)_ with _(g,g-f)_ or (f,g+f), resulting in an even _g_. 
- - (b) Replace _(f,g)_ with _(f,g/2)_ (where _g_ is guaranteed to be even). + - (a) If *g* is odd, replace *(f,g)* with *(g,g-f)* or (f,g+f), resulting in an even *g*. + - (b) Replace *(f,g)* with *(f,g/2)* (where *g* is guaranteed to be even). - Neither of those two operations change the GCD: - - For (a), assume _gcd(f,g)=c_, then it must be the case that _f=a c_ and _g=b c_ for some integers _a_ - and _b_. As _(g,g-f)=(b c,(b-a)c)_ and _(f,f+g)=(a c,(a+b)c)_, the result clearly still has - common factor _c_. Reasoning in the other direction shows that no common factor can be added by + - For (a), assume *gcd(f,g)=c*, then it must be the case that *f=a c* and *g=b c* for some integers *a* + and *b*. As *(g,g-f)=(b c,(b-a)c)* and *(f,f+g)=(a c,(a+b)c)*, the result clearly still has + common factor *c*. Reasoning in the other direction shows that no common factor can be added by doing so either. - - For (b), we know that _f_ is odd, so _gcd(f,g)_ clearly has no factor _2_, and we can remove - it from _g_. -- The algorithm will eventually converge to _g=0_. This is proven in the paper (see theorem G.3). -- It follows that eventually we find a final value _f'_ for which _gcd(f,g) = gcd(f',0)_. As the - gcd of _f'_ and _0_ is _|f'|_ by definition, that is our answer. + - For (b), we know that *f* is odd, so *gcd(f,g)* clearly has no factor *2*, and we can remove + it from *g*. +- The algorithm will eventually converge to *g=0*. This is proven in the paper (see theorem G.3). +- It follows that eventually we find a final value *f'* for which *gcd(f,g) = gcd(f',0)*. As the + gcd of *f'* and *0* is *|f'|* by definition, that is our answer. Compared to more [traditional GCD algorithms](https://en.wikipedia.org/wiki/Euclidean_algorithm), this one has the property of only ever looking at the low-order bits of the variables to decide the next steps, and being easy to make -constant-time (in more low-level languages than Python). The _δ_ parameter is necessary to +constant-time (in more low-level languages than Python). The *δ* parameter is necessary to guide the algorithm towards shrinking the numbers' magnitudes without explicitly needing to look at high order bits. Properties that will become important later: - -- Performing more divsteps than needed is not a problem, as _f_ does not change anymore after _g=0_. -- Only even numbers are divided by _2_. This means that when reasoning about it algebraically we +- Performing more divsteps than needed is not a problem, as *f* does not change anymore after *g=0*. +- Only even numbers are divided by *2*. This means that when reasoning about it algebraically we do not need to worry about rounding. -- At every point during the algorithm's execution the next _N_ steps only depend on the bottom _N_ - bits of _f_ and _g_, and on _δ_. +- At every point during the algorithm's execution the next *N* steps only depend on the bottom *N* + bits of *f* and *g*, and on *δ*. + ## 2. From GCDs to modular inverses -We want an algorithm to compute the inverse _a_ of _x_ modulo _M_, i.e. the number a such that _a x=1 -mod M_. This inverse only exists if the GCD of _x_ and _M_ is _1_, but that is always the case if _M_ is -prime and _0 < x < M_. In what follows, assume that the modular inverse exists. +We want an algorithm to compute the inverse *a* of *x* modulo *M*, i.e. the number a such that *a x=1 +mod M*. This inverse only exists if the GCD of *x* and *M* is *1*, but that is always the case if *M* is +prime and *0 < x < M*. 
In what follows, assume that the modular inverse exists. It turns out this inverse can be computed as a side effect of computing the GCD by keeping track of how the internal variables can be written as linear combinations of the inputs at every step (see the [extended Euclidean algorithm](https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm)). -Since the GCD is _1_, such an algorithm will compute numbers _a_ and _b_ such that a x + b M = 1*. +Since the GCD is *1*, such an algorithm will compute numbers *a* and *b* such that a x + b M = 1*. Taking that expression *mod M* gives *a x mod M = 1*, and we see that *a* is the modular inverse of *x -mod M\*. +mod M*. A similar approach can be used to calculate modular inverses using the divsteps-based GCD -algorithm shown above, if the modulus _M_ is odd. To do so, compute _gcd(f=M,g=x)_, while keeping -track of extra variables _d_ and _e_, for which at every step _d = f/x (mod M)_ and _e = g/x (mod M)_. -_f/x_ here means the number which multiplied with _x_ gives _f mod M_. As _f_ and _g_ are initialized to _M_ -and _x_ respectively, _d_ and _e_ just start off being _0_ (_M/x mod M = 0/x mod M = 0_) and _1_ (_x/x mod M -= 1_). +algorithm shown above, if the modulus *M* is odd. To do so, compute *gcd(f=M,g=x)*, while keeping +track of extra variables *d* and *e*, for which at every step *d = f/x (mod M)* and *e = g/x (mod M)*. +*f/x* here means the number which multiplied with *x* gives *f mod M*. As *f* and *g* are initialized to *M* +and *x* respectively, *d* and *e* just start off being *0* (*M/x mod M = 0/x mod M = 0*) and *1* (*x/x mod M += 1*). ```python def div2(M, x): @@ -121,16 +119,17 @@ def modinv(M, x): return (d * f) % M ``` -Also note that this approach to track _d_ and _e_ throughout the computation to determine the inverse +Also note that this approach to track *d* and *e* throughout the computation to determine the inverse is different from the paper. There (see paragraph 12.1 in the paper) a transition matrix for the entire computation is determined (see section 3 below) and the inverse is computed from that. The approach here avoids the need for 2x2 matrix multiplications of various sizes, and appears to be faster at the level of optimization we're able to do in C. + ## 3. Batching multiple divsteps -Every divstep can be expressed as a matrix multiplication, applying a transition matrix _(1/2 t)_ -to both vectors _[f, g]_ and _[d, e]_ (see paragraph 8.1 in the paper): +Every divstep can be expressed as a matrix multiplication, applying a transition matrix *(1/2 t)* +to both vectors *[f, g]* and *[d, e]* (see paragraph 8.1 in the paper): ``` t = [ u, v ] @@ -143,15 +142,15 @@ to both vectors _[f, g]_ and _[d, e]_ (see paragraph 8.1 in the paper): [ out_e ] [ in_e ] ``` -where _(u, v, q, r)_ is _(0, 2, -1, 1)_, _(2, 0, 1, 1)_, or _(2, 0, 0, 1)_, depending on which branch is -taken. As above, the resulting _f_ and _g_ are always integers. +where *(u, v, q, r)* is *(0, 2, -1, 1)*, *(2, 0, 1, 1)*, or *(2, 0, 0, 1)*, depending on which branch is +taken. As above, the resulting *f* and *g* are always integers. Performing multiple divsteps corresponds to a multiplication with the product of all the individual divsteps' transition matrices. As each transition matrix consists of integers -divided by _2_, the product of these matrices will consist of integers divided by _2N_ (see also -theorem 9.2 in the paper). 
These divisions are expensive when updating _d_ and _e_, so we delay -them: we compute the integer coefficients of the combined transition matrix scaled by _2N_, and -do one division by _2N_ as a final step: +divided by *2*, the product of these matrices will consist of integers divided by *2N* (see also +theorem 9.2 in the paper). These divisions are expensive when updating *d* and *e*, so we delay +them: we compute the integer coefficients of the combined transition matrix scaled by *2N*, and +do one division by *2N* as a final step: ```python def divsteps_n_matrix(delta, f, g): @@ -167,13 +166,13 @@ def divsteps_n_matrix(delta, f, g): return delta, (u, v, q, r) ``` -As the branches in the divsteps are completely determined by the bottom _N_ bits of _f_ and _g_, this +As the branches in the divsteps are completely determined by the bottom *N* bits of *f* and *g*, this function to compute the transition matrix only needs to see those bottom bits. Furthermore all -intermediate results and outputs fit in _(N+1)_-bit numbers (unsigned for _f_ and _g_; signed for _u_, _v_, -_q_, and _r_) (see also paragraph 8.3 in the paper). This means that an implementation using 64-bit -integers could set _N=62_ and compute the full transition matrix for 62 steps at once without any +intermediate results and outputs fit in *(N+1)*-bit numbers (unsigned for *f* and *g*; signed for *u*, *v*, +*q*, and *r*) (see also paragraph 8.3 in the paper). This means that an implementation using 64-bit +integers could set *N=62* and compute the full transition matrix for 62 steps at once without any big integer arithmetic at all. This is the reason why this algorithm is efficient: it only needs -to update the full-size _f_, _g_, _d_, and _e_ numbers once every _N_ steps. +to update the full-size *f*, *g*, *d*, and *e* numbers once every *N* steps. We still need functions to compute: @@ -185,8 +184,8 @@ We still need functions to compute: [ out_e ] ( [ q, r ]) [ in_e ] ``` -Because the divsteps transformation only ever divides even numbers by two, the result of _t [f,g]_ is always even. When _t_ is a composition of _N_ divsteps, it follows that the resulting _f_ -and _g_ will be multiple of _2N_, and division by _2N_ is simply shifting them down: +Because the divsteps transformation only ever divides even numbers by two, the result of *t [f,g]* is always even. When *t* is a composition of *N* divsteps, it follows that the resulting *f* +and *g* will be multiple of *2N*, and division by *2N* is simply shifting them down: ```python def update_fg(f, g, t): @@ -200,8 +199,8 @@ def update_fg(f, g, t): return cf >> N, cg >> N ``` -The same is not true for _d_ and _e_, and we need an equivalent of the `div2` function for division by _2N mod M_. -This is easy if we have precomputed _1/M mod 2N_ (which always exists for odd _M_): +The same is not true for *d* and *e*, and we need an equivalent of the `div2` function for division by *2N mod M*. +This is easy if we have precomputed *1/M mod 2N* (which always exists for odd *M*): ```python def div2n(M, Mi, x): @@ -225,7 +224,7 @@ def update_de(d, e, t, M, Mi): return div2n(M, Mi, cd), div2n(M, Mi, ce) ``` -With all of those, we can write a version of `modinv` that performs _N_ divsteps at once: +With all of those, we can write a version of `modinv` that performs *N* divsteps at once: ```python3 def modinv(M, Mi, x): @@ -243,19 +242,20 @@ def modinv(M, Mi, x): return (d * f) % M ``` -This means that in practice we'll always perform a multiple of _N_ divsteps. 
This is not a problem -because once _g=0_, further divsteps do not affect _f_, _g_, _d_, or _e_ anymore (only _δ_ keeps +This means that in practice we'll always perform a multiple of *N* divsteps. This is not a problem +because once *g=0*, further divsteps do not affect *f*, *g*, *d*, or *e* anymore (only *δ* keeps increasing). For variable time code such excess iterations will be mostly optimized away in later sections. + ## 4. Avoiding modulus operations -So far, there are two places where we compute a remainder of big numbers modulo _M_: at the end of -`div2n` in every `update_de`, and at the very end of `modinv` after potentially negating _d_ due to the -sign of _f_. These are relatively expensive operations when done generically. +So far, there are two places where we compute a remainder of big numbers modulo *M*: at the end of +`div2n` in every `update_de`, and at the very end of `modinv` after potentially negating *d* due to the +sign of *f*. These are relatively expensive operations when done generically. -To deal with the modulus operation in `div2n`, we simply stop requiring _d_ and _e_ to be in range -_[0,M)_ all the time. Let's start by inlining `div2n` into `update_de`, and dropping the modulus +To deal with the modulus operation in `div2n`, we simply stop requiring *d* and *e* to be in range +*[0,M)* all the time. Let's start by inlining `div2n` into `update_de`, and dropping the modulus operation at the end: ```python @@ -272,15 +272,15 @@ def update_de(d, e, t, M, Mi): return cd >> N, ce >> N ``` -Let's look at bounds on the ranges of these numbers. It can be shown that _|u|+|v|_ and _|q|+|r|_ -never exceed _2N_ (see paragraph 8.3 in the paper), and thus a multiplication with _t_ will have -outputs whose absolute values are at most _2N_ times the maximum absolute input value. In case the -inputs _d_ and _e_ are in _(-M,M)_, which is certainly true for the initial values _d=0_ and _e=1_ assuming -_M > 1_, the multiplication results in numbers in range _(-2NM,2NM)_. Subtracting less than _2N_ -times _M_ to cancel out _N_ bits brings that up to _(-2N+1M,2NM)_, and -dividing by _2N_ at the end takes it to _(-2M,M)_. Another application of `update_de` would take that -to _(-3M,2M)_, and so forth. This progressive expansion of the variables' ranges can be -counteracted by incrementing _d_ and _e_ by _M_ whenever they're negative: +Let's look at bounds on the ranges of these numbers. It can be shown that *|u|+|v|* and *|q|+|r|* +never exceed *2N* (see paragraph 8.3 in the paper), and thus a multiplication with *t* will have +outputs whose absolute values are at most *2N* times the maximum absolute input value. In case the +inputs *d* and *e* are in *(-M,M)*, which is certainly true for the initial values *d=0* and *e=1* assuming +*M > 1*, the multiplication results in numbers in range *(-2NM,2NM)*. Subtracting less than *2N* +times *M* to cancel out *N* bits brings that up to *(-2N+1M,2NM)*, and +dividing by *2N* at the end takes it to *(-2M,M)*. Another application of `update_de` would take that +to *(-3M,2M)*, and so forth. This progressive expansion of the variables' ranges can be +counteracted by incrementing *d* and *e* by *M* whenever they're negative: ```python ... @@ -293,12 +293,12 @@ counteracted by incrementing _d_ and _e_ by _M_ whenever they're negative: ... 
``` -With inputs in _(-2M,M)_, they will first be shifted into range _(-M,M)_, which means that the -output will again be in _(-2M,M)_, and this remains the case regardless of how many `update_de` +With inputs in *(-2M,M)*, they will first be shifted into range *(-M,M)*, which means that the +output will again be in *(-2M,M)*, and this remains the case regardless of how many `update_de` invocations there are. In what follows, we will try to make this more efficient. -Note that increasing _d_ by _M_ is equal to incrementing _cd_ by _u M_ and _ce_ by _q M_. Similarly, -increasing _e_ by _M_ is equal to incrementing _cd_ by _v M_ and _ce_ by _r M_. So we could instead write: +Note that increasing *d* by *M* is equal to incrementing *cd* by *u M* and *ce* by *q M*. Similarly, +increasing *e* by *M* is equal to incrementing *cd* by *v M* and *ce* by *r M*. So we could instead write: ```python ... @@ -318,10 +318,10 @@ increasing _e_ by _M_ is equal to incrementing _cd_ by _v M_ and _ce_ by ... ``` -Now note that we have two steps of corrections to _cd_ and _ce_ that add multiples of _M_: this +Now note that we have two steps of corrections to *cd* and *ce* that add multiples of *M*: this increment, and the decrement that cancels out bottom bits. The second one depends on the first -one, but they can still be efficiently combined by only computing the bottom bits of _cd_ and _ce_ -at first, and using that to compute the final _md_, _me_ values: +one, but they can still be efficiently combined by only computing the bottom bits of *cd* and *ce* +at first, and using that to compute the final *md*, *me* values: ```python def update_de(d, e, t, M, Mi): @@ -346,8 +346,8 @@ def update_de(d, e, t, M, Mi): return cd >> N, ce >> N ``` -One last optimization: we can avoid the _md M_ and _me M_ multiplications in the bottom bits of _cd_ -and _ce_ by moving them to the _md_ and _me_ correction: +One last optimization: we can avoid the *md M* and *me M* multiplications in the bottom bits of *cd* +and *ce* by moving them to the *md* and *me* correction: ```python ... @@ -362,10 +362,10 @@ and _ce_ by moving them to the _md_ and _me_ correction: ... ``` -The resulting function takes _d_ and _e_ in range _(-2M,M)_ as inputs, and outputs values in the same -range. That also means that the _d_ value at the end of `modinv` will be in that range, while we want -a result in _[0,M)_. To do that, we need a normalization function. It's easy to integrate the -conditional negation of _d_ (based on the sign of _f_) into it as well: +The resulting function takes *d* and *e* in range *(-2M,M)* as inputs, and outputs values in the same +range. That also means that the *d* value at the end of `modinv` will be in that range, while we want +a result in *[0,M)*. To do that, we need a normalization function. It's easy to integrate the +conditional negation of *d* (based on the sign of *f*) into it as well: ```python def normalize(sign, v, M): @@ -391,21 +391,22 @@ And calling it in `modinv` is simply: return normalize(f, d, M) ``` + ## 5. Constant-time operation The primary selling point of the algorithm is fast constant-time operation. What code flow still depends on the input data so far? -- the number of iterations of the while _g ≠ 0_ loop in `modinv` +- the number of iterations of the while *g ≠ 0* loop in `modinv` - the branches inside `divsteps_n_matrix` - the sign checks in `update_de` - the sign checks in `normalize` To make the while loop in `modinv` constant time it can be replaced with a constant number of -iterations. 
The paper proves (Theorem 11.2) that _741_ divsteps are sufficient for any _256_-bit -inputs, and [safegcd-bounds](https://github.com/sipa/safegcd-bounds) shows that the slightly better bound _724_ is -sufficient even. Given that every loop iteration performs _N_ divsteps, it will run a total of -_⌈724/N⌉_ times. +iterations. The paper proves (Theorem 11.2) that *741* divsteps are sufficient for any *256*-bit +inputs, and [safegcd-bounds](https://github.com/sipa/safegcd-bounds) shows that the slightly better bound *724* is +sufficient even. Given that every loop iteration performs *N* divsteps, it will run a total of +*⌈724/N⌉* times. To deal with the branches in `divsteps_n_matrix` we will replace them with constant-time bitwise operations (and hope the C compiler isn't smart enough to turn them back into branches; see @@ -424,10 +425,10 @@ divstep can be written instead as (compare to the inner loop of `gcd` in section ``` To convert the above to bitwise operations, we rely on a trick to negate conditionally: per the -definition of negative numbers in two's complement, (_-v == ~v + 1_) holds for every number _v_. As -_-1_ in two's complement is all _1_ bits, bitflipping can be expressed as xor with _-1_. It follows -that _-v == (v ^ -1) - (-1)_. Thus, if we have a variable _c_ that takes on values _0_ or _-1_, then -_(v ^ c) - c_ is _v_ if _c=0_ and _-v_ if _c=-1_. +definition of negative numbers in two's complement, (*-v == ~v + 1*) holds for every number *v*. As +*-1* in two's complement is all *1* bits, bitflipping can be expressed as xor with *-1*. It follows +that *-v == (v ^ -1) - (-1)*. Thus, if we have a variable *c* that takes on values *0* or *-1*, then +*(v ^ c) - c* is *v* if *c=0* and *-v* if *c=-1*. Using this we can write: @@ -443,13 +444,13 @@ in constant-time form as: x = (f ^ c1) - c1 ``` -To use that trick, we need a helper mask variable _c1_ that resolves the condition _δ>0_ to _-1_ -(if true) or _0_ (if false). We compute _c1_ using right shifting, which is equivalent to dividing by -the specified power of _2_ and rounding down (in Python, and also in C under the assumption of a typical two's complement system; see -`assumptions.h` for tests that this is the case). Right shifting by _63_ thus maps all -numbers in range _[-263,0)_ to _-1_, and numbers in range _[0,263)_ to _0_. +To use that trick, we need a helper mask variable *c1* that resolves the condition *δ>0* to *-1* +(if true) or *0* (if false). We compute *c1* using right shifting, which is equivalent to dividing by +the specified power of *2* and rounding down (in Python, and also in C under the assumption of a typical two's complement system; see +`assumptions.h` for tests that this is the case). Right shifting by *63* thus maps all +numbers in range *[-263,0)* to *-1*, and numbers in range *[0,263)* to *0*. -Using the facts that _x&0=0_ and _x&(-1)=x_ (on two's complement systems again), we can write: +Using the facts that *x&0=0* and *x&(-1)=x* (on two's complement systems again), we can write: ```python if g & 1: @@ -497,8 +498,8 @@ becomes: ``` It turns out that this can be implemented more efficiently by applying the substitution -_η=-δ_. In this representation, negating _δ_ corresponds to negating _η_, and incrementing -_δ_ corresponds to decrementing _η_. This allows us to remove the negation in the _c1_ +*η=-δ*. In this representation, negating *δ* corresponds to negating *η*, and incrementing +*δ* corresponds to decrementing *η*. 
This allows us to remove the negation in the *c1* computation: ```python @@ -518,12 +519,12 @@ computation: g >>= 1 ``` -A variant of divsteps with better worst-case performance can be used instead: starting _δ_ at -_1/2_ instead of _1_. This reduces the worst case number of iterations to _590_ for _256_-bit inputs -(which can be shown using convex hull analysis). In this case, the substitution _ζ=-(δ+1/2)_ -is used instead to keep the variable integral. Incrementing _δ_ by _1_ still translates to -decrementing _ζ_ by _1_, but negating _δ_ now corresponds to going from _ζ_ to _-(ζ+1)_, or -_~ζ_. Doing that conditionally based on _c3_ is simply: +A variant of divsteps with better worst-case performance can be used instead: starting *δ* at +*1/2* instead of *1*. This reduces the worst case number of iterations to *590* for *256*-bit inputs +(which can be shown using convex hull analysis). In this case, the substitution *ζ=-(δ+1/2)* +is used instead to keep the variable integral. Incrementing *δ* by *1* still translates to +decrementing *ζ* by *1*, but negating *δ* now corresponds to going from *ζ* to *-(ζ+1)*, or +*~ζ*. Doing that conditionally based on *c3* is simply: ```python ... @@ -533,12 +534,13 @@ _~ζ_. Doing that conditionally based on _c3_ is simply: ``` By replacing the loop in `divsteps_n_matrix` with a variant of the divstep code above (extended to -also apply all _f_ operations to _u_, _v_ and all _g_ operations to _q_, _r_), a constant-time version of +also apply all *f* operations to *u*, *v* and all *g* operations to *q*, *r*), a constant-time version of `divsteps_n_matrix` is obtained. The full code will be in section 7. These bit fiddling tricks can also be used to make the conditional negations and additions in `update_de` and `normalize` constant-time. + ## 6. Variable-time optimizations In section 5, we modified the `divsteps_n_matrix` function (and a few others) to be constant time. @@ -548,7 +550,7 @@ faster non-constant time `divsteps_n_matrix` function. To do so, first consider yet another way of writing the inner loop of divstep operations in `gcd` from section 1. This decomposition is also explained in the paper in section 8.2. We use -the original version with initial _δ=1_ and _η=-δ_ here. +the original version with initial *δ=1* and *η=-δ* here. ```python for _ in range(N): @@ -560,7 +562,7 @@ for _ in range(N): g >>= 1 ``` -Whenever _g_ is even, the loop only shifts _g_ down and decreases _η_. When _g_ ends in multiple zero +Whenever *g* is even, the loop only shifts *g* down and decreases *η*. When *g* ends in multiple zero bits, these iterations can be consolidated into one step. This requires counting the bottom zero bits efficiently, which is possible on most platforms; it is abstracted here as the function `count_trailing_zeros`. @@ -593,20 +595,20 @@ while True: # g is even now, and the eta decrement and g shift will happen in the next loop. ``` -We can now remove multiple bottom _0_ bits from _g_ at once, but still need a full iteration whenever -there is a bottom _1_ bit. In what follows, we will get rid of multiple _1_ bits simultaneously as +We can now remove multiple bottom *0* bits from *g* at once, but still need a full iteration whenever +there is a bottom *1* bit. In what follows, we will get rid of multiple *1* bits simultaneously as well. -Observe that as long as _η ≥ 0_, the loop does not modify _f_. 
Instead, it cancels out bottom -bits of _g_ and shifts them out, and decreases _η_ and _i_ accordingly - interrupting only when _η_ -becomes negative, or when _i_ reaches _0_. Combined, this is equivalent to adding a multiple of _f_ to -_g_ to cancel out multiple bottom bits, and then shifting them out. +Observe that as long as *η ≥ 0*, the loop does not modify *f*. Instead, it cancels out bottom +bits of *g* and shifts them out, and decreases *η* and *i* accordingly - interrupting only when *η* +becomes negative, or when *i* reaches *0*. Combined, this is equivalent to adding a multiple of *f* to +*g* to cancel out multiple bottom bits, and then shifting them out. -It is easy to find what that multiple is: we want a number _w_ such that _g+w f_ has a few bottom -zero bits. If that number of bits is _L_, we want _g+w f mod 2L = 0_, or _w = -g/f mod 2L_. Since _f_ -is odd, such a _w_ exists for any _L_. _L_ cannot be more than _i_ steps (as we'd finish the loop before -doing more) or more than _η+1_ steps (as we'd run `eta, f, g = -eta, g, -f` at that point), but -apart from that, we're only limited by the complexity of computing _w_. +It is easy to find what that multiple is: we want a number *w* such that *g+w f* has a few bottom +zero bits. If that number of bits is *L*, we want *g+w f mod 2L = 0*, or *w = -g/f mod 2L*. Since *f* +is odd, such a *w* exists for any *L*. *L* cannot be more than *i* steps (as we'd finish the loop before +doing more) or more than *η+1* steps (as we'd run `eta, f, g = -eta, g, -f` at that point), but +apart from that, we're only limited by the complexity of computing *w*. This code demonstrates how to cancel up to 4 bits per step: @@ -640,25 +642,26 @@ some can be found in Hacker's Delight second edition by Henry S. Warren, Jr. pag Here we need the negated modular inverse, which is a simple transformation of those: - Instead of a 3-bit table: - - _-f_ or _f ^ 6_ + - *-f* or *f ^ 6* - Instead of a 4-bit table: - - _1 - f(f + 1)_ - - _-(f + (((f + 1) & 4) << 1))_ -- For larger tables the following technique can be used: if _w=-1/f mod 2L_, then _w(w f+2)_ is - _-1/f mod 22L_. This allows extending the previous formulas (or tables). In particular we + - *1 - f(f + 1)* + - *-(f + (((f + 1) & 4) << 1))* +- For larger tables the following technique can be used: if *w=-1/f mod 2L*, then *w(w f+2)* is + *-1/f mod 22L*. This allows extending the previous formulas (or tables). In particular we have this 6-bit function (based on the 3-bit function above): - - _f(f2 - 2)_ + - *f(f2 - 2)* -This loop, again extended to also handle _u_, _v_, _q_, and _r_ alongside _f_ and _g_, placed in +This loop, again extended to also handle *u*, *v*, *q*, and *r* alongside *f* and *g*, placed in `divsteps_n_matrix`, gives a significantly faster, but non-constant time version. + ## 7. 
Final Python version All together we need the following functions: - A way to compute the transition matrix in constant time, using the `divsteps_n_matrix` function from section 2, but with its loop replaced by a variant of the constant-time divstep from - section 5, extended to handle _u_, _v_, _q_, _r_: + section 5, extended to handle *u*, *v*, *q*, *r*: ```python def divsteps_n_matrix(zeta, f, g): @@ -681,7 +684,7 @@ def divsteps_n_matrix(zeta, f, g): return zeta, (u, v, q, r) ``` -- The functions to update _f_ and _g_, and _d_ and _e_, from section 2 and section 4, with the constant-time +- The functions to update *f* and *g*, and *d* and *e*, from section 2 and section 4, with the constant-time changes to `update_de` from section 5: ```python @@ -720,7 +723,7 @@ def normalize(sign, v, M): return v ``` -- And finally the `modinv` function too, adapted to use _ζ_ instead of _δ_, and using the fixed +- And finally the `modinv` function too, adapted to use *ζ* instead of *δ*, and using the fixed iteration count from section 5: ```python @@ -769,21 +772,20 @@ def modinv_var(M, Mi, x): ## 8. From GCDs to Jacobi symbol -We can also use a similar approach to calculate Jacobi symbol _(x | M)_ by keeping track of an -extra variable _j_, for which at every step _(x | M) = j (g | f)_. As we update _f_ and _g_, we -make corresponding updates to _j_ using +We can also use a similar approach to calculate Jacobi symbol *(x | M)* by keeping track of an +extra variable *j*, for which at every step *(x | M) = j (g | f)*. As we update *f* and *g*, we +make corresponding updates to *j* using [properties of the Jacobi symbol](https://en.wikipedia.org/wiki/Jacobi_symbol#Properties): +* *((g/2) | f)* is either *(g | f)* or *-(g | f)*, depending on the value of *f mod 8* (negating if it's *3* or *5*). +* *(f | g)* is either *(g | f)* or *-(g | f)*, depending on *f mod 4* and *g mod 4* (negating if both are *3*). -- _((g/2) | f)_ is either _(g | f)_ or _-(g | f)_, depending on the value of _f mod 8_ (negating if it's _3_ or _5_). -- _(f | g)_ is either _(g | f)_ or _-(g | f)_, depending on _f mod 4_ and _g mod 4_ (negating if both are _3_). - -These updates depend only on the values of _f_ and _g_ modulo _4_ or _8_, and can thus be applied -very quickly, as long as we keep track of a few additional bits of _f_ and _g_. Overall, this +These updates depend only on the values of *f* and *g* modulo *4* or *8*, and can thus be applied +very quickly, as long as we keep track of a few additional bits of *f* and *g*. Overall, this calculation is slightly simpler than the one for the modular inverse because we no longer need to -keep track of _d_ and _e_. +keep track of *d* and *e*. -However, one difficulty of this approach is that the Jacobi symbol _(a | n)_ is only defined for -positive odd integers _n_, whereas in the original safegcd algorithm, _f, g_ can take negative +However, one difficulty of this approach is that the Jacobi symbol *(a | n)* is only defined for +positive odd integers *n*, whereas in the original safegcd algorithm, *f, g* can take negative values. We resolve this by using the following modified steps: ```python @@ -797,16 +799,15 @@ values. We resolve this by using the following modified steps: ``` The algorithm is still correct, since the changed divstep, called a "posdivstep" (see section 8.4 -and E.5 in the paper) preserves _gcd(f, g)_. However, there's no proof that the modified algorithm +and E.5 in the paper) preserves *gcd(f, g)*. 
However, there's no proof that the modified algorithm will converge. The justification for posdivsteps is completely empirical: in practice, it appears -that the vast majority of nonzero inputs converge to _f=g=gcd(f0, g0)_ in a +that the vast majority of nonzero inputs converge to *f=g=gcd(f0, g0)* in a number of steps proportional to their logarithm. Note that: - -- We require inputs to satisfy _gcd(x, M) = 1_, as otherwise _f=1_ is not reached. -- We require inputs _x &neq; 0_, because applying posdivstep with _g=0_ has no effect. -- We need to update the termination condition from _g=0_ to _f=1_. +- We require inputs to satisfy *gcd(x, M) = 1*, as otherwise *f=1* is not reached. +- We require inputs *x &neq; 0*, because applying posdivstep with *g=0* has no effect. +- We need to update the termination condition from *g=0* to *f=1*. We account for the possibility of nonconvergence by only performing a bounded number of posdivsteps, and then falling back to square-root based Jacobi calculation if a solution has not @@ -814,5 +815,5 @@ yet been found. The optimizations in sections 3-7 above are described in the context of the original divsteps, but in the C implementation we also adapt most of them (not including "avoiding modulus operations", -since it's not necessary to track _d, e_, and "constant-time operation", since we never calculate +since it's not necessary to track *d, e*, and "constant-time operation", since we never calculate Jacobi symbols for secret data) to the posdivsteps version. diff --git a/external/secp256k1/src/wycheproof/ecdsa_secp256k1_sha256_bitcoin_test.json b/external/secp256k1/src/wycheproof/ecdsa_secp256k1_sha256_bitcoin_test.json index 04e34f5a17..9c90747993 100644 --- a/external/secp256k1/src/wycheproof/ecdsa_secp256k1_sha256_bitcoin_test.json +++ b/external/secp256k1/src/wycheproof/ecdsa_secp256k1_sha256_bitcoin_test.json @@ -1,6358 +1,6358 @@ { - "algorithm": "ECDSA", - "schema": "ecdsa_bitcoin_verify_schema.json", - "generatorVersion": "0.9rc5", - "numberOfTests": 463, - "header": [ + "algorithm" : "ECDSA", + "schema" : "ecdsa_bitcoin_verify_schema.json", + "generatorVersion" : "0.9rc5", + "numberOfTests" : 463, + "header" : [ "Test vectors of type EcdsaBitcoinVerify are meant for the verification", "of a ECDSA variant used for bitcoin, that add signature non-malleability." ], - "notes": { - "ArithmeticError": { - "bugType": "EDGE_CASE", - "description": "Some implementations of ECDSA have arithmetic errors that occur when intermediate results have extreme values. This test vector has been constructed to test such occurences.", - "cves": [ + "notes" : { + "ArithmeticError" : { + "bugType" : "EDGE_CASE", + "description" : "Some implementations of ECDSA have arithmetic errors that occur when intermediate results have extreme values. This test vector has been constructed to test such occurences.", + "cves" : [ "CVE-2017-18146" ] }, - "BerEncodedSignature": { - "bugType": "BER_ENCODING", - "description": "ECDSA signatures are usually DER encoded. This signature contains valid values for r and s, but it uses alternative BER encoding.", - "effect": "Accepting alternative BER encodings may be benign in some cases, or be an issue if protocol requires signature malleability.", - "cves": [ + "BerEncodedSignature" : { + "bugType" : "BER_ENCODING", + "description" : "ECDSA signatures are usually DER encoded. 
This signature contains valid values for r and s, but it uses alternative BER encoding.", + "effect" : "Accepting alternative BER encodings may be benign in some cases, or be an issue if protocol requires signature malleability.", + "cves" : [ "CVE-2020-14966", "CVE-2020-13822", "CVE-2019-14859", "CVE-2016-1000342" ] }, - "EdgeCasePublicKey": { - "bugType": "EDGE_CASE", - "description": "The test vector uses a special case public key. " + "EdgeCasePublicKey" : { + "bugType" : "EDGE_CASE", + "description" : "The test vector uses a special case public key. " }, - "EdgeCaseShamirMultiplication": { - "bugType": "EDGE_CASE", - "description": "Shamir proposed a fast method for computing the sum of two scalar multiplications efficiently. This test vector has been constructed so that an intermediate result is the point at infinity if Shamir's method is used." + "EdgeCaseShamirMultiplication" : { + "bugType" : "EDGE_CASE", + "description" : "Shamir proposed a fast method for computing the sum of two scalar multiplications efficiently. This test vector has been constructed so that an intermediate result is the point at infinity if Shamir's method is used." }, - "IntegerOverflow": { - "bugType": "CAN_OF_WORMS", - "description": "The test vector contains an r and s that has been modified, so that the original value is restored if the implementation ignores the most significant bits.", - "effect": "Without further analysis it is unclear if the modification can be used to forge signatures." + "IntegerOverflow" : { + "bugType" : "CAN_OF_WORMS", + "description" : "The test vector contains an r and s that has been modified, so that the original value is restored if the implementation ignores the most significant bits.", + "effect" : "Without further analysis it is unclear if the modification can be used to forge signatures." }, - "InvalidEncoding": { - "bugType": "CAN_OF_WORMS", - "description": "ECDSA signatures are encoded using ASN.1. This test vector contains an incorrectly encoded signature. The test vector itself was generated from a valid signature by modifying its encoding.", - "effect": "Without further analysis it is unclear if the modification can be used to forge signatures." + "InvalidEncoding" : { + "bugType" : "CAN_OF_WORMS", + "description" : "ECDSA signatures are encoded using ASN.1. This test vector contains an incorrectly encoded signature. The test vector itself was generated from a valid signature by modifying its encoding.", + "effect" : "Without further analysis it is unclear if the modification can be used to forge signatures." }, - "InvalidSignature": { - "bugType": "AUTH_BYPASS", - "description": "The signature contains special case values such as r=0 and s=0. Buggy implementations may accept such values, if the implementation does not check boundaries and computes s^(-1) == 0.", - "effect": "Accepting such signatures can have the effect that an adversary can forge signatures without even knowning the message to sign.", - "cves": [ + "InvalidSignature" : { + "bugType" : "AUTH_BYPASS", + "description" : "The signature contains special case values such as r=0 and s=0. 
Buggy implementations may accept such values, if the implementation does not check boundaries and computes s^(-1) == 0.", + "effect" : "Accepting such signatures can have the effect that an adversary can forge signatures without even knowning the message to sign.", + "cves" : [ "CVE-2022-21449", "CVE-2021-43572", "CVE-2022-24884" ] }, - "InvalidTypesInSignature": { - "bugType": "AUTH_BYPASS", - "description": "The signature contains invalid types. Dynamic typed languages sometime coerce such values of different types into integers. If an implementation is careless and has additional bugs, such as not checking integer boundaries then it may be possible that such signatures are accepted.", - "effect": "Accepting such signatures can have the effect that an adversary can forge signatures without even knowning the message to sign.", - "cves": [ + "InvalidTypesInSignature" : { + "bugType" : "AUTH_BYPASS", + "description" : "The signature contains invalid types. Dynamic typed languages sometime coerce such values of different types into integers. If an implementation is careless and has additional bugs, such as not checking integer boundaries then it may be possible that such signatures are accepted.", + "effect" : "Accepting such signatures can have the effect that an adversary can forge signatures without even knowning the message to sign.", + "cves" : [ "CVE-2022-21449" ] }, - "ModifiedInteger": { - "bugType": "CAN_OF_WORMS", - "description": "The test vector contains an r and s that has been modified. The goal is to check for arithmetic errors.", - "effect": "Without further analysis it is unclear if the modification can be used to forge signatures." + "ModifiedInteger" : { + "bugType" : "CAN_OF_WORMS", + "description" : "The test vector contains an r and s that has been modified. The goal is to check for arithmetic errors.", + "effect" : "Without further analysis it is unclear if the modification can be used to forge signatures." }, - "ModifiedSignature": { - "bugType": "CAN_OF_WORMS", - "description": "The test vector contains an invalid signature that was generated from a valid signature by modifying it.", - "effect": "Without further analysis it is unclear if the modification can be used to forge signatures." + "ModifiedSignature" : { + "bugType" : "CAN_OF_WORMS", + "description" : "The test vector contains an invalid signature that was generated from a valid signature by modifying it.", + "effect" : "Without further analysis it is unclear if the modification can be used to forge signatures." }, - "ModularInverse": { - "bugType": "EDGE_CASE", - "description": "The test vectors contains a signature where computing the modular inverse of s hits an edge case.", - "effect": "While the signature in this test vector is constructed and similar cases are unlikely to occur, it is important to determine if the underlying arithmetic error can be used to forge signatures.", - "cves": [ + "ModularInverse" : { + "bugType" : "EDGE_CASE", + "description" : "The test vectors contains a signature where computing the modular inverse of s hits an edge case.", + "effect" : "While the signature in this test vector is constructed and similar cases are unlikely to occur, it is important to determine if the underlying arithmetic error can be used to forge signatures.", + "cves" : [ "CVE-2019-0865" ] }, - "PointDuplication": { - "bugType": "EDGE_CASE", - "description": "Some implementations of ECDSA do not handle duplication and points at infinity correctly. 
This is a test vector that has been specially crafted to check for such an omission.", - "cves": [ + "PointDuplication" : { + "bugType" : "EDGE_CASE", + "description" : "Some implementations of ECDSA do not handle duplication and points at infinity correctly. This is a test vector that has been specially crafted to check for such an omission.", + "cves" : [ "2020-12607", "CVE-2015-2730" ] }, - "RangeCheck": { - "bugType": "CAN_OF_WORMS", - "description": "The test vector contains an r and s that has been modified. By adding or subtracting the order of the group (or other values) the test vector checks whether signature verification verifies the range of r and s.", - "effect": "Without further analysis it is unclear if the modification can be used to forge signatures." + "RangeCheck" : { + "bugType" : "CAN_OF_WORMS", + "description" : "The test vector contains an r and s that has been modified. By adding or subtracting the order of the group (or other values) the test vector checks whether signature verification verifies the range of r and s.", + "effect" : "Without further analysis it is unclear if the modification can be used to forge signatures." }, - "SignatureMalleabilityBitcoin": { - "bugType": "SIGNATURE_MALLEABILITY", - "description": "\"BitCoins\"-curves are curves where signature malleability can be a serious issue. An implementation should only accept a signature s where s < n/2. If an implementation is not meant for uses cases that require signature malleability then this implemenation should be tested with another set of test vectors.", - "effect": "In bitcoin exchanges, it may be used to make a double deposits or double withdrawals", - "links": [ + "SignatureMalleabilityBitcoin" : { + "bugType" : "SIGNATURE_MALLEABILITY", + "description" : "\"BitCoins\"-curves are curves where signature malleability can be a serious issue. An implementation should only accept a signature s where s < n/2. If an implementation is not meant for uses cases that require signature malleability then this implemenation should be tested with another set of test vectors.", + "effect" : "In bitcoin exchanges, it may be used to make a double deposits or double withdrawals", + "links" : [ "https://en.bitcoin.it/wiki/Transaction_malleability", "https://en.bitcoinwiki.org/wiki/Transaction_Malleability" ] }, - "SmallRandS": { - "bugType": "EDGE_CASE", - "description": "The test vectors contains a signature where both r and s are small integers. Some libraries cannot verify such signatures.", - "effect": "While the signature in this test vector is constructed and similar cases are unlikely to occur, it is important to determine if the underlying arithmetic error can be used to forge signatures.", - "cves": [ + "SmallRandS" : { + "bugType" : "EDGE_CASE", + "description" : "The test vectors contains a signature where both r and s are small integers. Some libraries cannot verify such signatures.", + "effect" : "While the signature in this test vector is constructed and similar cases are unlikely to occur, it is important to determine if the underlying arithmetic error can be used to forge signatures.", + "cves" : [ "2020-13895" ] }, - "SpecialCaseHash": { - "bugType": "EDGE_CASE", - "description": "The test vector contains a signature where the hash of the message is a special case, e.g., contains a long run of 0 or 1 bits." 
+ "SpecialCaseHash" : { + "bugType" : "EDGE_CASE", + "description" : "The test vector contains a signature where the hash of the message is a special case, e.g., contains a long run of 0 or 1 bits." }, - "ValidSignature": { - "bugType": "BASIC", - "description": "The test vector contains a valid signature that was generated pseudorandomly. Such signatures should not fail to verify unless some of the parameters (e.g. curve or hash function) are not supported." + "ValidSignature" : { + "bugType" : "BASIC", + "description" : "The test vector contains a valid signature that was generated pseudorandomly. Such signatures should not fail to verify unless some of the parameters (e.g. curve or hash function) are not supported." } }, - "testGroups": [ + "testGroups" : [ { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04b838ff44e5bc177bf21189d0766082fc9d843226887fc9760371100b7ee20a6ff0c9d75bfba7b31a6bca1974496eeb56de357071955d83c4b1badaa0b21832e9", - "wx": "00b838ff44e5bc177bf21189d0766082fc9d843226887fc9760371100b7ee20a6f", - "wy": "00f0c9d75bfba7b31a6bca1974496eeb56de357071955d83c4b1badaa0b21832e9" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04b838ff44e5bc177bf21189d0766082fc9d843226887fc9760371100b7ee20a6ff0c9d75bfba7b31a6bca1974496eeb56de357071955d83c4b1badaa0b21832e9", + "wx" : "00b838ff44e5bc177bf21189d0766082fc9d843226887fc9760371100b7ee20a6f", + "wy" : "00f0c9d75bfba7b31a6bca1974496eeb56de357071955d83c4b1badaa0b21832e9" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004b838ff44e5bc177bf21189d0766082fc9d843226887fc9760371100b7ee20a6ff0c9d75bfba7b31a6bca1974496eeb56de357071955d83c4b1badaa0b21832e9", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEuDj/ROW8F3vyEYnQdmCC/J2EMiaIf8l2\nA3EQC37iCm/wyddb+6ezGmvKGXRJbutW3jVwcZVdg8Sxutqgshgy6Q==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004b838ff44e5bc177bf21189d0766082fc9d843226887fc9760371100b7ee20a6ff0c9d75bfba7b31a6bca1974496eeb56de357071955d83c4b1badaa0b21832e9", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEuDj/ROW8F3vyEYnQdmCC/J2EMiaIf8l2\nA3EQC37iCm/wyddb+6ezGmvKGXRJbutW3jVwcZVdg8Sxutqgshgy6Q==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 1, - "comment": "Signature malleability", - "flags": [ + "tcId" : 1, + "comment" : "Signature malleability", + "flags" : [ "SignatureMalleabilityBitcoin" ], - "msg": "313233343030", - "sig": "3046022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365022100900e75ad233fcc908509dbff5922647db37c21f4afd3203ae8dc4ae7794b0f87", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365022100900e75ad233fcc908509dbff5922647db37c21f4afd3203ae8dc4ae7794b0f87", + "result" : "invalid" }, { - "tcId": 2, - "comment": "valid", - "flags": [ + "tcId" : 2, + "comment" : "valid", + "flags" : [ "ValidSignature" ], - "msg": "313233343030", - "sig": "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "valid" + "msg" : "313233343030", + "sig" : 
"3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "valid" }, { - "tcId": 3, - "comment": "length of sequence [r, s] uses long form encoding", - "flags": [ + "tcId" : 3, + "comment" : "length of sequence [r, s] uses long form encoding", + "flags" : [ "BerEncodedSignature" ], - "msg": "313233343030", - "sig": "308145022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "308145022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 4, - "comment": "length of sequence [r, s] contains a leading 0", - "flags": [ + "tcId" : 4, + "comment" : "length of sequence [r, s] contains a leading 0", + "flags" : [ "BerEncodedSignature" ], - "msg": "313233343030", - "sig": "30820045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30820045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 5, - "comment": "length of sequence [r, s] uses 70 instead of 69", - "flags": [ + "tcId" : 5, + "comment" : "length of sequence [r, s] uses 70 instead of 69", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3046022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 6, - "comment": "length of sequence [r, s] uses 68 instead of 69", - "flags": [ + "tcId" : 6, + "comment" : "length of sequence [r, s] uses 68 instead of 69", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3044022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3044022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 7, - "comment": "uint32 overflow in length of sequence [r, s]", - "flags": [ + "tcId" : 7, + "comment" : "uint32 overflow in length of sequence [r, s]", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "30850100000045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30850100000045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 8, - "comment": "uint64 overflow in length of sequence [r, s]", - "flags": [ + "tcId" : 8, + "comment" : "uint64 overflow in length of sequence [r, s]", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": 
"3089010000000000000045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3089010000000000000045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 9, - "comment": "length of sequence [r, s] = 2**31 - 1", - "flags": [ + "tcId" : 9, + "comment" : "length of sequence [r, s] = 2**31 - 1", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "30847fffffff022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30847fffffff022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 10, - "comment": "length of sequence [r, s] = 2**31", - "flags": [ + "tcId" : 10, + "comment" : "length of sequence [r, s] = 2**31", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "308480000000022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "308480000000022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 11, - "comment": "length of sequence [r, s] = 2**32 - 1", - "flags": [ + "tcId" : 11, + "comment" : "length of sequence [r, s] = 2**32 - 1", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3084ffffffff022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3084ffffffff022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 12, - "comment": "length of sequence [r, s] = 2**40 - 1", - "flags": [ + "tcId" : 12, + "comment" : "length of sequence [r, s] = 2**40 - 1", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3085ffffffffff022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3085ffffffffff022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 13, - "comment": "length of sequence [r, s] = 2**64 - 1", - "flags": [ + "tcId" : 13, + "comment" : "length of sequence [r, s] = 2**64 - 1", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3088ffffffffffffffff022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3088ffffffffffffffff022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 14, - "comment": "incorrect length of sequence [r, 
s]", - "flags": [ + "tcId" : 14, + "comment" : "incorrect length of sequence [r, s]", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "30ff022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30ff022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 15, - "comment": "replaced sequence [r, s] by an indefinite length tag without termination", - "flags": [ + "tcId" : 15, + "comment" : "replaced sequence [r, s] by an indefinite length tag without termination", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3080022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3080022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 16, - "comment": "removing sequence [r, s]", - "flags": [ + "tcId" : 16, + "comment" : "removing sequence [r, s]", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "", - "result": "invalid" + "msg" : "313233343030", + "sig" : "", + "result" : "invalid" }, { - "tcId": 17, - "comment": "lonely sequence tag", - "flags": [ + "tcId" : 17, + "comment" : "lonely sequence tag", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "30", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30", + "result" : "invalid" }, { - "tcId": 18, - "comment": "appending 0's to sequence [r, s]", - "flags": [ + "tcId" : 18, + "comment" : "appending 0's to sequence [r, s]", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", + "result" : "invalid" }, { - "tcId": 19, - "comment": "prepending 0's to sequence [r, s]", - "flags": [ + "tcId" : 19, + "comment" : "prepending 0's to sequence [r, s]", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "30470000022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30470000022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 20, - "comment": "appending unused 0's to sequence [r, s]", - "flags": [ + "tcId" : 20, + "comment" : "appending unused 0's to sequence [r, s]", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", + 
"result" : "invalid" }, { - "tcId": 21, - "comment": "appending null value to sequence [r, s]", - "flags": [ + "tcId" : 21, + "comment" : "appending null value to sequence [r, s]", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0500", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0500", + "result" : "invalid" }, { - "tcId": 22, - "comment": "prepending garbage to sequence [r, s]", - "flags": [ + "tcId" : 22, + "comment" : "prepending garbage to sequence [r, s]", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "304a4981773045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304a4981773045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 23, - "comment": "prepending garbage to sequence [r, s]", - "flags": [ + "tcId" : 23, + "comment" : "prepending garbage to sequence [r, s]", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "304925003045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304925003045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 24, - "comment": "appending garbage to sequence [r, s]", - "flags": [ + "tcId" : 24, + "comment" : "appending garbage to sequence [r, s]", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "30473045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0004deadbeef", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30473045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0004deadbeef", + "result" : "invalid" }, { - "tcId": 25, - "comment": "including undefined tags", - "flags": [ + "tcId" : 25, + "comment" : "including undefined tags", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "304daa00bb00cd003045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304daa00bb00cd003045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 26, - "comment": "including undefined tags", - "flags": [ + "tcId" : 26, + "comment" : "including undefined tags", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "304d2229aa00bb00cd00022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : 
"304d2229aa00bb00cd00022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 27, - "comment": "including undefined tags", - "flags": [ + "tcId" : 27, + "comment" : "including undefined tags", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "304d022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323652228aa00bb00cd0002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304d022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323652228aa00bb00cd0002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 28, - "comment": "truncated length of sequence [r, s]", - "flags": [ + "tcId" : 28, + "comment" : "truncated length of sequence [r, s]", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3081", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3081", + "result" : "invalid" }, { - "tcId": 29, - "comment": "including undefined tags to sequence [r, s]", - "flags": [ + "tcId" : 29, + "comment" : "including undefined tags to sequence [r, s]", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "304baa02aabb3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304baa02aabb3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 30, - "comment": "using composition with indefinite length for sequence [r, s]", - "flags": [ + "tcId" : 30, + "comment" : "using composition with indefinite length for sequence [r, s]", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "30803045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30803045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", + "result" : "invalid" }, { - "tcId": 31, - "comment": "using composition with wrong tag for sequence [r, s]", - "flags": [ + "tcId" : 31, + "comment" : "using composition with wrong tag for sequence [r, s]", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "30803145022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30803145022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", + "result" : "invalid" }, { - "tcId": 32, - "comment": "Replacing sequence [r, s] with NULL", - "flags": [ + "tcId" : 32, + "comment" : "Replacing sequence [r, s] with NULL", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "0500", - "result": "invalid" + "msg" : "313233343030", + "sig" : "0500", + "result" : "invalid" }, { - "tcId": 33, - "comment": "changing tag value of sequence [r, s]", - "flags": [ + "tcId" : 33, + "comment" : "changing tag value of sequence [r, s]", + "flags" : [ 
"InvalidEncoding" ], - "msg": "313233343030", - "sig": "2e45022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "2e45022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 34, - "comment": "changing tag value of sequence [r, s]", - "flags": [ + "tcId" : 34, + "comment" : "changing tag value of sequence [r, s]", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "2f45022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "2f45022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 35, - "comment": "changing tag value of sequence [r, s]", - "flags": [ + "tcId" : 35, + "comment" : "changing tag value of sequence [r, s]", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3145022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3145022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 36, - "comment": "changing tag value of sequence [r, s]", - "flags": [ + "tcId" : 36, + "comment" : "changing tag value of sequence [r, s]", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3245022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3245022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 37, - "comment": "changing tag value of sequence [r, s]", - "flags": [ + "tcId" : 37, + "comment" : "changing tag value of sequence [r, s]", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "ff45022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "ff45022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 38, - "comment": "dropping value of sequence [r, s]", - "flags": [ + "tcId" : 38, + "comment" : "dropping value of sequence [r, s]", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3000", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3000", + "result" : "invalid" }, { - "tcId": 39, - "comment": "using composition for sequence [r, s]", - "flags": [ + "tcId" : 39, + "comment" : "using composition for sequence [r, s]", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "304930010230442100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : 
"313233343030", + "sig" : "304930010230442100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 40, - "comment": "truncated sequence [r, s]", - "flags": [ + "tcId" : 40, + "comment" : "truncated sequence [r, s]", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3044022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3044022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31", + "result" : "invalid" }, { - "tcId": 41, - "comment": "truncated sequence [r, s]", - "flags": [ + "tcId" : 41, + "comment" : "truncated sequence [r, s]", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "30442100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30442100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 42, - "comment": "sequence [r, s] of size 4166 to check for overflows", - "flags": [ + "tcId" : 42, + "comment" : "sequence [r, s] of size 4166 to check for overflows", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "30821046022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "result": "invalid" + "msg" : "313233343030", + "sig" : 
"30821046022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "result" : "invalid" }, { - "tcId": 43, - "comment": "indefinite length", - "flags": [ + "tcId" : 43, + "comment" : "indefinite length", + "flags" : [ "BerEncodedSignature" ], - "msg": "313233343030", - "sig": "3080022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3080022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", + "result" : "invalid" }, { - "tcId": 44, - "comment": "indefinite length with truncated delimiter", - "flags": [ + "tcId" : 44, + "comment" : "indefinite length with truncated delimiter", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3080022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba00", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3080022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba00", + "result" : "invalid" }, { - "tcId": 45, - "comment": "indefinite length with additional element", - "flags": [ + "tcId" : 45, + "comment" : "indefinite length with additional element", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3080022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba05000000", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3080022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba05000000", + "result" : "invalid" }, { - "tcId": 46, - "comment": "indefinite length with truncated element", - "flags": [ + "tcId" : 46, + "comment" : "indefinite length with truncated element", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3080022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba060811220000", - "result": "invalid" + "msg" : "313233343030", + "sig" : 
"3080022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba060811220000", + "result" : "invalid" }, { - "tcId": 47, - "comment": "indefinite length with garbage", - "flags": [ + "tcId" : 47, + "comment" : "indefinite length with garbage", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3080022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000fe02beef", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3080022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000fe02beef", + "result" : "invalid" }, { - "tcId": 48, - "comment": "indefinite length with nonempty EOC", - "flags": [ + "tcId" : 48, + "comment" : "indefinite length with nonempty EOC", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3080022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0002beef", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3080022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0002beef", + "result" : "invalid" }, { - "tcId": 49, - "comment": "prepend empty sequence", - "flags": [ + "tcId" : 49, + "comment" : "prepend empty sequence", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "30473000022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30473000022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 50, - "comment": "append empty sequence", - "flags": [ + "tcId" : 50, + "comment" : "append empty sequence", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba3000", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba3000", + "result" : "invalid" }, { - "tcId": 51, - "comment": "append zero", - "flags": [ + "tcId" : 51, + "comment" : "append zero", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3048022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba020100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3048022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba020100", + "result" : "invalid" }, { - "tcId": 52, - "comment": "append garbage with high tag number", - "flags": [ + "tcId" : 52, + "comment" : "append garbage with high tag number", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3048022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31babf7f00", - "result": "invalid" + "msg" : "313233343030", + "sig" : 
"3048022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31babf7f00", + "result" : "invalid" }, { - "tcId": 53, - "comment": "append null with explicit tag", - "flags": [ + "tcId" : 53, + "comment" : "append null with explicit tag", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31baa0020500", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31baa0020500", + "result" : "invalid" }, { - "tcId": 54, - "comment": "append null with implicit tag", - "flags": [ + "tcId" : 54, + "comment" : "append null with implicit tag", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31baa000", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31baa000", + "result" : "invalid" }, { - "tcId": 55, - "comment": "sequence of sequence", - "flags": [ + "tcId" : 55, + "comment" : "sequence of sequence", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "30473045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30473045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 56, - "comment": "truncated sequence: removed last 1 elements", - "flags": [ + "tcId" : 56, + "comment" : "truncated sequence: removed last 1 elements", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3023022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3023022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365", + "result" : "invalid" }, { - "tcId": 57, - "comment": "repeating element in sequence", - "flags": [ + "tcId" : 57, + "comment" : "repeating element in sequence", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3067022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3067022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 58, - "comment": "flipped bit 0 in r", - "flags": [ + "tcId" : 58, + "comment" : "flipped bit 0 in r", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "304300813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236402206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : 
"304300813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236402206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 59, - "comment": "flipped bit 32 in r", - "flags": [ + "tcId" : 59, + "comment" : "flipped bit 32 in r", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "304300813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccac983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304300813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccac983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 60, - "comment": "flipped bit 48 in r", - "flags": [ + "tcId" : 60, + "comment" : "flipped bit 48 in r", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "304300813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5133ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304300813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5133ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 61, - "comment": "flipped bit 64 in r", - "flags": [ + "tcId" : 61, + "comment" : "flipped bit 64 in r", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "304300813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc08b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304300813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc08b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 62, - "comment": "length of r uses long form encoding", - "flags": [ + "tcId" : 62, + "comment" : "length of r uses long form encoding", + "flags" : [ "BerEncodedSignature" ], - "msg": "313233343030", - "sig": "304602812100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304602812100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 63, - "comment": "length of r contains a leading 0", - "flags": [ + "tcId" : 63, + "comment" : "length of r contains a leading 0", + "flags" : [ "BerEncodedSignature" ], - "msg": "313233343030", - "sig": "30470282002100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30470282002100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 64, - "comment": "length of r uses 34 instead of 33", - "flags": [ + "tcId" : 64, + "comment" : "length of r uses 34 instead of 33", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3045022200813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : 
"3045022200813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 65, - "comment": "length of r uses 32 instead of 33", - "flags": [ + "tcId" : 65, + "comment" : "length of r uses 32 instead of 33", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3045022000813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3045022000813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 66, - "comment": "uint32 overflow in length of r", - "flags": [ + "tcId" : 66, + "comment" : "uint32 overflow in length of r", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "304a0285010000002100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304a0285010000002100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 67, - "comment": "uint64 overflow in length of r", - "flags": [ + "tcId" : 67, + "comment" : "uint64 overflow in length of r", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "304e028901000000000000002100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304e028901000000000000002100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 68, - "comment": "length of r = 2**31 - 1", - "flags": [ + "tcId" : 68, + "comment" : "length of r = 2**31 - 1", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "304902847fffffff00813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304902847fffffff00813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 69, - "comment": "length of r = 2**31", - "flags": [ + "tcId" : 69, + "comment" : "length of r = 2**31", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "304902848000000000813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304902848000000000813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 70, - "comment": "length of r = 2**32 - 1", - "flags": [ + "tcId" : 70, + "comment" : "length of r = 2**32 - 1", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "30490284ffffffff00813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" 
: "30490284ffffffff00813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 71, - "comment": "length of r = 2**40 - 1", - "flags": [ + "tcId" : 71, + "comment" : "length of r = 2**40 - 1", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "304a0285ffffffffff00813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304a0285ffffffffff00813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 72, - "comment": "length of r = 2**64 - 1", - "flags": [ + "tcId" : 72, + "comment" : "length of r = 2**64 - 1", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "304d0288ffffffffffffffff00813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304d0288ffffffffffffffff00813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 73, - "comment": "incorrect length of r", - "flags": [ + "tcId" : 73, + "comment" : "incorrect length of r", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "304502ff00813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304502ff00813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 74, - "comment": "replaced r by an indefinite length tag without termination", - "flags": [ + "tcId" : 74, + "comment" : "replaced r by an indefinite length tag without termination", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3045028000813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3045028000813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 75, - "comment": "removing r", - "flags": [ + "tcId" : 75, + "comment" : "removing r", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "302202206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "302202206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 76, - "comment": "lonely integer tag", - "flags": [ + "tcId" : 76, + "comment" : "lonely integer tag", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "30230202206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30230202206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 77, - "comment": "lonely integer tag", - "flags": [ + "tcId" : 77, + "comment" : "lonely integer tag", + "flags" : [ 
"InvalidEncoding" ], - "msg": "313233343030", - "sig": "3024022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3024022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502", + "result" : "invalid" }, { - "tcId": 78, - "comment": "appending 0's to r", - "flags": [ + "tcId" : 78, + "comment" : "appending 0's to r", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3047022300813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365000002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3047022300813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365000002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 79, - "comment": "prepending 0's to r", - "flags": [ + "tcId" : 79, + "comment" : "prepending 0's to r", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "30470223000000813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30470223000000813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 80, - "comment": "appending unused 0's to r", - "flags": [ + "tcId" : 80, + "comment" : "appending unused 0's to r", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365000002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365000002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 81, - "comment": "appending null value to r", - "flags": [ + "tcId" : 81, + "comment" : "appending null value to r", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3047022300813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365050002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3047022300813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365050002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 82, - "comment": "prepending garbage to r", - "flags": [ + "tcId" : 82, + "comment" : "prepending garbage to r", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "304a2226498177022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304a2226498177022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 83, - "comment": "prepending garbage to r", - "flags": [ + "tcId" : 83, + "comment" : "prepending garbage to r", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "304922252500022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - 
"result": "invalid" + "msg" : "313233343030", + "sig" : "304922252500022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 84, - "comment": "appending garbage to r", - "flags": [ + "tcId" : 84, + "comment" : "appending garbage to r", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "304d2223022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650004deadbeef02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304d2223022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650004deadbeef02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 85, - "comment": "truncated length of r", - "flags": [ + "tcId" : 85, + "comment" : "truncated length of r", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3024028102206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3024028102206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 86, - "comment": "including undefined tags to r", - "flags": [ + "tcId" : 86, + "comment" : "including undefined tags to r", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "304b2227aa02aabb022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304b2227aa02aabb022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 87, - "comment": "using composition with indefinite length for r", - "flags": [ + "tcId" : 87, + "comment" : "using composition with indefinite length for r", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "30492280022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365000002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30492280022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365000002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 88, - "comment": "using composition with wrong tag for r", - "flags": [ + "tcId" : 88, + "comment" : "using composition with wrong tag for r", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "30492280032100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365000002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30492280032100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365000002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 89, - "comment": "Replacing r with NULL", - "flags": [ + "tcId" : 89, + "comment" : "Replacing r with NULL", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3024050002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3024050002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : 
"invalid" }, { - "tcId": 90, - "comment": "changing tag value of r", - "flags": [ + "tcId" : 90, + "comment" : "changing tag value of r", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3045002100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3045002100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 91, - "comment": "changing tag value of r", - "flags": [ + "tcId" : 91, + "comment" : "changing tag value of r", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3045012100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3045012100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 92, - "comment": "changing tag value of r", - "flags": [ + "tcId" : 92, + "comment" : "changing tag value of r", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3045032100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3045032100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 93, - "comment": "changing tag value of r", - "flags": [ + "tcId" : 93, + "comment" : "changing tag value of r", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3045042100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3045042100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 94, - "comment": "changing tag value of r", - "flags": [ + "tcId" : 94, + "comment" : "changing tag value of r", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3045ff2100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3045ff2100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 95, - "comment": "dropping value of r", - "flags": [ + "tcId" : 95, + "comment" : "dropping value of r", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3024020002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3024020002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 96, - "comment": "using composition for r", - "flags": [ + "tcId" : 96, + "comment" : "using composition for r", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": 
"304922250201000220813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304922250201000220813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 97, - "comment": "modifying first byte of r", - "flags": [ + "tcId" : 97, + "comment" : "modifying first byte of r", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3045022102813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3045022102813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 98, - "comment": "modifying last byte of r", - "flags": [ + "tcId" : 98, + "comment" : "modifying last byte of r", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323e502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323e502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 99, - "comment": "truncated r", - "flags": [ + "tcId" : 99, + "comment" : "truncated r", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3044022000813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832302206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3044022000813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832302206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 100, - "comment": "truncated r", - "flags": [ + "tcId" : 100, + "comment" : "truncated r", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "30440220813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30440220813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 101, - "comment": "r of size 4130 to check for overflows", - "flags": [ + "tcId" : 101, + "comment" : "r of size 4130 to check for overflows", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": 
"308210480282102200813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "308210480282102200813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 102, - "comment": "leading ff in r", - "flags": [ + "tcId" : 102, + "comment" : "leading ff in r", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "30460222ff00813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30460222ff00813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 103, - "comment": "replaced r by infinity", - "flags": [ + "tcId" : 103, + "comment" : "replaced r by infinity", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "302509018002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "302509018002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : 
"invalid" }, { - "tcId": 104, - "comment": "replacing r with zero", - "flags": [ + "tcId" : 104, + "comment" : "replacing r with zero", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "302502010002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "302502010002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 105, - "comment": "flipped bit 0 in s", - "flags": [ + "tcId" : 105, + "comment" : "flipped bit 0 in s", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3043022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323656ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31bb", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3043022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323656ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31bb", + "result" : "invalid" }, { - "tcId": 106, - "comment": "flipped bit 32 in s", - "flags": [ + "tcId" : 106, + "comment" : "flipped bit 32 in s", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3043022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323656ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a456eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3043022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323656ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a456eb31ba", + "result" : "invalid" }, { - "tcId": 107, - "comment": "flipped bit 48 in s", - "flags": [ + "tcId" : 107, + "comment" : "flipped bit 48 in s", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3043022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323656ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f713a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3043022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323656ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f713a556eb31ba", + "result" : "invalid" }, { - "tcId": 108, - "comment": "flipped bit 64 in s", - "flags": [ + "tcId" : 108, + "comment" : "flipped bit 64 in s", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3043022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323656ff18a52dcc0336f7af62400a6dd9b810732baf1ff758001d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3043022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323656ff18a52dcc0336f7af62400a6dd9b810732baf1ff758001d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 109, - "comment": "length of s uses long form encoding", - "flags": [ + "tcId" : 109, + "comment" : "length of s uses long form encoding", + "flags" : [ "BerEncodedSignature" ], - "msg": "313233343030", - "sig": "3046022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650281206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650281206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 110, - "comment": "length of s contains a leading 0", - "flags": [ + "tcId" : 110, + "comment" : "length of s contains a leading 0", + "flags" : [ "BerEncodedSignature" ], - "msg": "313233343030", - "sig": 
"3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365028200206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365028200206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 111, - "comment": "length of s uses 33 instead of 32", - "flags": [ + "tcId" : 111, + "comment" : "length of s uses 33 instead of 32", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502216ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502216ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 112, - "comment": "length of s uses 31 instead of 32", - "flags": [ + "tcId" : 112, + "comment" : "length of s uses 31 instead of 32", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365021f6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365021f6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 113, - "comment": "uint32 overflow in length of s", - "flags": [ + "tcId" : 113, + "comment" : "uint32 overflow in length of s", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "304a022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365028501000000206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304a022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365028501000000206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 114, - "comment": "uint64 overflow in length of s", - "flags": [ + "tcId" : 114, + "comment" : "uint64 overflow in length of s", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "304e022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502890100000000000000206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304e022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502890100000000000000206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 115, - "comment": "length of s = 2**31 - 1", - "flags": [ + "tcId" : 115, + "comment" : "length of s = 2**31 - 1", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502847fffffff6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502847fffffff6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 116, - "comment": "length of s = 2**31", - "flags": [ + "tcId" : 116, + "comment" : "length of s = 2**31", + "flags" : [ "InvalidEncoding" ], - "msg": 
"313233343030", - "sig": "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650284800000006ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650284800000006ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 117, - "comment": "length of s = 2**32 - 1", - "flags": [ + "tcId" : 117, + "comment" : "length of s = 2**32 - 1", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650284ffffffff6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650284ffffffff6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 118, - "comment": "length of s = 2**40 - 1", - "flags": [ + "tcId" : 118, + "comment" : "length of s = 2**40 - 1", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "304a022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650285ffffffffff6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304a022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650285ffffffffff6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 119, - "comment": "length of s = 2**64 - 1", - "flags": [ + "tcId" : 119, + "comment" : "length of s = 2**64 - 1", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "304d022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650288ffffffffffffffff6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304d022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650288ffffffffffffffff6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 120, - "comment": "incorrect length of s", - "flags": [ + "tcId" : 120, + "comment" : "incorrect length of s", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502ff6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502ff6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 121, - "comment": "replaced s by an indefinite length tag without termination", - "flags": [ + "tcId" : 121, + "comment" : "replaced s by an indefinite length tag without termination", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502806ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502806ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 122, - "comment": "appending 0's to s", - "flags": [ + "tcId" : 122, + "comment" : "appending 0's to s", + "flags" : [ 
"ModifiedSignature" ], - "msg": "313233343030", - "sig": "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502226ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502226ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", + "result" : "invalid" }, { - "tcId": 123, - "comment": "prepending 0's to s", - "flags": [ + "tcId" : 123, + "comment" : "prepending 0's to s", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365022200006ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365022200006ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 124, - "comment": "appending null value to s", - "flags": [ + "tcId" : 124, + "comment" : "appending null value to s", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502226ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0500", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3047022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502226ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0500", + "result" : "invalid" }, { - "tcId": 125, - "comment": "prepending garbage to s", - "flags": [ + "tcId" : 125, + "comment" : "prepending garbage to s", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "304a022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365222549817702206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304a022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365222549817702206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 126, - "comment": "prepending garbage to s", - "flags": [ + "tcId" : 126, + "comment" : "prepending garbage to s", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323652224250002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323652224250002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 127, - "comment": "appending garbage to s", - "flags": [ + "tcId" : 127, + "comment" : "appending garbage to s", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "304d022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365222202206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0004deadbeef", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304d022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365222202206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0004deadbeef", + "result" : "invalid" }, { - "tcId": 128, - "comment": "truncated length of s", - "flags": [ + "tcId" : 128, + "comment" : "truncated length of s", + "flags" : [ "InvalidEncoding" ], - "msg": 
"313233343030", - "sig": "3025022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650281", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3025022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650281", + "result" : "invalid" }, { - "tcId": 129, - "comment": "including undefined tags to s", - "flags": [ + "tcId" : 129, + "comment" : "including undefined tags to s", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "304b022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323652226aa02aabb02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304b022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323652226aa02aabb02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 130, - "comment": "using composition with indefinite length for s", - "flags": [ + "tcId" : 130, + "comment" : "using composition with indefinite length for s", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365228002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365228002206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", + "result" : "invalid" }, { - "tcId": 131, - "comment": "using composition with wrong tag for s", - "flags": [ + "tcId" : 131, + "comment" : "using composition with wrong tag for s", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365228003206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365228003206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba0000", + "result" : "invalid" }, { - "tcId": 132, - "comment": "Replacing s with NULL", - "flags": [ + "tcId" : 132, + "comment" : "Replacing s with NULL", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3025022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650500", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3025022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650500", + "result" : "invalid" }, { - "tcId": 133, - "comment": "changing tag value of s", - "flags": [ + "tcId" : 133, + "comment" : "changing tag value of s", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236500206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236500206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 134, - "comment": "changing tag value of s", - "flags": [ + "tcId" : 134, + "comment" : "changing tag value of s", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236501206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : 
"3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236501206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 135, - "comment": "changing tag value of s", - "flags": [ + "tcId" : 135, + "comment" : "changing tag value of s", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236503206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236503206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 136, - "comment": "changing tag value of s", - "flags": [ + "tcId" : 136, + "comment" : "changing tag value of s", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236504206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236504206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 137, - "comment": "changing tag value of s", - "flags": [ + "tcId" : 137, + "comment" : "changing tag value of s", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365ff206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365ff206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 138, - "comment": "dropping value of s", - "flags": [ + "tcId" : 138, + "comment" : "dropping value of s", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3025022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650200", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3025022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650200", + "result" : "invalid" }, { - "tcId": 139, - "comment": "using composition for s", - "flags": [ + "tcId" : 139, + "comment" : "using composition for s", + "flags" : [ "InvalidEncoding" ], - "msg": "313233343030", - "sig": "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365222402016f021ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3049022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365222402016f021ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 140, - "comment": "modifying first byte of s", - "flags": [ + "tcId" : 140, + "comment" : "modifying first byte of s", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206df18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206df18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 141, - "comment": "modifying last byte of s", - 
"flags": [ + "tcId" : 141, + "comment" : "modifying last byte of s", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb313a", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3045022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb313a", + "result" : "invalid" }, { - "tcId": 142, - "comment": "truncated s", - "flags": [ + "tcId" : 142, + "comment" : "truncated s", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3044022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365021f6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3044022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365021f6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31", + "result" : "invalid" }, { - "tcId": 143, - "comment": "truncated s", - "flags": [ + "tcId" : 143, + "comment" : "truncated s", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3044022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365021ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3044022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365021ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 144, - "comment": "s of size 4129 to check for overflows", - "flags": [ + "tcId" : 144, + "comment" : "s of size 4129 to check for overflows", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": 
"30821048022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365028210216ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30821048022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365028210216ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "result" : "invalid" }, { - "tcId": 145, - "comment": "leading ff in s", - "flags": [ + "tcId" : 145, + "comment" : "leading ff in s", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3046022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650221ff6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc98323650221ff6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 146, - "comment": "replaced s by infinity", - "flags": [ + "tcId" : 146, + "comment" : "replaced s by infinity", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3026022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365090180", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365090180", + "result" 
: "invalid" }, { - "tcId": 147, - "comment": "replacing s with zero", - "flags": [ + "tcId" : 147, + "comment" : "replacing s with zero", + "flags" : [ "ModifiedSignature" ], - "msg": "313233343030", - "sig": "3026022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365020100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026022100813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc9832365020100", + "result" : "invalid" }, { - "tcId": 148, - "comment": "replaced r by r + n", - "flags": [ + "tcId" : 148, + "comment" : "replaced r by r + n", + "flags" : [ "RangeCheck" ], - "msg": "313233343030", - "sig": "3045022101813ef79ccefa9a56f7ba805f0e478583b90deabca4b05c4574e49b5899b964a602206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3045022101813ef79ccefa9a56f7ba805f0e478583b90deabca4b05c4574e49b5899b964a602206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 149, - "comment": "replaced r by r - n", - "flags": [ + "tcId" : 149, + "comment" : "replaced r by r - n", + "flags" : [ "RangeCheck" ], - "msg": "313233343030", - "sig": "30440220813ef79ccefa9a56f7ba805f0e47858643b030ef461f1bcdf53fde3ef94ce22402206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30440220813ef79ccefa9a56f7ba805f0e47858643b030ef461f1bcdf53fde3ef94ce22402206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 150, - "comment": "replaced r by r + 256 * n", - "flags": [ + "tcId" : 150, + "comment" : "replaced r by r + 256 * n", + "flags" : [ "RangeCheck" ], - "msg": "313233343030", - "sig": "304602220100813ef79ccefa9a56f7ba805f0e47843fad3bf4853e07f7c98770c99bffc4646502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304602220100813ef79ccefa9a56f7ba805f0e47843fad3bf4853e07f7c98770c99bffc4646502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 151, - "comment": "replaced r by -r", - "flags": [ + "tcId" : 151, + "comment" : "replaced r by -r", + "flags" : [ "ModifiedInteger" ], - "msg": "313233343030", - "sig": "30450221ff7ec10863310565a908457fa0f1b87a7b01a0f22a0a9843f64aedc334367cdc9b02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30450221ff7ec10863310565a908457fa0f1b87a7b01a0f22a0a9843f64aedc334367cdc9b02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 152, - "comment": "replaced r by n - r", - "flags": [ + "tcId" : 152, + "comment" : "replaced r by n - r", + "flags" : [ "ModifiedInteger" ], - "msg": "313233343030", - "sig": "304402207ec10863310565a908457fa0f1b87a79bc4fcf10b9e0e4320ac021c106b31ddc02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304402207ec10863310565a908457fa0f1b87a79bc4fcf10b9e0e4320ac021c106b31ddc02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 153, - "comment": "replaced r by -n - r", - "flags": [ + "tcId" : 153, + "comment" : "replaced r by -n - r", + "flags" : [ "ModifiedInteger" ], - "msg": "313233343030", - "sig": 
"30450221fe7ec10863310565a908457fa0f1b87a7c46f215435b4fa3ba8b1b64a766469b5a02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30450221fe7ec10863310565a908457fa0f1b87a7c46f215435b4fa3ba8b1b64a766469b5a02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 154, - "comment": "replaced r by r + 2**256", - "flags": [ + "tcId" : 154, + "comment" : "replaced r by r + 2**256", + "flags" : [ "IntegerOverflow" ], - "msg": "313233343030", - "sig": "3045022101813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3045022101813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 155, - "comment": "replaced r by r + 2**320", - "flags": [ + "tcId" : 155, + "comment" : "replaced r by r + 2**320", + "flags" : [ "IntegerOverflow" ], - "msg": "313233343030", - "sig": "304d0229010000000000000000813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304d0229010000000000000000813ef79ccefa9a56f7ba805f0e478584fe5f0dd5f567bc09b5123ccbc983236502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 156, - "comment": "replaced s by s + n", - "flags": [ + "tcId" : 156, + "comment" : "replaced s by s + n", + "flags" : [ "RangeCheck" ], - "msg": "313233343030", - "sig": "30450221016ff18a52dcc0336f7af62400a6dd9b7fc1e197d8aebe203c96c87232272172fb02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30450221016ff18a52dcc0336f7af62400a6dd9b7fc1e197d8aebe203c96c87232272172fb02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 157, - "comment": "replaced s by s - n", - "flags": [ + "tcId" : 157, + "comment" : "replaced s by s - n", + "flags" : [ "RangeCheck" ], - "msg": "313233343030", - "sig": "30450221ff6ff18a52dcc0336f7af62400a6dd9b824c83de0b502cdfc51723b51886b4f07902206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30450221ff6ff18a52dcc0336f7af62400a6dd9b824c83de0b502cdfc51723b51886b4f07902206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 158, - "comment": "replaced s by s + 256 * n", - "flags": [ + "tcId" : 158, + "comment" : "replaced s by s + 256 * n", + "flags" : [ "RangeCheck" ], - "msg": "313233343030", - "sig": "3046022201006ff18a52dcc0336f7af62400a6dd9a3bb60fa1a14815bbc0a954a0758d2c72ba02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022201006ff18a52dcc0336f7af62400a6dd9a3bb60fa1a14815bbc0a954a0758d2c72ba02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 159, - "comment": "replaced s by -s", - "flags": [ + "tcId" : 159, + "comment" : "replaced s by -s", + "flags" : [ "ModifiedInteger" ], - "msg": "313233343030", - "sig": 
"30440220900e75ad233fcc908509dbff5922647ef8cd450e008a7fff2909ec5aa914ce4602206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30440220900e75ad233fcc908509dbff5922647ef8cd450e008a7fff2909ec5aa914ce4602206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 160, - "comment": "replaced s by -n - s", - "flags": [ + "tcId" : 160, + "comment" : "replaced s by -n - s", + "flags" : [ "ModifiedInteger" ], - "msg": "313233343030", - "sig": "30450221fe900e75ad233fcc908509dbff592264803e1e68275141dfc369378dcdd8de8d0502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30450221fe900e75ad233fcc908509dbff592264803e1e68275141dfc369378dcdd8de8d0502206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 161, - "comment": "replaced s by s + 2**256", - "flags": [ + "tcId" : 161, + "comment" : "replaced s by s + 2**256", + "flags" : [ "IntegerOverflow" ], - "msg": "313233343030", - "sig": "30450221016ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30450221016ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 162, - "comment": "replaced s by s - 2**256", - "flags": [ + "tcId" : 162, + "comment" : "replaced s by s - 2**256", + "flags" : [ "IntegerOverflow" ], - "msg": "313233343030", - "sig": "30450221ff6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30450221ff6ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 163, - "comment": "replaced s by s + 2**320", - "flags": [ + "tcId" : 163, + "comment" : "replaced s by s + 2**320", + "flags" : [ "IntegerOverflow" ], - "msg": "313233343030", - "sig": "304d02290100000000000000006ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304d02290100000000000000006ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba02206ff18a52dcc0336f7af62400a6dd9b810732baf1ff758000d6f613a556eb31ba", + "result" : "invalid" }, { - "tcId": 164, - "comment": "Signature with special case values r=0 and s=0", - "flags": [ + "tcId" : 164, + "comment" : "Signature with special case values r=0 and s=0", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3006020100020100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3006020100020100", + "result" : "invalid" }, { - "tcId": 165, - "comment": "Signature with special case values r=0 and s=1", - "flags": [ + "tcId" : 165, + "comment" : "Signature with special case values r=0 and s=1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3006020100020101", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3006020100020101", + "result" : "invalid" }, { - "tcId": 166, - "comment": "Signature with special case values r=0 and s=-1", - "flags": [ + "tcId" : 166, + "comment" : 
"Signature with special case values r=0 and s=-1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "30060201000201ff", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30060201000201ff", + "result" : "invalid" }, { - "tcId": 167, - "comment": "Signature with special case values r=0 and s=n", - "flags": [ + "tcId" : 167, + "comment" : "Signature with special case values r=0 and s=n", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3026020100022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026020100022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", + "result" : "invalid" }, { - "tcId": 168, - "comment": "Signature with special case values r=0 and s=n - 1", - "flags": [ + "tcId" : 168, + "comment" : "Signature with special case values r=0 and s=n - 1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3026020100022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026020100022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", + "result" : "invalid" }, { - "tcId": 169, - "comment": "Signature with special case values r=0 and s=n + 1", - "flags": [ + "tcId" : 169, + "comment" : "Signature with special case values r=0 and s=n + 1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3026020100022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026020100022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", + "result" : "invalid" }, { - "tcId": 170, - "comment": "Signature with special case values r=0 and s=p", - "flags": [ + "tcId" : 170, + "comment" : "Signature with special case values r=0 and s=p", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3026020100022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026020100022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + "result" : "invalid" }, { - "tcId": 171, - "comment": "Signature with special case values r=0 and s=p + 1", - "flags": [ + "tcId" : 171, + "comment" : "Signature with special case values r=0 and s=p + 1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3026020100022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026020100022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", + "result" : "invalid" }, { - "tcId": 172, - "comment": "Signature with special case values r=1 and s=0", - "flags": [ + "tcId" : 172, + "comment" : "Signature with special case values r=1 and s=0", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3006020101020100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3006020101020100", + "result" : "invalid" }, { - "tcId": 173, - "comment": "Signature with special case values r=1 and s=1", - "flags": [ + "tcId" : 173, + "comment" : "Signature with special case values r=1 and s=1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3006020101020101", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3006020101020101", + "result" : "invalid" }, { - "tcId": 174, - "comment": "Signature with special case values r=1 and 
s=-1", - "flags": [ + "tcId" : 174, + "comment" : "Signature with special case values r=1 and s=-1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "30060201010201ff", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30060201010201ff", + "result" : "invalid" }, { - "tcId": 175, - "comment": "Signature with special case values r=1 and s=n", - "flags": [ + "tcId" : 175, + "comment" : "Signature with special case values r=1 and s=n", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3026020101022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026020101022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", + "result" : "invalid" }, { - "tcId": 176, - "comment": "Signature with special case values r=1 and s=n - 1", - "flags": [ + "tcId" : 176, + "comment" : "Signature with special case values r=1 and s=n - 1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3026020101022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026020101022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", + "result" : "invalid" }, { - "tcId": 177, - "comment": "Signature with special case values r=1 and s=n + 1", - "flags": [ + "tcId" : 177, + "comment" : "Signature with special case values r=1 and s=n + 1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3026020101022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026020101022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", + "result" : "invalid" }, { - "tcId": 178, - "comment": "Signature with special case values r=1 and s=p", - "flags": [ + "tcId" : 178, + "comment" : "Signature with special case values r=1 and s=p", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3026020101022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026020101022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + "result" : "invalid" }, { - "tcId": 179, - "comment": "Signature with special case values r=1 and s=p + 1", - "flags": [ + "tcId" : 179, + "comment" : "Signature with special case values r=1 and s=p + 1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3026020101022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026020101022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", + "result" : "invalid" }, { - "tcId": 180, - "comment": "Signature with special case values r=-1 and s=0", - "flags": [ + "tcId" : 180, + "comment" : "Signature with special case values r=-1 and s=0", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "30060201ff020100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30060201ff020100", + "result" : "invalid" }, { - "tcId": 181, - "comment": "Signature with special case values r=-1 and s=1", - "flags": [ + "tcId" : 181, + "comment" : "Signature with special case values r=-1 and s=1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "30060201ff020101", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30060201ff020101", + "result" : "invalid" }, { - "tcId": 182, - 
"comment": "Signature with special case values r=-1 and s=-1", - "flags": [ + "tcId" : 182, + "comment" : "Signature with special case values r=-1 and s=-1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "30060201ff0201ff", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30060201ff0201ff", + "result" : "invalid" }, { - "tcId": 183, - "comment": "Signature with special case values r=-1 and s=n", - "flags": [ + "tcId" : 183, + "comment" : "Signature with special case values r=-1 and s=n", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "30260201ff022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30260201ff022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", + "result" : "invalid" }, { - "tcId": 184, - "comment": "Signature with special case values r=-1 and s=n - 1", - "flags": [ + "tcId" : 184, + "comment" : "Signature with special case values r=-1 and s=n - 1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "30260201ff022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30260201ff022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", + "result" : "invalid" }, { - "tcId": 185, - "comment": "Signature with special case values r=-1 and s=n + 1", - "flags": [ + "tcId" : 185, + "comment" : "Signature with special case values r=-1 and s=n + 1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "30260201ff022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30260201ff022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", + "result" : "invalid" }, { - "tcId": 186, - "comment": "Signature with special case values r=-1 and s=p", - "flags": [ + "tcId" : 186, + "comment" : "Signature with special case values r=-1 and s=p", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "30260201ff022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30260201ff022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + "result" : "invalid" }, { - "tcId": 187, - "comment": "Signature with special case values r=-1 and s=p + 1", - "flags": [ + "tcId" : 187, + "comment" : "Signature with special case values r=-1 and s=p + 1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "30260201ff022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30260201ff022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", + "result" : "invalid" }, { - "tcId": 188, - "comment": "Signature with special case values r=n and s=0", - "flags": [ + "tcId" : 188, + "comment" : "Signature with special case values r=n and s=0", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141020100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141020100", + "result" : "invalid" }, { - "tcId": 189, - "comment": "Signature with special case values r=n and s=1", - "flags": [ + "tcId" : 189, + "comment" : "Signature with special case values r=n and s=1", + "flags" : [ 
"InvalidSignature" ], - "msg": "313233343030", - "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141020101", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141020101", + "result" : "invalid" }, { - "tcId": 190, - "comment": "Signature with special case values r=n and s=-1", - "flags": [ + "tcId" : 190, + "comment" : "Signature with special case values r=n and s=-1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641410201ff", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641410201ff", + "result" : "invalid" }, { - "tcId": 191, - "comment": "Signature with special case values r=n and s=n", - "flags": [ + "tcId" : 191, + "comment" : "Signature with special case values r=n and s=n", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", + "result" : "invalid" }, { - "tcId": 192, - "comment": "Signature with special case values r=n and s=n - 1", - "flags": [ + "tcId" : 192, + "comment" : "Signature with special case values r=n and s=n - 1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", + "result" : "invalid" }, { - "tcId": 193, - "comment": "Signature with special case values r=n and s=n + 1", - "flags": [ + "tcId" : 193, + "comment" : "Signature with special case values r=n and s=n + 1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", + "result" : "invalid" }, { - "tcId": 194, - "comment": "Signature with special case values r=n and s=p", - "flags": [ + "tcId" : 194, + "comment" : "Signature with special case values r=n and s=p", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + "result" : "invalid" }, { - "tcId": 195, - "comment": "Signature with special case values r=n and s=p + 1", - "flags": [ + "tcId" : 195, + "comment" : "Signature with special case values r=n and s=p + 1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", 
- "sig": "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", + "result" : "invalid" }, { - "tcId": 196, - "comment": "Signature with special case values r=n - 1 and s=0", - "flags": [ + "tcId" : 196, + "comment" : "Signature with special case values r=n - 1 and s=0", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140020100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140020100", + "result" : "invalid" }, { - "tcId": 197, - "comment": "Signature with special case values r=n - 1 and s=1", - "flags": [ + "tcId" : 197, + "comment" : "Signature with special case values r=n - 1 and s=1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140020101", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140020101", + "result" : "invalid" }, { - "tcId": 198, - "comment": "Signature with special case values r=n - 1 and s=-1", - "flags": [ + "tcId" : 198, + "comment" : "Signature with special case values r=n - 1 and s=-1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641400201ff", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641400201ff", + "result" : "invalid" }, { - "tcId": 199, - "comment": "Signature with special case values r=n - 1 and s=n", - "flags": [ + "tcId" : 199, + "comment" : "Signature with special case values r=n - 1 and s=n", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", + "result" : "invalid" }, { - "tcId": 200, - "comment": "Signature with special case values r=n - 1 and s=n - 1", - "flags": [ + "tcId" : 200, + "comment" : "Signature with special case values r=n - 1 and s=n - 1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", + "result" : "invalid" }, { - "tcId": 201, - "comment": "Signature with special case values r=n - 1 and s=n + 1", - "flags": [ + "tcId" : 201, + "comment" : "Signature with special case values r=n - 1 and s=n + 1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": 
"3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", + "result" : "invalid" }, { - "tcId": 202, - "comment": "Signature with special case values r=n - 1 and s=p", - "flags": [ + "tcId" : 202, + "comment" : "Signature with special case values r=n - 1 and s=p", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + "result" : "invalid" }, { - "tcId": 203, - "comment": "Signature with special case values r=n - 1 and s=p + 1", - "flags": [ + "tcId" : 203, + "comment" : "Signature with special case values r=n - 1 and s=p + 1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", + "result" : "invalid" }, { - "tcId": 204, - "comment": "Signature with special case values r=n + 1 and s=0", - "flags": [ + "tcId" : 204, + "comment" : "Signature with special case values r=n + 1 and s=0", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142020100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142020100", + "result" : "invalid" }, { - "tcId": 205, - "comment": "Signature with special case values r=n + 1 and s=1", - "flags": [ + "tcId" : 205, + "comment" : "Signature with special case values r=n + 1 and s=1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142020101", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142020101", + "result" : "invalid" }, { - "tcId": 206, - "comment": "Signature with special case values r=n + 1 and s=-1", - "flags": [ + "tcId" : 206, + "comment" : "Signature with special case values r=n + 1 and s=-1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641420201ff", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641420201ff", + "result" : "invalid" }, { - "tcId": 207, - "comment": "Signature with special case values r=n + 1 and s=n", - "flags": [ + "tcId" : 207, + "comment" : "Signature with special case values r=n + 1 and s=n", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": 
"3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", + "result" : "invalid" }, { - "tcId": 208, - "comment": "Signature with special case values r=n + 1 and s=n - 1", - "flags": [ + "tcId" : 208, + "comment" : "Signature with special case values r=n + 1 and s=n - 1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", + "result" : "invalid" }, { - "tcId": 209, - "comment": "Signature with special case values r=n + 1 and s=n + 1", - "flags": [ + "tcId" : 209, + "comment" : "Signature with special case values r=n + 1 and s=n + 1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", + "result" : "invalid" }, { - "tcId": 210, - "comment": "Signature with special case values r=n + 1 and s=p", - "flags": [ + "tcId" : 210, + "comment" : "Signature with special case values r=n + 1 and s=p", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + "result" : "invalid" }, { - "tcId": 211, - "comment": "Signature with special case values r=n + 1 and s=p + 1", - "flags": [ + "tcId" : 211, + "comment" : "Signature with special case values r=n + 1 and s=p + 1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", + "result" : "invalid" }, { - "tcId": 212, - "comment": "Signature with special case values r=p and s=0", - "flags": [ + "tcId" : 212, + "comment" : "Signature with special case values r=p and s=0", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f020100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f020100", + "result" : "invalid" }, { - "tcId": 213, - "comment": "Signature with special case values r=p and s=1", - "flags": [ + "tcId" : 213, + "comment" : 
"Signature with special case values r=p and s=1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f020101", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f020101", + "result" : "invalid" }, { - "tcId": 214, - "comment": "Signature with special case values r=p and s=-1", - "flags": [ + "tcId" : 214, + "comment" : "Signature with special case values r=p and s=-1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f0201ff", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f0201ff", + "result" : "invalid" }, { - "tcId": 215, - "comment": "Signature with special case values r=p and s=n", - "flags": [ + "tcId" : 215, + "comment" : "Signature with special case values r=p and s=n", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", + "result" : "invalid" }, { - "tcId": 216, - "comment": "Signature with special case values r=p and s=n - 1", - "flags": [ + "tcId" : 216, + "comment" : "Signature with special case values r=p and s=n - 1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", + "result" : "invalid" }, { - "tcId": 217, - "comment": "Signature with special case values r=p and s=n + 1", - "flags": [ + "tcId" : 217, + "comment" : "Signature with special case values r=p and s=n + 1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", + "result" : "invalid" }, { - "tcId": 218, - "comment": "Signature with special case values r=p and s=p", - "flags": [ + "tcId" : 218, + "comment" : "Signature with special case values r=p and s=p", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + "result" : "invalid" }, { - "tcId": 219, - "comment": "Signature with special case values r=p and s=p + 1", - "flags": [ + "tcId" : 219, + "comment" : "Signature with special case values r=p and s=p + 
1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", + "result" : "invalid" }, { - "tcId": 220, - "comment": "Signature with special case values r=p + 1 and s=0", - "flags": [ + "tcId" : 220, + "comment" : "Signature with special case values r=p + 1 and s=0", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30020100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30020100", + "result" : "invalid" }, { - "tcId": 221, - "comment": "Signature with special case values r=p + 1 and s=1", - "flags": [ + "tcId" : 221, + "comment" : "Signature with special case values r=p + 1 and s=1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30020101", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30020101", + "result" : "invalid" }, { - "tcId": 222, - "comment": "Signature with special case values r=p + 1 and s=-1", - "flags": [ + "tcId" : 222, + "comment" : "Signature with special case values r=p + 1 and s=-1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc300201ff", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc300201ff", + "result" : "invalid" }, { - "tcId": 223, - "comment": "Signature with special case values r=p + 1 and s=n", - "flags": [ + "tcId" : 223, + "comment" : "Signature with special case values r=p + 1 and s=n", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", + "result" : "invalid" }, { - "tcId": 224, - "comment": "Signature with special case values r=p + 1 and s=n - 1", - "flags": [ + "tcId" : 224, + "comment" : "Signature with special case values r=p + 1 and s=n - 1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", + "result" : "invalid" }, { - "tcId": 225, - "comment": "Signature with special case values r=p + 1 and s=n + 1", - "flags": [ + "tcId" : 225, + "comment" : "Signature with special case values r=p + 1 and s=n + 1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": 
"3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364142", + "result" : "invalid" }, { - "tcId": 226, - "comment": "Signature with special case values r=p + 1 and s=p", - "flags": [ + "tcId" : 226, + "comment" : "Signature with special case values r=p + 1 and s=p", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + "result" : "invalid" }, { - "tcId": 227, - "comment": "Signature with special case values r=p + 1 and s=p + 1", - "flags": [ + "tcId" : 227, + "comment" : "Signature with special case values r=p + 1 and s=p + 1", + "flags" : [ "InvalidSignature" ], - "msg": "313233343030", - "sig": "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3046022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", + "result" : "invalid" }, { - "tcId": 228, - "comment": "Signature encoding contains incorrect types: r=0, s=0.25", - "flags": [ + "tcId" : 228, + "comment" : "Signature encoding contains incorrect types: r=0, s=0.25", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3008020100090380fe01", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3008020100090380fe01", + "result" : "invalid" }, { - "tcId": 229, - "comment": "Signature encoding contains incorrect types: r=0, s=nan", - "flags": [ + "tcId" : 229, + "comment" : "Signature encoding contains incorrect types: r=0, s=nan", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3006020100090142", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3006020100090142", + "result" : "invalid" }, { - "tcId": 230, - "comment": "Signature encoding contains incorrect types: r=0, s=True", - "flags": [ + "tcId" : 230, + "comment" : "Signature encoding contains incorrect types: r=0, s=True", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3006020100010101", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3006020100010101", + "result" : "invalid" }, { - "tcId": 231, - "comment": "Signature encoding contains incorrect types: r=0, s=False", - "flags": [ + "tcId" : 231, + "comment" : "Signature encoding contains incorrect types: r=0, s=False", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3006020100010100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3006020100010100", + "result" : "invalid" }, { - "tcId": 232, - "comment": "Signature encoding contains incorrect types: r=0, s=Null", - "flags": [ + "tcId" : 232, + "comment" : "Signature encoding contains incorrect types: r=0, s=Null", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "30050201000500", - "result": "invalid" + "msg" : 
"313233343030", + "sig" : "30050201000500", + "result" : "invalid" }, { - "tcId": 233, - "comment": "Signature encoding contains incorrect types: r=0, s=empyt UTF-8 string", - "flags": [ + "tcId" : 233, + "comment" : "Signature encoding contains incorrect types: r=0, s=empyt UTF-8 string", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "30050201000c00", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30050201000c00", + "result" : "invalid" }, { - "tcId": 234, - "comment": "Signature encoding contains incorrect types: r=0, s=\"0\"", - "flags": [ + "tcId" : 234, + "comment" : "Signature encoding contains incorrect types: r=0, s=\"0\"", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "30060201000c0130", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30060201000c0130", + "result" : "invalid" }, { - "tcId": 235, - "comment": "Signature encoding contains incorrect types: r=0, s=empty list", - "flags": [ + "tcId" : 235, + "comment" : "Signature encoding contains incorrect types: r=0, s=empty list", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "30050201003000", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30050201003000", + "result" : "invalid" }, { - "tcId": 236, - "comment": "Signature encoding contains incorrect types: r=0, s=list containing 0", - "flags": [ + "tcId" : 236, + "comment" : "Signature encoding contains incorrect types: r=0, s=list containing 0", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "30080201003003020100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30080201003003020100", + "result" : "invalid" }, { - "tcId": 237, - "comment": "Signature encoding contains incorrect types: r=1, s=0.25", - "flags": [ + "tcId" : 237, + "comment" : "Signature encoding contains incorrect types: r=1, s=0.25", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3008020101090380fe01", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3008020101090380fe01", + "result" : "invalid" }, { - "tcId": 238, - "comment": "Signature encoding contains incorrect types: r=1, s=nan", - "flags": [ + "tcId" : 238, + "comment" : "Signature encoding contains incorrect types: r=1, s=nan", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3006020101090142", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3006020101090142", + "result" : "invalid" }, { - "tcId": 239, - "comment": "Signature encoding contains incorrect types: r=1, s=True", - "flags": [ + "tcId" : 239, + "comment" : "Signature encoding contains incorrect types: r=1, s=True", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3006020101010101", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3006020101010101", + "result" : "invalid" }, { - "tcId": 240, - "comment": "Signature encoding contains incorrect types: r=1, s=False", - "flags": [ + "tcId" : 240, + "comment" : "Signature encoding contains incorrect types: r=1, s=False", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3006020101010100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3006020101010100", + "result" : "invalid" }, { - "tcId": 241, - "comment": "Signature encoding contains incorrect types: r=1, s=Null", - "flags": [ + "tcId" : 241, + "comment" : "Signature encoding contains incorrect types: r=1, s=Null", + "flags" : [ "InvalidTypesInSignature" ], - "msg": 
"313233343030", - "sig": "30050201010500", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30050201010500", + "result" : "invalid" }, { - "tcId": 242, - "comment": "Signature encoding contains incorrect types: r=1, s=empyt UTF-8 string", - "flags": [ + "tcId" : 242, + "comment" : "Signature encoding contains incorrect types: r=1, s=empyt UTF-8 string", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "30050201010c00", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30050201010c00", + "result" : "invalid" }, { - "tcId": 243, - "comment": "Signature encoding contains incorrect types: r=1, s=\"0\"", - "flags": [ + "tcId" : 243, + "comment" : "Signature encoding contains incorrect types: r=1, s=\"0\"", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "30060201010c0130", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30060201010c0130", + "result" : "invalid" }, { - "tcId": 244, - "comment": "Signature encoding contains incorrect types: r=1, s=empty list", - "flags": [ + "tcId" : 244, + "comment" : "Signature encoding contains incorrect types: r=1, s=empty list", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "30050201013000", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30050201013000", + "result" : "invalid" }, { - "tcId": 245, - "comment": "Signature encoding contains incorrect types: r=1, s=list containing 0", - "flags": [ + "tcId" : 245, + "comment" : "Signature encoding contains incorrect types: r=1, s=list containing 0", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "30080201013003020100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30080201013003020100", + "result" : "invalid" }, { - "tcId": 246, - "comment": "Signature encoding contains incorrect types: r=-1, s=0.25", - "flags": [ + "tcId" : 246, + "comment" : "Signature encoding contains incorrect types: r=-1, s=0.25", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "30080201ff090380fe01", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30080201ff090380fe01", + "result" : "invalid" }, { - "tcId": 247, - "comment": "Signature encoding contains incorrect types: r=-1, s=nan", - "flags": [ + "tcId" : 247, + "comment" : "Signature encoding contains incorrect types: r=-1, s=nan", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "30060201ff090142", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30060201ff090142", + "result" : "invalid" }, { - "tcId": 248, - "comment": "Signature encoding contains incorrect types: r=-1, s=True", - "flags": [ + "tcId" : 248, + "comment" : "Signature encoding contains incorrect types: r=-1, s=True", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "30060201ff010101", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30060201ff010101", + "result" : "invalid" }, { - "tcId": 249, - "comment": "Signature encoding contains incorrect types: r=-1, s=False", - "flags": [ + "tcId" : 249, + "comment" : "Signature encoding contains incorrect types: r=-1, s=False", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "30060201ff010100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30060201ff010100", + "result" : "invalid" }, { - "tcId": 250, - "comment": "Signature encoding contains incorrect types: r=-1, s=Null", - "flags": [ + "tcId" : 250, + "comment" : "Signature encoding contains incorrect 
types: r=-1, s=Null", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "30050201ff0500", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30050201ff0500", + "result" : "invalid" }, { - "tcId": 251, - "comment": "Signature encoding contains incorrect types: r=-1, s=empyt UTF-8 string", - "flags": [ + "tcId" : 251, + "comment" : "Signature encoding contains incorrect types: r=-1, s=empyt UTF-8 string", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "30050201ff0c00", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30050201ff0c00", + "result" : "invalid" }, { - "tcId": 252, - "comment": "Signature encoding contains incorrect types: r=-1, s=\"0\"", - "flags": [ + "tcId" : 252, + "comment" : "Signature encoding contains incorrect types: r=-1, s=\"0\"", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "30060201ff0c0130", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30060201ff0c0130", + "result" : "invalid" }, { - "tcId": 253, - "comment": "Signature encoding contains incorrect types: r=-1, s=empty list", - "flags": [ + "tcId" : 253, + "comment" : "Signature encoding contains incorrect types: r=-1, s=empty list", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "30050201ff3000", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30050201ff3000", + "result" : "invalid" }, { - "tcId": 254, - "comment": "Signature encoding contains incorrect types: r=-1, s=list containing 0", - "flags": [ + "tcId" : 254, + "comment" : "Signature encoding contains incorrect types: r=-1, s=list containing 0", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "30080201ff3003020100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30080201ff3003020100", + "result" : "invalid" }, { - "tcId": 255, - "comment": "Signature encoding contains incorrect types: r=n, s=0.25", - "flags": [ + "tcId" : 255, + "comment" : "Signature encoding contains incorrect types: r=n, s=0.25", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3028022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141090380fe01", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3028022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141090380fe01", + "result" : "invalid" }, { - "tcId": 256, - "comment": "Signature encoding contains incorrect types: r=n, s=nan", - "flags": [ + "tcId" : 256, + "comment" : "Signature encoding contains incorrect types: r=n, s=nan", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141090142", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141090142", + "result" : "invalid" }, { - "tcId": 257, - "comment": "Signature encoding contains incorrect types: r=n, s=True", - "flags": [ + "tcId" : 257, + "comment" : "Signature encoding contains incorrect types: r=n, s=True", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141010101", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141010101", + "result" : "invalid" }, { - "tcId": 258, - "comment": "Signature encoding contains incorrect types: r=n, s=False", - "flags": [ + 
"tcId" : 258, + "comment" : "Signature encoding contains incorrect types: r=n, s=False", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141010100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141010100", + "result" : "invalid" }, { - "tcId": 259, - "comment": "Signature encoding contains incorrect types: r=n, s=Null", - "flags": [ + "tcId" : 259, + "comment" : "Signature encoding contains incorrect types: r=n, s=Null", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3025022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641410500", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3025022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641410500", + "result" : "invalid" }, { - "tcId": 260, - "comment": "Signature encoding contains incorrect types: r=n, s=empyt UTF-8 string", - "flags": [ + "tcId" : 260, + "comment" : "Signature encoding contains incorrect types: r=n, s=empyt UTF-8 string", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3025022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641410c00", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3025022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641410c00", + "result" : "invalid" }, { - "tcId": 261, - "comment": "Signature encoding contains incorrect types: r=n, s=\"0\"", - "flags": [ + "tcId" : 261, + "comment" : "Signature encoding contains incorrect types: r=n, s=\"0\"", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641410c0130", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641410c0130", + "result" : "invalid" }, { - "tcId": 262, - "comment": "Signature encoding contains incorrect types: r=n, s=empty list", - "flags": [ + "tcId" : 262, + "comment" : "Signature encoding contains incorrect types: r=n, s=empty list", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3025022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641413000", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3025022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641413000", + "result" : "invalid" }, { - "tcId": 263, - "comment": "Signature encoding contains incorrect types: r=n, s=list containing 0", - "flags": [ + "tcId" : 263, + "comment" : "Signature encoding contains incorrect types: r=n, s=list containing 0", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3028022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641413003020100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3028022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641413003020100", + "result" : "invalid" }, { - "tcId": 264, - "comment": "Signature encoding contains incorrect types: r=p, s=0.25", - "flags": [ + "tcId" : 264, + "comment" : "Signature encoding contains incorrect types: r=p, s=0.25", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3028022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f090380fe01", - "result": "invalid" + "msg" : "313233343030", + "sig" : 
"3028022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f090380fe01", + "result" : "invalid" }, { - "tcId": 265, - "comment": "Signature encoding contains incorrect types: r=p, s=nan", - "flags": [ + "tcId" : 265, + "comment" : "Signature encoding contains incorrect types: r=p, s=nan", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f090142", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f090142", + "result" : "invalid" }, { - "tcId": 266, - "comment": "Signature encoding contains incorrect types: r=p, s=True", - "flags": [ + "tcId" : 266, + "comment" : "Signature encoding contains incorrect types: r=p, s=True", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f010101", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f010101", + "result" : "invalid" }, { - "tcId": 267, - "comment": "Signature encoding contains incorrect types: r=p, s=False", - "flags": [ + "tcId" : 267, + "comment" : "Signature encoding contains incorrect types: r=p, s=False", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f010100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f010100", + "result" : "invalid" }, { - "tcId": 268, - "comment": "Signature encoding contains incorrect types: r=p, s=Null", - "flags": [ + "tcId" : 268, + "comment" : "Signature encoding contains incorrect types: r=p, s=Null", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3025022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f0500", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3025022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f0500", + "result" : "invalid" }, { - "tcId": 269, - "comment": "Signature encoding contains incorrect types: r=p, s=empyt UTF-8 string", - "flags": [ + "tcId" : 269, + "comment" : "Signature encoding contains incorrect types: r=p, s=empyt UTF-8 string", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3025022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f0c00", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3025022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f0c00", + "result" : "invalid" }, { - "tcId": 270, - "comment": "Signature encoding contains incorrect types: r=p, s=\"0\"", - "flags": [ + "tcId" : 270, + "comment" : "Signature encoding contains incorrect types: r=p, s=\"0\"", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f0c0130", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f0c0130", + "result" : "invalid" }, { - "tcId": 271, - "comment": "Signature encoding contains incorrect types: r=p, s=empty list", - "flags": [ + "tcId" : 271, + "comment" : "Signature encoding contains incorrect types: r=p, s=empty list", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": 
"3025022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f3000", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3025022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f3000", + "result" : "invalid" }, { - "tcId": 272, - "comment": "Signature encoding contains incorrect types: r=p, s=list containing 0", - "flags": [ + "tcId" : 272, + "comment" : "Signature encoding contains incorrect types: r=p, s=list containing 0", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3028022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f3003020100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3028022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f3003020100", + "result" : "invalid" }, { - "tcId": 273, - "comment": "Signature encoding contains incorrect types: r=0.25, s=0.25", - "flags": [ + "tcId" : 273, + "comment" : "Signature encoding contains incorrect types: r=0.25, s=0.25", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "300a090380fe01090380fe01", - "result": "invalid" + "msg" : "313233343030", + "sig" : "300a090380fe01090380fe01", + "result" : "invalid" }, { - "tcId": 274, - "comment": "Signature encoding contains incorrect types: r=nan, s=nan", - "flags": [ + "tcId" : 274, + "comment" : "Signature encoding contains incorrect types: r=nan, s=nan", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3006090142090142", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3006090142090142", + "result" : "invalid" }, { - "tcId": 275, - "comment": "Signature encoding contains incorrect types: r=True, s=True", - "flags": [ + "tcId" : 275, + "comment" : "Signature encoding contains incorrect types: r=True, s=True", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3006010101010101", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3006010101010101", + "result" : "invalid" }, { - "tcId": 276, - "comment": "Signature encoding contains incorrect types: r=False, s=False", - "flags": [ + "tcId" : 276, + "comment" : "Signature encoding contains incorrect types: r=False, s=False", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3006010100010100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3006010100010100", + "result" : "invalid" }, { - "tcId": 277, - "comment": "Signature encoding contains incorrect types: r=Null, s=Null", - "flags": [ + "tcId" : 277, + "comment" : "Signature encoding contains incorrect types: r=Null, s=Null", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "300405000500", - "result": "invalid" + "msg" : "313233343030", + "sig" : "300405000500", + "result" : "invalid" }, { - "tcId": 278, - "comment": "Signature encoding contains incorrect types: r=empyt UTF-8 string, s=empyt UTF-8 string", - "flags": [ + "tcId" : 278, + "comment" : "Signature encoding contains incorrect types: r=empyt UTF-8 string, s=empyt UTF-8 string", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "30040c000c00", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30040c000c00", + "result" : "invalid" }, { - "tcId": 279, - "comment": "Signature encoding contains incorrect types: r=\"0\", s=\"0\"", - "flags": [ + "tcId" : 279, + "comment" : "Signature encoding contains incorrect types: r=\"0\", s=\"0\"", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": 
"30060c01300c0130", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30060c01300c0130", + "result" : "invalid" }, { - "tcId": 280, - "comment": "Signature encoding contains incorrect types: r=empty list, s=empty list", - "flags": [ + "tcId" : 280, + "comment" : "Signature encoding contains incorrect types: r=empty list, s=empty list", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "300430003000", - "result": "invalid" + "msg" : "313233343030", + "sig" : "300430003000", + "result" : "invalid" }, { - "tcId": 281, - "comment": "Signature encoding contains incorrect types: r=list containing 0, s=list containing 0", - "flags": [ + "tcId" : 281, + "comment" : "Signature encoding contains incorrect types: r=list containing 0, s=list containing 0", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "300a30030201003003020100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "300a30030201003003020100", + "result" : "invalid" }, { - "tcId": 282, - "comment": "Signature encoding contains incorrect types: r=0.25, s=0", - "flags": [ + "tcId" : 282, + "comment" : "Signature encoding contains incorrect types: r=0.25, s=0", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3008090380fe01020100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3008090380fe01020100", + "result" : "invalid" }, { - "tcId": 283, - "comment": "Signature encoding contains incorrect types: r=nan, s=0", - "flags": [ + "tcId" : 283, + "comment" : "Signature encoding contains incorrect types: r=nan, s=0", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3006090142020100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3006090142020100", + "result" : "invalid" }, { - "tcId": 284, - "comment": "Signature encoding contains incorrect types: r=True, s=0", - "flags": [ + "tcId" : 284, + "comment" : "Signature encoding contains incorrect types: r=True, s=0", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3006010101020100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3006010101020100", + "result" : "invalid" }, { - "tcId": 285, - "comment": "Signature encoding contains incorrect types: r=False, s=0", - "flags": [ + "tcId" : 285, + "comment" : "Signature encoding contains incorrect types: r=False, s=0", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "3006010100020100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3006010100020100", + "result" : "invalid" }, { - "tcId": 286, - "comment": "Signature encoding contains incorrect types: r=Null, s=0", - "flags": [ + "tcId" : 286, + "comment" : "Signature encoding contains incorrect types: r=Null, s=0", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "30050500020100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30050500020100", + "result" : "invalid" }, { - "tcId": 287, - "comment": "Signature encoding contains incorrect types: r=empyt UTF-8 string, s=0", - "flags": [ + "tcId" : 287, + "comment" : "Signature encoding contains incorrect types: r=empyt UTF-8 string, s=0", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "30050c00020100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30050c00020100", + "result" : "invalid" }, { - "tcId": 288, - "comment": "Signature encoding contains incorrect types: r=\"0\", s=0", - "flags": [ + "tcId" : 288, + "comment" : "Signature encoding 
contains incorrect types: r=\"0\", s=0", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "30060c0130020100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30060c0130020100", + "result" : "invalid" }, { - "tcId": 289, - "comment": "Signature encoding contains incorrect types: r=empty list, s=0", - "flags": [ + "tcId" : 289, + "comment" : "Signature encoding contains incorrect types: r=empty list, s=0", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "30053000020100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30053000020100", + "result" : "invalid" }, { - "tcId": 290, - "comment": "Signature encoding contains incorrect types: r=list containing 0, s=0", - "flags": [ + "tcId" : 290, + "comment" : "Signature encoding contains incorrect types: r=list containing 0, s=0", + "flags" : [ "InvalidTypesInSignature" ], - "msg": "313233343030", - "sig": "30083003020100020100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30083003020100020100", + "result" : "invalid" }, { - "tcId": 291, - "comment": "Edge case for Shamir multiplication", - "flags": [ + "tcId" : 291, + "comment" : "Edge case for Shamir multiplication", + "flags" : [ "EdgeCaseShamirMultiplication" ], - "msg": "3235353835", - "sig": "3045022100dd1b7d09a7bd8218961034a39a87fecf5314f00c4d25eb58a07ac85e85eab516022035138c401ef8d3493d65c9002fe62b43aee568731b744548358996d9cc427e06", - "result": "valid" + "msg" : "3235353835", + "sig" : "3045022100dd1b7d09a7bd8218961034a39a87fecf5314f00c4d25eb58a07ac85e85eab516022035138c401ef8d3493d65c9002fe62b43aee568731b744548358996d9cc427e06", + "result" : "valid" }, { - "tcId": 292, - "comment": "special case hash", - "flags": [ + "tcId" : 292, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "343236343739373234", - "sig": "304502210095c29267d972a043d955224546222bba343fc1d4db0fec262a33ac61305696ae02206edfe96713aed56f8a28a6653f57e0b829712e5eddc67f34682b24f0676b2640", - "result": "valid" + "msg" : "343236343739373234", + "sig" : "304502210095c29267d972a043d955224546222bba343fc1d4db0fec262a33ac61305696ae02206edfe96713aed56f8a28a6653f57e0b829712e5eddc67f34682b24f0676b2640", + "result" : "valid" }, { - "tcId": 293, - "comment": "special case hash", - "flags": [ + "tcId" : 293, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "37313338363834383931", - "sig": "3044022028f94a894e92024699e345fe66971e3edcd050023386135ab3939d550898fb25022032963e5bd41fa5911ed8f37deb86dae0a762bb6121c894615083c5d95ea01db3", - "result": "valid" + "msg" : "37313338363834383931", + "sig" : "3044022028f94a894e92024699e345fe66971e3edcd050023386135ab3939d550898fb25022032963e5bd41fa5911ed8f37deb86dae0a762bb6121c894615083c5d95ea01db3", + "result" : "valid" }, { - "tcId": 294, - "comment": "special case hash", - "flags": [ + "tcId" : 294, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "3130333539333331363638", - "sig": "3045022100be26b18f9549f89f411a9b52536b15aa270b84548d0e859a1952a27af1a77ac6022070c1d4fa9cd03cc8eaa8d506edb97eed7b8358b453c88aefbb880a3f0e8d472f", - "result": "valid" + "msg" : "3130333539333331363638", + "sig" : "3045022100be26b18f9549f89f411a9b52536b15aa270b84548d0e859a1952a27af1a77ac6022070c1d4fa9cd03cc8eaa8d506edb97eed7b8358b453c88aefbb880a3f0e8d472f", + "result" : "valid" }, { - "tcId": 295, - "comment": "special case hash", - "flags": [ + "tcId" : 295, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": 
"33393439343031323135", - "sig": "3045022100b1a4b1478e65cc3eafdf225d1298b43f2da19e4bcff7eacc0a2e98cd4b74b1140220179aa31e304cc142cf5073171751b28f3f5e0fa88c994e7c55f1bc07b8d56c16", - "result": "valid" + "msg" : "33393439343031323135", + "sig" : "3045022100b1a4b1478e65cc3eafdf225d1298b43f2da19e4bcff7eacc0a2e98cd4b74b1140220179aa31e304cc142cf5073171751b28f3f5e0fa88c994e7c55f1bc07b8d56c16", + "result" : "valid" }, { - "tcId": 296, - "comment": "special case hash", - "flags": [ + "tcId" : 296, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "31333434323933303739", - "sig": "30440220325332021261f1bd18f2712aa1e2252da23796da8a4b1ff6ea18cafec7e171f2022040b4f5e287ee61fc3c804186982360891eaa35c75f05a43ecd48b35d984a6648", - "result": "valid" + "msg" : "31333434323933303739", + "sig" : "30440220325332021261f1bd18f2712aa1e2252da23796da8a4b1ff6ea18cafec7e171f2022040b4f5e287ee61fc3c804186982360891eaa35c75f05a43ecd48b35d984a6648", + "result" : "valid" }, { - "tcId": 297, - "comment": "special case hash", - "flags": [ + "tcId" : 297, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "33373036323131373132", - "sig": "3045022100a23ad18d8fc66d81af0903890cbd453a554cb04cdc1a8ca7f7f78e5367ed88a0022023e3eb2ce1c04ea748c389bd97374aa9413b9268851c04dcd9f88e78813fee56", - "result": "valid" + "msg" : "33373036323131373132", + "sig" : "3045022100a23ad18d8fc66d81af0903890cbd453a554cb04cdc1a8ca7f7f78e5367ed88a0022023e3eb2ce1c04ea748c389bd97374aa9413b9268851c04dcd9f88e78813fee56", + "result" : "valid" }, { - "tcId": 298, - "comment": "special case hash", - "flags": [ + "tcId" : 298, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "333433363838373132", - "sig": "304402202bdea41cda63a2d14bf47353bd20880a690901de7cd6e3cc6d8ed5ba0cdb109102203cea66bccfc9f9bf8c7ca4e1c1457cc9145e13e936d90b3d9c7786b8b26cf4c7", - "result": "valid" + "msg" : "333433363838373132", + "sig" : "304402202bdea41cda63a2d14bf47353bd20880a690901de7cd6e3cc6d8ed5ba0cdb109102203cea66bccfc9f9bf8c7ca4e1c1457cc9145e13e936d90b3d9c7786b8b26cf4c7", + "result" : "valid" }, { - "tcId": 299, - "comment": "special case hash", - "flags": [ + "tcId" : 299, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "31333531353330333730", - "sig": "3045022100d7cd76ec01c1b1079eba9e2aa2a397243c4758c98a1ba0b7404a340b9b00ced602203575001e19d922e6de8b3d6c84ea43b5c3338106cf29990134e7669a826f78e6", - "result": "valid" + "msg" : "31333531353330333730", + "sig" : "3045022100d7cd76ec01c1b1079eba9e2aa2a397243c4758c98a1ba0b7404a340b9b00ced602203575001e19d922e6de8b3d6c84ea43b5c3338106cf29990134e7669a826f78e6", + "result" : "valid" }, { - "tcId": 300, - "comment": "special case hash", - "flags": [ + "tcId" : 300, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "36353533323033313236", - "sig": "3045022100a872c744d936db21a10c361dd5c9063355f84902219652f6fc56dc95a7139d960220400df7575d9756210e9ccc77162c6b593c7746cfb48ac263c42750b421ef4bb9", - "result": "valid" + "msg" : "36353533323033313236", + "sig" : "3045022100a872c744d936db21a10c361dd5c9063355f84902219652f6fc56dc95a7139d960220400df7575d9756210e9ccc77162c6b593c7746cfb48ac263c42750b421ef4bb9", + "result" : "valid" }, { - "tcId": 301, - "comment": "special case hash", - "flags": [ + "tcId" : 301, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "31353634333436363033", - "sig": 
"30450221009fa9afe07752da10b36d3afcd0fe44bfc40244d75203599cf8f5047fa3453854022050e0a7c013bfbf51819736972d44b4b56bc2a2b2c180df6ec672df171410d77a", - "result": "valid" + "msg" : "31353634333436363033", + "sig" : "30450221009fa9afe07752da10b36d3afcd0fe44bfc40244d75203599cf8f5047fa3453854022050e0a7c013bfbf51819736972d44b4b56bc2a2b2c180df6ec672df171410d77a", + "result" : "valid" }, { - "tcId": 302, - "comment": "special case hash", - "flags": [ + "tcId" : 302, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "34343239353339313137", - "sig": "3045022100885640384d0d910efb177b46be6c3dc5cac81f0b88c3190bb6b5f99c2641f2050220738ed9bff116306d9caa0f8fc608be243e0b567779d8dab03e8e19d553f1dc8e", - "result": "valid" + "msg" : "34343239353339313137", + "sig" : "3045022100885640384d0d910efb177b46be6c3dc5cac81f0b88c3190bb6b5f99c2641f2050220738ed9bff116306d9caa0f8fc608be243e0b567779d8dab03e8e19d553f1dc8e", + "result" : "valid" }, { - "tcId": 303, - "comment": "special case hash", - "flags": [ + "tcId" : 303, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "3130393533323631333531", - "sig": "304402202d051f91c5a9d440c5676985710483bc4f1a6c611b10c95a2ff0363d90c2a45802206ddf94e6fba5be586833d0c53cf216ad3948f37953c26c1cf4968e9a9e8243dc", - "result": "valid" + "msg" : "3130393533323631333531", + "sig" : "304402202d051f91c5a9d440c5676985710483bc4f1a6c611b10c95a2ff0363d90c2a45802206ddf94e6fba5be586833d0c53cf216ad3948f37953c26c1cf4968e9a9e8243dc", + "result" : "valid" }, { - "tcId": 304, - "comment": "special case hash", - "flags": [ + "tcId" : 304, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "35393837333530303431", - "sig": "3045022100f3ac2523967482f53d508522712d583f4379cd824101ff635ea0935117baa54f022027f10812227397e02cea96fb0e680761636dab2b080d1fc5d11685cbe8500cfe", - "result": "valid" + "msg" : "35393837333530303431", + "sig" : "3045022100f3ac2523967482f53d508522712d583f4379cd824101ff635ea0935117baa54f022027f10812227397e02cea96fb0e680761636dab2b080d1fc5d11685cbe8500cfe", + "result" : "valid" }, { - "tcId": 305, - "comment": "special case hash", - "flags": [ + "tcId" : 305, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "33343633303036383738", - "sig": "304502210096447cf68c3ab7266ed7447de3ac52fed7cc08cbdfea391c18a9b8ab370bc91302200f5e7874d3ac0e918f01c885a1639177c923f8660d1ceba1ca1f301bc675cdbc", - "result": "valid" + "msg" : "33343633303036383738", + "sig" : "304502210096447cf68c3ab7266ed7447de3ac52fed7cc08cbdfea391c18a9b8ab370bc91302200f5e7874d3ac0e918f01c885a1639177c923f8660d1ceba1ca1f301bc675cdbc", + "result" : "valid" }, { - "tcId": 306, - "comment": "special case hash", - "flags": [ + "tcId" : 306, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "39383137333230323837", - "sig": "30440220530a0832b691da0b5619a0b11de6877f3c0971baaa68ed122758c29caaf46b7202206c89e44f5eb33060ea4b46318c39138eaedec72de42ba576579a6a4690e339f3", - "result": "valid" + "msg" : "39383137333230323837", + "sig" : "30440220530a0832b691da0b5619a0b11de6877f3c0971baaa68ed122758c29caaf46b7202206c89e44f5eb33060ea4b46318c39138eaedec72de42ba576579a6a4690e339f3", + "result" : "valid" }, { - "tcId": 307, - "comment": "special case hash", - "flags": [ + "tcId" : 307, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "33323232303431303436", - "sig": 
"30450221009c54c25500bde0b92d72d6ec483dc2482f3654294ca74de796b681255ed58a770220677453c6b56f527631c9f67b3f3eb621fd88582b4aff156d2f1567d6211a2a33", - "result": "valid" + "msg" : "33323232303431303436", + "sig" : "30450221009c54c25500bde0b92d72d6ec483dc2482f3654294ca74de796b681255ed58a770220677453c6b56f527631c9f67b3f3eb621fd88582b4aff156d2f1567d6211a2a33", + "result" : "valid" }, { - "tcId": 308, - "comment": "special case hash", - "flags": [ + "tcId" : 308, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "36363636333037313034", - "sig": "3045022100e7909d41439e2f6af29136c7348ca2641a2b070d5b64f91ea9da7070c7a2618b022042d782f132fa1d36c2c88ba27c3d678d80184a5d1eccac7501f0b47e3d205008", - "result": "valid" + "msg" : "36363636333037313034", + "sig" : "3045022100e7909d41439e2f6af29136c7348ca2641a2b070d5b64f91ea9da7070c7a2618b022042d782f132fa1d36c2c88ba27c3d678d80184a5d1eccac7501f0b47e3d205008", + "result" : "valid" }, { - "tcId": 309, - "comment": "special case hash", - "flags": [ + "tcId" : 309, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "31303335393531383938", - "sig": "304402205924873209593135a4c3da7bb381227f8a4b6aa9f34fe5bb7f8fbc131a039ffe02201f1bb11b441c8feaa40f44213d9a405ed792d59fb49d5bcdd9a4285ae5693022", - "result": "valid" + "msg" : "31303335393531383938", + "sig" : "304402205924873209593135a4c3da7bb381227f8a4b6aa9f34fe5bb7f8fbc131a039ffe02201f1bb11b441c8feaa40f44213d9a405ed792d59fb49d5bcdd9a4285ae5693022", + "result" : "valid" }, { - "tcId": 310, - "comment": "special case hash", - "flags": [ + "tcId" : 310, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "31383436353937313935", - "sig": "3045022100eeb692c9b262969b231c38b5a7f60649e0c875cd64df88f33aa571fa3d29ab0e0220218b3a1eb06379c2c18cf51b06430786d1c64cd2d24c9b232b23e5bac7989acd", - "result": "valid" + "msg" : "31383436353937313935", + "sig" : "3045022100eeb692c9b262969b231c38b5a7f60649e0c875cd64df88f33aa571fa3d29ab0e0220218b3a1eb06379c2c18cf51b06430786d1c64cd2d24c9b232b23e5bac7989acd", + "result" : "valid" }, { - "tcId": 311, - "comment": "special case hash", - "flags": [ + "tcId" : 311, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "33313336303436313839", - "sig": "3045022100a40034177f36091c2b653684a0e3eb5d4bff18e4d09f664c2800e7cafda1daf802203a3ec29853704e52031c58927a800a968353adc3d973beba9172cbbeab4dd149", - "result": "valid" + "msg" : "33313336303436313839", + "sig" : "3045022100a40034177f36091c2b653684a0e3eb5d4bff18e4d09f664c2800e7cafda1daf802203a3ec29853704e52031c58927a800a968353adc3d973beba9172cbbeab4dd149", + "result" : "valid" }, { - "tcId": 312, - "comment": "special case hash", - "flags": [ + "tcId" : 312, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "32363633373834323534", - "sig": "3045022100b5d795cc75cea5c434fa4185180cd6bd21223f3d5a86da6670d71d95680dadbf022054e4d8810a001ecbb9f7ca1c2ebfdb9d009e9031a431aca3c20ab4e0d1374ec1", - "result": "valid" + "msg" : "32363633373834323534", + "sig" : "3045022100b5d795cc75cea5c434fa4185180cd6bd21223f3d5a86da6670d71d95680dadbf022054e4d8810a001ecbb9f7ca1c2ebfdb9d009e9031a431aca3c20ab4e0d1374ec1", + "result" : "valid" }, { - "tcId": 313, - "comment": "special case hash", - "flags": [ + "tcId" : 313, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "31363532313030353234", - "sig": 
"3044022007dc2478d43c1232a4595608c64426c35510051a631ae6a5a6eb1161e57e42e102204a59ea0fdb72d12165cea3bf1ca86ba97517bd188db3dbd21a5a157850021984", - "result": "valid" + "msg" : "31363532313030353234", + "sig" : "3044022007dc2478d43c1232a4595608c64426c35510051a631ae6a5a6eb1161e57e42e102204a59ea0fdb72d12165cea3bf1ca86ba97517bd188db3dbd21a5a157850021984", + "result" : "valid" }, { - "tcId": 314, - "comment": "special case hash", - "flags": [ + "tcId" : 314, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "35373438303831363936", - "sig": "3045022100ddd20c4a05596ca868b558839fce9f6511ddd83d1ccb53f82e5269d559a0155202205b91734729d93093ff22123c4a25819d7feb66a250663fc780cb66fc7b6e6d17", - "result": "valid" + "msg" : "35373438303831363936", + "sig" : "3045022100ddd20c4a05596ca868b558839fce9f6511ddd83d1ccb53f82e5269d559a0155202205b91734729d93093ff22123c4a25819d7feb66a250663fc780cb66fc7b6e6d17", + "result" : "valid" }, { - "tcId": 315, - "comment": "special case hash", - "flags": [ + "tcId" : 315, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "36333433393133343638", - "sig": "30450221009cde6e0ede0a003f02fda0a01b59facfe5dec063318f279ce2de7a9b1062f7b702202886a5b8c679bdf8224c66f908fd6205492cb70b0068d46ae4f33a4149b12a52", - "result": "valid" + "msg" : "36333433393133343638", + "sig" : "30450221009cde6e0ede0a003f02fda0a01b59facfe5dec063318f279ce2de7a9b1062f7b702202886a5b8c679bdf8224c66f908fd6205492cb70b0068d46ae4f33a4149b12a52", + "result" : "valid" }, { - "tcId": 316, - "comment": "special case hash", - "flags": [ + "tcId" : 316, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "31353431313033353938", - "sig": "3045022100c5771016d0dd6357143c89f684cd740423502554c0c59aa8c99584f1ff38f609022054b405f4477546686e464c5463b4fd4190572e58d0f7e7357f6e61947d20715c", - "result": "valid" + "msg" : "31353431313033353938", + "sig" : "3045022100c5771016d0dd6357143c89f684cd740423502554c0c59aa8c99584f1ff38f609022054b405f4477546686e464c5463b4fd4190572e58d0f7e7357f6e61947d20715c", + "result" : "valid" }, { - "tcId": 317, - "comment": "special case hash", - "flags": [ + "tcId" : 317, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "3130343738353830313238", - "sig": "3045022100a24ebc0ec224bd67ae397cbe6fa37b3125adbd34891abe2d7c7356921916dfe6022034f6eb6374731bbbafc4924fb8b0bdcdda49456d724cdae6178d87014cb53d8c", - "result": "valid" + "msg" : "3130343738353830313238", + "sig" : "3045022100a24ebc0ec224bd67ae397cbe6fa37b3125adbd34891abe2d7c7356921916dfe6022034f6eb6374731bbbafc4924fb8b0bdcdda49456d724cdae6178d87014cb53d8c", + "result" : "valid" }, { - "tcId": 318, - "comment": "special case hash", - "flags": [ + "tcId" : 318, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "3130353336323835353638", - "sig": "304402202557d64a7aee2e0931c012e4fea1cd3a2c334edae68cdeb7158caf21b68e5a2402207f06cdbb6a90023a973882ed97b080fe6b05af3ec93db6f1a4399a69edf7670d", - "result": "valid" + "msg" : "3130353336323835353638", + "sig" : "304402202557d64a7aee2e0931c012e4fea1cd3a2c334edae68cdeb7158caf21b68e5a2402207f06cdbb6a90023a973882ed97b080fe6b05af3ec93db6f1a4399a69edf7670d", + "result" : "valid" }, { - "tcId": 319, - "comment": "special case hash", - "flags": [ + "tcId" : 319, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "393533393034313035", - "sig": 
"3045022100c4f2eccbb6a24350c8466450b9d61b207ee359e037b3dcedb42a3f2e6dd6aeb502203263c6b59a2f55cdd1c6e14894d5e5963b28bc3e2469ac9ba1197991ca7ff9c7", - "result": "valid" + "msg" : "393533393034313035", + "sig" : "3045022100c4f2eccbb6a24350c8466450b9d61b207ee359e037b3dcedb42a3f2e6dd6aeb502203263c6b59a2f55cdd1c6e14894d5e5963b28bc3e2469ac9ba1197991ca7ff9c7", + "result" : "valid" }, { - "tcId": 320, - "comment": "special case hash", - "flags": [ + "tcId" : 320, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "393738383438303339", - "sig": "3045022100eff04781c9cbcd162d0a25a6e2ebcca43506c523385cb515d49ea38a1b12fcad022015acd73194c91a95478534f23015b672ebed213e45424dd2c8e26ac8b3eb34a5", - "result": "valid" + "msg" : "393738383438303339", + "sig" : "3045022100eff04781c9cbcd162d0a25a6e2ebcca43506c523385cb515d49ea38a1b12fcad022015acd73194c91a95478534f23015b672ebed213e45424dd2c8e26ac8b3eb34a5", + "result" : "valid" }, { - "tcId": 321, - "comment": "special case hash", - "flags": [ + "tcId" : 321, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "33363130363732343432", - "sig": "3045022100f58b4e3110a64bf1b5db97639ee0e5a9c8dfa49dc59b679891f520fdf0584c8702202cd8fe51888aee9db3e075440fd4db73b5c732fb87b510e97093d66415f62af7", - "result": "valid" + "msg" : "33363130363732343432", + "sig" : "3045022100f58b4e3110a64bf1b5db97639ee0e5a9c8dfa49dc59b679891f520fdf0584c8702202cd8fe51888aee9db3e075440fd4db73b5c732fb87b510e97093d66415f62af7", + "result" : "valid" }, { - "tcId": 322, - "comment": "special case hash", - "flags": [ + "tcId" : 322, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "31303534323430373035", - "sig": "3045022100f8abecaa4f0c502de4bf5903d48417f786bf92e8ad72fec0bd7fcb7800c0bbe302204c7f9e231076a30b7ae36b0cebe69ccef1cd194f7cce93a5588fd6814f437c0e", - "result": "valid" + "msg" : "31303534323430373035", + "sig" : "3045022100f8abecaa4f0c502de4bf5903d48417f786bf92e8ad72fec0bd7fcb7800c0bbe302204c7f9e231076a30b7ae36b0cebe69ccef1cd194f7cce93a5588fd6814f437c0e", + "result" : "valid" }, { - "tcId": 323, - "comment": "special case hash", - "flags": [ + "tcId" : 323, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "35313734343438313937", - "sig": "304402205d5b38bd37ad498b2227a633268a8cca879a5c7c94a4e416bd0a614d09e606d2022012b8d664ea9991062ecbb834e58400e25c46007af84f6007d7f1685443269afe", - "result": "valid" + "msg" : "35313734343438313937", + "sig" : "304402205d5b38bd37ad498b2227a633268a8cca879a5c7c94a4e416bd0a614d09e606d2022012b8d664ea9991062ecbb834e58400e25c46007af84f6007d7f1685443269afe", + "result" : "valid" }, { - "tcId": 324, - "comment": "special case hash", - "flags": [ + "tcId" : 324, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "31393637353631323531", - "sig": "304402200c1cd9fe4034f086a2b52d65b9d3834d72aebe7f33dfe8f976da82648177d8e3022013105782e3d0cfe85c2778dec1a848b27ac0ae071aa6da341a9553a946b41e59", - "result": "valid" + "msg" : "31393637353631323531", + "sig" : "304402200c1cd9fe4034f086a2b52d65b9d3834d72aebe7f33dfe8f976da82648177d8e3022013105782e3d0cfe85c2778dec1a848b27ac0ae071aa6da341a9553a946b41e59", + "result" : "valid" }, { - "tcId": 325, - "comment": "special case hash", - "flags": [ + "tcId" : 325, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "33343437323533333433", - "sig": 
"3045022100ae7935fb96ff246b7b5d5662870d1ba587b03d6e1360baf47988b5c02ccc1a5b02205f00c323272083782d4a59f2dfd65e49de0693627016900ef7e61428056664b3", - "result": "valid" + "msg" : "33343437323533333433", + "sig" : "3045022100ae7935fb96ff246b7b5d5662870d1ba587b03d6e1360baf47988b5c02ccc1a5b02205f00c323272083782d4a59f2dfd65e49de0693627016900ef7e61428056664b3", + "result" : "valid" }, { - "tcId": 326, - "comment": "special case hash", - "flags": [ + "tcId" : 326, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "333638323634333138", - "sig": "3044022000a134b5c6ccbcefd4c882b945baeb4933444172795fa6796aae1490675470980220566e46105d24d890151e3eea3ebf88f5b92b3f5ec93a217765a6dcbd94f2c55b", - "result": "valid" + "msg" : "333638323634333138", + "sig" : "3044022000a134b5c6ccbcefd4c882b945baeb4933444172795fa6796aae1490675470980220566e46105d24d890151e3eea3ebf88f5b92b3f5ec93a217765a6dcbd94f2c55b", + "result" : "valid" }, { - "tcId": 327, - "comment": "special case hash", - "flags": [ + "tcId" : 327, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "33323631313938363038", - "sig": "304402202e4721363ad3992c139e5a1c26395d2c2d777824aa24fde075e0d7381171309d0220740f7c494418e1300dd4512f782a58800bff6a7abdfdd20fbbd4f05515ca1a4f", - "result": "valid" + "msg" : "33323631313938363038", + "sig" : "304402202e4721363ad3992c139e5a1c26395d2c2d777824aa24fde075e0d7381171309d0220740f7c494418e1300dd4512f782a58800bff6a7abdfdd20fbbd4f05515ca1a4f", + "result" : "valid" }, { - "tcId": 328, - "comment": "special case hash", - "flags": [ + "tcId" : 328, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "39363738373831303934", - "sig": "304402206852e9d3cd9fe373c2d504877967d365ab1456707b6817a042864694e1960ccf0220064b27ea142b30887b84c86adccb2fa39a6911ad21fc7e819f593be52bc4f3bd", - "result": "valid" + "msg" : "39363738373831303934", + "sig" : "304402206852e9d3cd9fe373c2d504877967d365ab1456707b6817a042864694e1960ccf0220064b27ea142b30887b84c86adccb2fa39a6911ad21fc7e819f593be52bc4f3bd", + "result" : "valid" }, { - "tcId": 329, - "comment": "special case hash", - "flags": [ + "tcId" : 329, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "34393538383233383233", - "sig": "30440220188a8c5648dc79eace158cf886c62b5468f05fd95f03a7635c5b4c31f09af4c5022036361a0b571a00c6cd5e686ccbfcfa703c4f97e48938346d0c103fdc76dc5867", - "result": "valid" + "msg" : "34393538383233383233", + "sig" : "30440220188a8c5648dc79eace158cf886c62b5468f05fd95f03a7635c5b4c31f09af4c5022036361a0b571a00c6cd5e686ccbfcfa703c4f97e48938346d0c103fdc76dc5867", + "result" : "valid" }, { - "tcId": 330, - "comment": "special case hash", - "flags": [ + "tcId" : 330, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "383234363337383337", - "sig": "3045022100a74f1fb9a8263f62fc4416a5b7d584f4206f3996bb91f6fc8e73b9e92bad0e1302206815032e8c7d76c3ab06a86f33249ce9940148cb36d1f417c2e992e801afa3fa", - "result": "valid" + "msg" : "383234363337383337", + "sig" : "3045022100a74f1fb9a8263f62fc4416a5b7d584f4206f3996bb91f6fc8e73b9e92bad0e1302206815032e8c7d76c3ab06a86f33249ce9940148cb36d1f417c2e992e801afa3fa", + "result" : "valid" }, { - "tcId": 331, - "comment": "special case hash", - "flags": [ + "tcId" : 331, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "3131303230383333373736", - "sig": "3044022007244865b72ff37e62e3146f0dc14682badd7197799135f0b00ade7671742bfe02200d80c2238edb4e4a7a86a8c57ca9af1711f406f7f5da0299aa04e2932d960754", 
- "result": "valid" + "msg" : "3131303230383333373736", + "sig" : "3044022007244865b72ff37e62e3146f0dc14682badd7197799135f0b00ade7671742bfe02200d80c2238edb4e4a7a86a8c57ca9af1711f406f7f5da0299aa04e2932d960754", + "result" : "valid" }, { - "tcId": 332, - "comment": "special case hash", - "flags": [ + "tcId" : 332, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "313333383731363438", - "sig": "3045022100da7fdd05b5badabd619d805c4ee7d9a84f84ddd5cf9c5bf4d4338140d689ef08022028f1cf4fa1c3c5862cfa149c0013cf5fe6cf5076cae000511063e7de25bb38e5", - "result": "valid" + "msg" : "313333383731363438", + "sig" : "3045022100da7fdd05b5badabd619d805c4ee7d9a84f84ddd5cf9c5bf4d4338140d689ef08022028f1cf4fa1c3c5862cfa149c0013cf5fe6cf5076cae000511063e7de25bb38e5", + "result" : "valid" }, { - "tcId": 333, - "comment": "special case hash", - "flags": [ + "tcId" : 333, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "333232313434313632", - "sig": "3045022100d3027c656f6d4fdfd8ede22093e3c303b0133c340d615e7756f6253aea927238022009aef060c8e4cef972974011558df144fed25ca69ae8d0b2eaf1a8feefbec417", - "result": "valid" + "msg" : "333232313434313632", + "sig" : "3045022100d3027c656f6d4fdfd8ede22093e3c303b0133c340d615e7756f6253aea927238022009aef060c8e4cef972974011558df144fed25ca69ae8d0b2eaf1a8feefbec417", + "result" : "valid" }, { - "tcId": 334, - "comment": "special case hash", - "flags": [ + "tcId" : 334, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "3130363836363535353436", - "sig": "304402200bf6c0188dc9571cd0e21eecac5fbb19d2434988e9cc10244593ef3a98099f6902204864a562661f9221ec88e3dd0bc2f6e27ac128c30cc1a80f79ec670a22b042ee", - "result": "valid" + "msg" : "3130363836363535353436", + "sig" : "304402200bf6c0188dc9571cd0e21eecac5fbb19d2434988e9cc10244593ef3a98099f6902204864a562661f9221ec88e3dd0bc2f6e27ac128c30cc1a80f79ec670a22b042ee", + "result" : "valid" }, { - "tcId": 335, - "comment": "special case hash", - "flags": [ + "tcId" : 335, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "3632313535323436", - "sig": "3045022100ae459640d5d1179be47a47fa538e16d94ddea5585e7a244804a51742c686443a02206c8e30e530a634fae80b3ceb062978b39edbe19777e0a24553b68886181fd897", - "result": "valid" + "msg" : "3632313535323436", + "sig" : "3045022100ae459640d5d1179be47a47fa538e16d94ddea5585e7a244804a51742c686443a02206c8e30e530a634fae80b3ceb062978b39edbe19777e0a24553b68886181fd897", + "result" : "valid" }, { - "tcId": 336, - "comment": "special case hash", - "flags": [ + "tcId" : 336, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "37303330383138373734", - "sig": "304402201cf3517ba3bf2ab8b9ead4ebb6e866cb88a1deacb6a785d3b63b483ca02ac4950220249a798b73606f55f5f1c70de67cb1a0cff95d7dc50b3a617df861bad3c6b1c9", - "result": "valid" + "msg" : "37303330383138373734", + "sig" : "304402201cf3517ba3bf2ab8b9ead4ebb6e866cb88a1deacb6a785d3b63b483ca02ac4950220249a798b73606f55f5f1c70de67cb1a0cff95d7dc50b3a617df861bad3c6b1c9", + "result" : "valid" }, { - "tcId": 337, - "comment": "special case hash", - "flags": [ + "tcId" : 337, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "35393234353233373434", - "sig": "3045022100e69b5238265ea35d77e4dd172288d8cea19810a10292617d5976519dc5757cb802204b03c5bc47e826bdb27328abd38d3056d77476b2130f3df6ec4891af08ba1e29", - "result": "valid" + "msg" : "35393234353233373434", + "sig" : 
"3045022100e69b5238265ea35d77e4dd172288d8cea19810a10292617d5976519dc5757cb802204b03c5bc47e826bdb27328abd38d3056d77476b2130f3df6ec4891af08ba1e29", + "result" : "valid" }, { - "tcId": 338, - "comment": "special case hash", - "flags": [ + "tcId" : 338, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "31343935353836363231", - "sig": "304402205f9d7d7c870d085fc1d49fff69e4a275812800d2cf8973e7325866cb40fa2b6f02206d1f5491d9f717a597a15fd540406486d76a44697b3f0d9d6dcef6669f8a0a56", - "result": "valid" + "msg" : "31343935353836363231", + "sig" : "304402205f9d7d7c870d085fc1d49fff69e4a275812800d2cf8973e7325866cb40fa2b6f02206d1f5491d9f717a597a15fd540406486d76a44697b3f0d9d6dcef6669f8a0a56", + "result" : "valid" }, { - "tcId": 339, - "comment": "special case hash", - "flags": [ + "tcId" : 339, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "34303035333134343036", - "sig": "304402200a7d5b1959f71df9f817146ee49bd5c89b431e7993e2fdecab6858957da685ae02200f8aad2d254690bdc13f34a4fec44a02fd745a422df05ccbb54635a8b86b9609", - "result": "valid" + "msg" : "34303035333134343036", + "sig" : "304402200a7d5b1959f71df9f817146ee49bd5c89b431e7993e2fdecab6858957da685ae02200f8aad2d254690bdc13f34a4fec44a02fd745a422df05ccbb54635a8b86b9609", + "result" : "valid" }, { - "tcId": 340, - "comment": "special case hash", - "flags": [ + "tcId" : 340, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "33303936343537353132", - "sig": "3044022079e88bf576b74bc07ca142395fda28f03d3d5e640b0b4ff0752c6d94cd553408022032cea05bd2d706c8f6036a507e2ab7766004f0904e2e5c5862749c0073245d6a", - "result": "valid" + "msg" : "33303936343537353132", + "sig" : "3044022079e88bf576b74bc07ca142395fda28f03d3d5e640b0b4ff0752c6d94cd553408022032cea05bd2d706c8f6036a507e2ab7766004f0904e2e5c5862749c0073245d6a", + "result" : "valid" }, { - "tcId": 341, - "comment": "special case hash", - "flags": [ + "tcId" : 341, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "32373834303235363230", - "sig": "30450221009d54e037a00212b377bc8874798b8da080564bbdf7e07591b861285809d01488022018b4e557667a82bd95965f0706f81a29243fbdd86968a7ebeb43069db3b18c7f", - "result": "valid" + "msg" : "32373834303235363230", + "sig" : "30450221009d54e037a00212b377bc8874798b8da080564bbdf7e07591b861285809d01488022018b4e557667a82bd95965f0706f81a29243fbdd86968a7ebeb43069db3b18c7f", + "result" : "valid" }, { - "tcId": 342, - "comment": "special case hash", - "flags": [ + "tcId" : 342, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "32363138373837343138", - "sig": "304402202664f1ffa982fedbcc7cab1b8bc6e2cb420218d2a6077ad08e591ba9feab33bd022049f5c7cb515e83872a3d41b4cdb85f242ad9d61a5bfc01debfbb52c6c84ba728", - "result": "valid" + "msg" : "32363138373837343138", + "sig" : "304402202664f1ffa982fedbcc7cab1b8bc6e2cb420218d2a6077ad08e591ba9feab33bd022049f5c7cb515e83872a3d41b4cdb85f242ad9d61a5bfc01debfbb52c6c84ba728", + "result" : "valid" }, { - "tcId": 343, - "comment": "special case hash", - "flags": [ + "tcId" : 343, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "31363432363235323632", - "sig": "304402205827518344844fd6a7de73cbb0a6befdea7b13d2dee4475317f0f18ffc81524b02204f5ccb4e0b488b5a5d760aacddb2d791970fe43da61eb30e2e90208a817e46db", - "result": "valid" + "msg" : "31363432363235323632", + "sig" : 
"304402205827518344844fd6a7de73cbb0a6befdea7b13d2dee4475317f0f18ffc81524b02204f5ccb4e0b488b5a5d760aacddb2d791970fe43da61eb30e2e90208a817e46db", + "result" : "valid" }, { - "tcId": 344, - "comment": "special case hash", - "flags": [ + "tcId" : 344, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "36383234313839343336", - "sig": "304502210097ab19bd139cac319325869218b1bce111875d63fb12098a04b0cd59b6fdd3a30220431d9cea3a243847303cebda56476431d034339f31d785ee8852db4f040d4921", - "result": "valid" + "msg" : "36383234313839343336", + "sig" : "304502210097ab19bd139cac319325869218b1bce111875d63fb12098a04b0cd59b6fdd3a30220431d9cea3a243847303cebda56476431d034339f31d785ee8852db4f040d4921", + "result" : "valid" }, { - "tcId": 345, - "comment": "special case hash", - "flags": [ + "tcId" : 345, + "comment" : "special case hash", + "flags" : [ "SpecialCaseHash" ], - "msg": "343834323435343235", - "sig": "3044022052c683144e44119ae2013749d4964ef67509278f6d38ba869adcfa69970e123d02203479910167408f45bda420a626ec9c4ec711c1274be092198b4187c018b562ca", - "result": "valid" + "msg" : "343834323435343235", + "sig" : "3044022052c683144e44119ae2013749d4964ef67509278f6d38ba869adcfa69970e123d02203479910167408f45bda420a626ec9c4ec711c1274be092198b4187c018b562ca", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "0407310f90a9eae149a08402f54194a0f7b4ac427bf8d9bd6c7681071dc47dc36226a6d37ac46d61fd600c0bf1bff87689ed117dda6b0e59318ae010a197a26ca0", - "wx": "07310f90a9eae149a08402f54194a0f7b4ac427bf8d9bd6c7681071dc47dc362", - "wy": "26a6d37ac46d61fd600c0bf1bff87689ed117dda6b0e59318ae010a197a26ca0" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "0407310f90a9eae149a08402f54194a0f7b4ac427bf8d9bd6c7681071dc47dc36226a6d37ac46d61fd600c0bf1bff87689ed117dda6b0e59318ae010a197a26ca0", + "wx" : "07310f90a9eae149a08402f54194a0f7b4ac427bf8d9bd6c7681071dc47dc362", + "wy" : "26a6d37ac46d61fd600c0bf1bff87689ed117dda6b0e59318ae010a197a26ca0" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000407310f90a9eae149a08402f54194a0f7b4ac427bf8d9bd6c7681071dc47dc36226a6d37ac46d61fd600c0bf1bff87689ed117dda6b0e59318ae010a197a26ca0", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEBzEPkKnq4UmghAL1QZSg97SsQnv42b1s\ndoEHHcR9w2ImptN6xG1h/WAMC/G/+HaJ7RF92msOWTGK4BChl6JsoA==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000407310f90a9eae149a08402f54194a0f7b4ac427bf8d9bd6c7681071dc47dc36226a6d37ac46d61fd600c0bf1bff87689ed117dda6b0e59318ae010a197a26ca0", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEBzEPkKnq4UmghAL1QZSg97SsQnv42b1s\ndoEHHcR9w2ImptN6xG1h/WAMC/G/+HaJ7RF92msOWTGK4BChl6JsoA==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 346, - "comment": "k*G has a large x-coordinate", - "flags": [ + "tcId" : 346, + "comment" : "k*G has a large x-coordinate", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "30160211014551231950b75fc4402da1722fc9baeb020103", - "result": "valid" + "msg" : "313233343030", + "sig" : "30160211014551231950b75fc4402da1722fc9baeb020103", + "result" : "valid" }, { - "tcId": 347, - "comment": "r too large", - "flags": [ + "tcId" : 347, + "comment" : "r too large", + "flags" : [ "ArithmeticError" ], - 
"msg": "313233343030", - "sig": "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2c020103", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2c020103", + "result" : "invalid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04bc97e7585eecad48e16683bc4091708e1a930c683fc47001d4b383594f2c4e22705989cf69daeadd4e4e4b8151ed888dfec20fb01728d89d56b3f38f2ae9c8c5", - "wx": "00bc97e7585eecad48e16683bc4091708e1a930c683fc47001d4b383594f2c4e22", - "wy": "705989cf69daeadd4e4e4b8151ed888dfec20fb01728d89d56b3f38f2ae9c8c5" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04bc97e7585eecad48e16683bc4091708e1a930c683fc47001d4b383594f2c4e22705989cf69daeadd4e4e4b8151ed888dfec20fb01728d89d56b3f38f2ae9c8c5", + "wx" : "00bc97e7585eecad48e16683bc4091708e1a930c683fc47001d4b383594f2c4e22", + "wy" : "705989cf69daeadd4e4e4b8151ed888dfec20fb01728d89d56b3f38f2ae9c8c5" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004bc97e7585eecad48e16683bc4091708e1a930c683fc47001d4b383594f2c4e22705989cf69daeadd4e4e4b8151ed888dfec20fb01728d89d56b3f38f2ae9c8c5", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEvJfnWF7srUjhZoO8QJFwjhqTDGg/xHAB\n1LODWU8sTiJwWYnPadrq3U5OS4FR7YiN/sIPsBco2J1Ws/OPKunIxQ==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004bc97e7585eecad48e16683bc4091708e1a930c683fc47001d4b383594f2c4e22705989cf69daeadd4e4e4b8151ed888dfec20fb01728d89d56b3f38f2ae9c8c5", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEvJfnWF7srUjhZoO8QJFwjhqTDGg/xHAB\n1LODWU8sTiJwWYnPadrq3U5OS4FR7YiN/sIPsBco2J1Ws/OPKunIxQ==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 348, - "comment": "r,s are large", - "flags": [ + "tcId" : 348, + "comment" : "r,s are large", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd036413f020103", - "result": "valid" + "msg" : "313233343030", + "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd036413f020103", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "0444ad339afbc21e9abf7b602a5ca535ea378135b6d10d81310bdd8293d1df3252b63ff7d0774770f8fe1d1722fa83acd02f434e4fc110a0cc8f6dddd37d56c463", - "wx": "44ad339afbc21e9abf7b602a5ca535ea378135b6d10d81310bdd8293d1df3252", - "wy": "00b63ff7d0774770f8fe1d1722fa83acd02f434e4fc110a0cc8f6dddd37d56c463" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "0444ad339afbc21e9abf7b602a5ca535ea378135b6d10d81310bdd8293d1df3252b63ff7d0774770f8fe1d1722fa83acd02f434e4fc110a0cc8f6dddd37d56c463", + "wx" : "44ad339afbc21e9abf7b602a5ca535ea378135b6d10d81310bdd8293d1df3252", + "wy" : "00b63ff7d0774770f8fe1d1722fa83acd02f434e4fc110a0cc8f6dddd37d56c463" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000444ad339afbc21e9abf7b602a5ca535ea378135b6d10d81310bdd8293d1df3252b63ff7d0774770f8fe1d1722fa83acd02f434e4fc110a0cc8f6dddd37d56c463", - "publicKeyPem": "-----BEGIN PUBLIC 
KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAERK0zmvvCHpq/e2AqXKU16jeBNbbRDYEx\nC92Ck9HfMlK2P/fQd0dw+P4dFyL6g6zQL0NOT8EQoMyPbd3TfVbEYw==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000444ad339afbc21e9abf7b602a5ca535ea378135b6d10d81310bdd8293d1df3252b63ff7d0774770f8fe1d1722fa83acd02f434e4fc110a0cc8f6dddd37d56c463", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAERK0zmvvCHpq/e2AqXKU16jeBNbbRDYEx\nC92Ck9HfMlK2P/fQd0dw+P4dFyL6g6zQL0NOT8EQoMyPbd3TfVbEYw==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 349, - "comment": "r and s^-1 have a large Hamming weight", - "flags": [ + "tcId" : 349, + "comment" : "r and s^-1 have a large Hamming weight", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02203e9a7582886089c62fb840cf3b83061cd1cff3ae4341808bb5bdee6191174177", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02203e9a7582886089c62fb840cf3b83061cd1cff3ae4341808bb5bdee6191174177", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "041260c2122c9e244e1af5151bede0c3ae23b54d7c596881d3eebad21f37dd878c5c9a0c1a9ade76737a8811bd6a7f9287c978ee396aa89c11e47229d2ccb552f0", - "wx": "1260c2122c9e244e1af5151bede0c3ae23b54d7c596881d3eebad21f37dd878c", - "wy": "5c9a0c1a9ade76737a8811bd6a7f9287c978ee396aa89c11e47229d2ccb552f0" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "041260c2122c9e244e1af5151bede0c3ae23b54d7c596881d3eebad21f37dd878c5c9a0c1a9ade76737a8811bd6a7f9287c978ee396aa89c11e47229d2ccb552f0", + "wx" : "1260c2122c9e244e1af5151bede0c3ae23b54d7c596881d3eebad21f37dd878c", + "wy" : "5c9a0c1a9ade76737a8811bd6a7f9287c978ee396aa89c11e47229d2ccb552f0" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200041260c2122c9e244e1af5151bede0c3ae23b54d7c596881d3eebad21f37dd878c5c9a0c1a9ade76737a8811bd6a7f9287c978ee396aa89c11e47229d2ccb552f0", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEEmDCEiyeJE4a9RUb7eDDriO1TXxZaIHT\n7rrSHzfdh4xcmgwamt52c3qIEb1qf5KHyXjuOWqonBHkcinSzLVS8A==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200041260c2122c9e244e1af5151bede0c3ae23b54d7c596881d3eebad21f37dd878c5c9a0c1a9ade76737a8811bd6a7f9287c978ee396aa89c11e47229d2ccb552f0", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEEmDCEiyeJE4a9RUb7eDDriO1TXxZaIHT\n7rrSHzfdh4xcmgwamt52c3qIEb1qf5KHyXjuOWqonBHkcinSzLVS8A==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 350, - "comment": "r and s^-1 have a large Hamming weight", - "flags": [ + "tcId" : 350, + "comment" : "r and s^-1 have a large Hamming weight", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022024238e70b431b1a64efdf9032669939d4b77f249503fc6905feb7540dea3e6d2", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022024238e70b431b1a64efdf9032669939d4b77f249503fc6905feb7540dea3e6d2", + "result" : "valid" } ] }, { - 
"type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "041877045be25d34a1d0600f9d5c00d0645a2a54379b6ceefad2e6bf5c2a3352ce821a532cc1751ee1d36d41c3d6ab4e9b143e44ec46d73478ea6a79a5c0e54159", - "wx": "1877045be25d34a1d0600f9d5c00d0645a2a54379b6ceefad2e6bf5c2a3352ce", - "wy": "00821a532cc1751ee1d36d41c3d6ab4e9b143e44ec46d73478ea6a79a5c0e54159" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "041877045be25d34a1d0600f9d5c00d0645a2a54379b6ceefad2e6bf5c2a3352ce821a532cc1751ee1d36d41c3d6ab4e9b143e44ec46d73478ea6a79a5c0e54159", + "wx" : "1877045be25d34a1d0600f9d5c00d0645a2a54379b6ceefad2e6bf5c2a3352ce", + "wy" : "00821a532cc1751ee1d36d41c3d6ab4e9b143e44ec46d73478ea6a79a5c0e54159" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200041877045be25d34a1d0600f9d5c00d0645a2a54379b6ceefad2e6bf5c2a3352ce821a532cc1751ee1d36d41c3d6ab4e9b143e44ec46d73478ea6a79a5c0e54159", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEGHcEW+JdNKHQYA+dXADQZFoqVDebbO76\n0ua/XCozUs6CGlMswXUe4dNtQcPWq06bFD5E7EbXNHjqanmlwOVBWQ==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200041877045be25d34a1d0600f9d5c00d0645a2a54379b6ceefad2e6bf5c2a3352ce821a532cc1751ee1d36d41c3d6ab4e9b143e44ec46d73478ea6a79a5c0e54159", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEGHcEW+JdNKHQYA+dXADQZFoqVDebbO76\n0ua/XCozUs6CGlMswXUe4dNtQcPWq06bFD5E7EbXNHjqanmlwOVBWQ==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 351, - "comment": "small r and s", - "flags": [ + "tcId" : 351, + "comment" : "small r and s", + "flags" : [ "SmallRandS", "ArithmeticError" ], - "msg": "313233343030", - "sig": "3006020101020101", - "result": "valid" + "msg" : "313233343030", + "sig" : "3006020101020101", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04455439fcc3d2deeceddeaece60e7bd17304f36ebb602adf5a22e0b8f1db46a50aec38fb2baf221e9a8d1887c7bf6222dd1834634e77263315af6d23609d04f77", - "wx": "455439fcc3d2deeceddeaece60e7bd17304f36ebb602adf5a22e0b8f1db46a50", - "wy": "00aec38fb2baf221e9a8d1887c7bf6222dd1834634e77263315af6d23609d04f77" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04455439fcc3d2deeceddeaece60e7bd17304f36ebb602adf5a22e0b8f1db46a50aec38fb2baf221e9a8d1887c7bf6222dd1834634e77263315af6d23609d04f77", + "wx" : "455439fcc3d2deeceddeaece60e7bd17304f36ebb602adf5a22e0b8f1db46a50", + "wy" : "00aec38fb2baf221e9a8d1887c7bf6222dd1834634e77263315af6d23609d04f77" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004455439fcc3d2deeceddeaece60e7bd17304f36ebb602adf5a22e0b8f1db46a50aec38fb2baf221e9a8d1887c7bf6222dd1834634e77263315af6d23609d04f77", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAERVQ5/MPS3uzt3q7OYOe9FzBPNuu2Aq31\noi4Ljx20alCuw4+yuvIh6ajRiHx79iIt0YNGNOdyYzFa9tI2CdBPdw==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004455439fcc3d2deeceddeaece60e7bd17304f36ebb602adf5a22e0b8f1db46a50aec38fb2baf221e9a8d1887c7bf6222dd1834634e77263315af6d23609d04f77", + "publicKeyPem" : "-----BEGIN PUBLIC 
KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAERVQ5/MPS3uzt3q7OYOe9FzBPNuu2Aq31\noi4Ljx20alCuw4+yuvIh6ajRiHx79iIt0YNGNOdyYzFa9tI2CdBPdw==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 352, - "comment": "small r and s", - "flags": [ + "tcId" : 352, + "comment" : "small r and s", + "flags" : [ "SmallRandS", "ArithmeticError" ], - "msg": "313233343030", - "sig": "3006020101020102", - "result": "valid" + "msg" : "313233343030", + "sig" : "3006020101020102", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "042e1f466b024c0c3ace2437de09127fed04b706f94b19a21bb1c2acf35cece7180449ae3523d72534e964972cfd3b38af0bddd9619e5af223e4d1a40f34cf9f1d", - "wx": "2e1f466b024c0c3ace2437de09127fed04b706f94b19a21bb1c2acf35cece718", - "wy": "0449ae3523d72534e964972cfd3b38af0bddd9619e5af223e4d1a40f34cf9f1d" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "042e1f466b024c0c3ace2437de09127fed04b706f94b19a21bb1c2acf35cece7180449ae3523d72534e964972cfd3b38af0bddd9619e5af223e4d1a40f34cf9f1d", + "wx" : "2e1f466b024c0c3ace2437de09127fed04b706f94b19a21bb1c2acf35cece718", + "wy" : "0449ae3523d72534e964972cfd3b38af0bddd9619e5af223e4d1a40f34cf9f1d" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200042e1f466b024c0c3ace2437de09127fed04b706f94b19a21bb1c2acf35cece7180449ae3523d72534e964972cfd3b38af0bddd9619e5af223e4d1a40f34cf9f1d", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAELh9GawJMDDrOJDfeCRJ/7QS3BvlLGaIb\nscKs81zs5xgESa41I9clNOlklyz9OzivC93ZYZ5a8iPk0aQPNM+fHQ==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200042e1f466b024c0c3ace2437de09127fed04b706f94b19a21bb1c2acf35cece7180449ae3523d72534e964972cfd3b38af0bddd9619e5af223e4d1a40f34cf9f1d", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAELh9GawJMDDrOJDfeCRJ/7QS3BvlLGaIb\nscKs81zs5xgESa41I9clNOlklyz9OzivC93ZYZ5a8iPk0aQPNM+fHQ==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 353, - "comment": "small r and s", - "flags": [ + "tcId" : 353, + "comment" : "small r and s", + "flags" : [ "SmallRandS", "ArithmeticError" ], - "msg": "313233343030", - "sig": "3006020101020103", - "result": "valid" + "msg" : "313233343030", + "sig" : "3006020101020103", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "048e7abdbbd18de7452374c1879a1c3b01d13261e7d4571c3b47a1c76c55a2337326ed897cd517a4f5349db809780f6d2f2b9f6299d8b5a89077f1119a718fd7b3", - "wx": "008e7abdbbd18de7452374c1879a1c3b01d13261e7d4571c3b47a1c76c55a23373", - "wy": "26ed897cd517a4f5349db809780f6d2f2b9f6299d8b5a89077f1119a718fd7b3" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "048e7abdbbd18de7452374c1879a1c3b01d13261e7d4571c3b47a1c76c55a2337326ed897cd517a4f5349db809780f6d2f2b9f6299d8b5a89077f1119a718fd7b3", + "wx" : "008e7abdbbd18de7452374c1879a1c3b01d13261e7d4571c3b47a1c76c55a23373", + "wy" : "26ed897cd517a4f5349db809780f6d2f2b9f6299d8b5a89077f1119a718fd7b3" }, - "publicKeyDer": 
"3056301006072a8648ce3d020106052b8104000a034200048e7abdbbd18de7452374c1879a1c3b01d13261e7d4571c3b47a1c76c55a2337326ed897cd517a4f5349db809780f6d2f2b9f6299d8b5a89077f1119a718fd7b3", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEjnq9u9GN50UjdMGHmhw7AdEyYefUVxw7\nR6HHbFWiM3Mm7Yl81Rek9TSduAl4D20vK59imdi1qJB38RGacY/Xsw==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200048e7abdbbd18de7452374c1879a1c3b01d13261e7d4571c3b47a1c76c55a2337326ed897cd517a4f5349db809780f6d2f2b9f6299d8b5a89077f1119a718fd7b3", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEjnq9u9GN50UjdMGHmhw7AdEyYefUVxw7\nR6HHbFWiM3Mm7Yl81Rek9TSduAl4D20vK59imdi1qJB38RGacY/Xsw==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 354, - "comment": "small r and s", - "flags": [ + "tcId" : 354, + "comment" : "small r and s", + "flags" : [ "SmallRandS", "ArithmeticError" ], - "msg": "313233343030", - "sig": "3006020102020101", - "result": "valid" + "msg" : "313233343030", + "sig" : "3006020102020101", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "047b333d4340d3d718dd3e6aff7de7bbf8b72bfd616c8420056052842376b9af1942117c5afeac755d6f376fc6329a7d76051b87123a4a5d0bc4a539380f03de7b", - "wx": "7b333d4340d3d718dd3e6aff7de7bbf8b72bfd616c8420056052842376b9af19", - "wy": "42117c5afeac755d6f376fc6329a7d76051b87123a4a5d0bc4a539380f03de7b" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "047b333d4340d3d718dd3e6aff7de7bbf8b72bfd616c8420056052842376b9af1942117c5afeac755d6f376fc6329a7d76051b87123a4a5d0bc4a539380f03de7b", + "wx" : "7b333d4340d3d718dd3e6aff7de7bbf8b72bfd616c8420056052842376b9af19", + "wy" : "42117c5afeac755d6f376fc6329a7d76051b87123a4a5d0bc4a539380f03de7b" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200047b333d4340d3d718dd3e6aff7de7bbf8b72bfd616c8420056052842376b9af1942117c5afeac755d6f376fc6329a7d76051b87123a4a5d0bc4a539380f03de7b", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEezM9Q0DT1xjdPmr/fee7+Lcr/WFshCAF\nYFKEI3a5rxlCEXxa/qx1XW83b8Yymn12BRuHEjpKXQvEpTk4DwPeew==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200047b333d4340d3d718dd3e6aff7de7bbf8b72bfd616c8420056052842376b9af1942117c5afeac755d6f376fc6329a7d76051b87123a4a5d0bc4a539380f03de7b", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEezM9Q0DT1xjdPmr/fee7+Lcr/WFshCAF\nYFKEI3a5rxlCEXxa/qx1XW83b8Yymn12BRuHEjpKXQvEpTk4DwPeew==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 355, - "comment": "small r and s", - "flags": [ + "tcId" : 355, + "comment" : "small r and s", + "flags" : [ "SmallRandS", "ArithmeticError" ], - "msg": "313233343030", - "sig": "3006020102020102", - "result": "valid" + "msg" : "313233343030", + "sig" : "3006020102020102", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04d30ca4a0ddb6616c851d30ced682c40f83c62758a1f2759988d6763a88f1c0e503a80d5415650d41239784e8e2fb1235e9fe991d112ebb81186cbf0da2de3aff", - "wx": "00d30ca4a0ddb6616c851d30ced682c40f83c62758a1f2759988d6763a88f1c0e5", - "wy": 
"03a80d5415650d41239784e8e2fb1235e9fe991d112ebb81186cbf0da2de3aff" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04d30ca4a0ddb6616c851d30ced682c40f83c62758a1f2759988d6763a88f1c0e503a80d5415650d41239784e8e2fb1235e9fe991d112ebb81186cbf0da2de3aff", + "wx" : "00d30ca4a0ddb6616c851d30ced682c40f83c62758a1f2759988d6763a88f1c0e5", + "wy" : "03a80d5415650d41239784e8e2fb1235e9fe991d112ebb81186cbf0da2de3aff" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004d30ca4a0ddb6616c851d30ced682c40f83c62758a1f2759988d6763a88f1c0e503a80d5415650d41239784e8e2fb1235e9fe991d112ebb81186cbf0da2de3aff", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE0wykoN22YWyFHTDO1oLED4PGJ1ih8nWZ\niNZ2OojxwOUDqA1UFWUNQSOXhOji+xI16f6ZHREuu4EYbL8Not46/w==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004d30ca4a0ddb6616c851d30ced682c40f83c62758a1f2759988d6763a88f1c0e503a80d5415650d41239784e8e2fb1235e9fe991d112ebb81186cbf0da2de3aff", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE0wykoN22YWyFHTDO1oLED4PGJ1ih8nWZ\niNZ2OojxwOUDqA1UFWUNQSOXhOji+xI16f6ZHREuu4EYbL8Not46/w==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 356, - "comment": "small r and s", - "flags": [ + "tcId" : 356, + "comment" : "small r and s", + "flags" : [ "SmallRandS", "ArithmeticError" ], - "msg": "313233343030", - "sig": "3006020102020103", - "result": "valid" + "msg" : "313233343030", + "sig" : "3006020102020103", + "result" : "valid" }, { - "tcId": 357, - "comment": "r is larger than n", - "flags": [ + "tcId" : 357, + "comment" : "r is larger than n", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364143020103", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3026022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364143020103", + "result" : "invalid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "0448969b39991297b332a652d3ee6e01e909b39904e71fa2354a7830c7750baf24b4012d1b830d199ccb1fc972b32bfded55f09cd62d257e5e844e27e57a1594ec", - "wx": "48969b39991297b332a652d3ee6e01e909b39904e71fa2354a7830c7750baf24", - "wy": "00b4012d1b830d199ccb1fc972b32bfded55f09cd62d257e5e844e27e57a1594ec" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "0448969b39991297b332a652d3ee6e01e909b39904e71fa2354a7830c7750baf24b4012d1b830d199ccb1fc972b32bfded55f09cd62d257e5e844e27e57a1594ec", + "wx" : "48969b39991297b332a652d3ee6e01e909b39904e71fa2354a7830c7750baf24", + "wy" : "00b4012d1b830d199ccb1fc972b32bfded55f09cd62d257e5e844e27e57a1594ec" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000448969b39991297b332a652d3ee6e01e909b39904e71fa2354a7830c7750baf24b4012d1b830d199ccb1fc972b32bfded55f09cd62d257e5e844e27e57a1594ec", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAESJabOZkSl7MyplLT7m4B6QmzmQTnH6I1\nSngwx3ULryS0AS0bgw0ZnMsfyXKzK/3tVfCc1i0lfl6ETiflehWU7A==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : 
"3056301006072a8648ce3d020106052b8104000a0342000448969b39991297b332a652d3ee6e01e909b39904e71fa2354a7830c7750baf24b4012d1b830d199ccb1fc972b32bfded55f09cd62d257e5e844e27e57a1594ec", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAESJabOZkSl7MyplLT7m4B6QmzmQTnH6I1\nSngwx3ULryS0AS0bgw0ZnMsfyXKzK/3tVfCc1i0lfl6ETiflehWU7A==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 358, - "comment": "s is larger than n", - "flags": [ + "tcId" : 358, + "comment" : "s is larger than n", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "30080201020203ed2979", - "result": "invalid" + "msg" : "313233343030", + "sig" : "30080201020203ed2979", + "result" : "invalid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "0402ef4d6d6cfd5a94f1d7784226e3e2a6c0a436c55839619f38fb4472b5f9ee777eb4acd4eebda5cd72875ffd2a2f26229c2dc6b46500919a432c86739f3ae866", - "wx": "02ef4d6d6cfd5a94f1d7784226e3e2a6c0a436c55839619f38fb4472b5f9ee77", - "wy": "7eb4acd4eebda5cd72875ffd2a2f26229c2dc6b46500919a432c86739f3ae866" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "0402ef4d6d6cfd5a94f1d7784226e3e2a6c0a436c55839619f38fb4472b5f9ee777eb4acd4eebda5cd72875ffd2a2f26229c2dc6b46500919a432c86739f3ae866", + "wx" : "02ef4d6d6cfd5a94f1d7784226e3e2a6c0a436c55839619f38fb4472b5f9ee77", + "wy" : "7eb4acd4eebda5cd72875ffd2a2f26229c2dc6b46500919a432c86739f3ae866" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000402ef4d6d6cfd5a94f1d7784226e3e2a6c0a436c55839619f38fb4472b5f9ee777eb4acd4eebda5cd72875ffd2a2f26229c2dc6b46500919a432c86739f3ae866", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEAu9NbWz9WpTx13hCJuPipsCkNsVYOWGf\nOPtEcrX57nd+tKzU7r2lzXKHX/0qLyYinC3GtGUAkZpDLIZznzroZg==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000402ef4d6d6cfd5a94f1d7784226e3e2a6c0a436c55839619f38fb4472b5f9ee777eb4acd4eebda5cd72875ffd2a2f26229c2dc6b46500919a432c86739f3ae866", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEAu9NbWz9WpTx13hCJuPipsCkNsVYOWGf\nOPtEcrX57nd+tKzU7r2lzXKHX/0qLyYinC3GtGUAkZpDLIZznzroZg==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 359, - "comment": "small r and s^-1", - "flags": [ + "tcId" : 359, + "comment" : "small r and s^-1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "30260202010102203a74e9d3a74e9d3a74e9d3a74e9d3a749f8ab3732a0a89604a09bce5b2916da4", - "result": "valid" + "msg" : "313233343030", + "sig" : "30260202010102203a74e9d3a74e9d3a74e9d3a74e9d3a749f8ab3732a0a89604a09bce5b2916da4", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04464f4ff715729cae5072ca3bd801d3195b67aec65e9b01aad20a2943dcbcb584b1afd29d31a39a11d570aa1597439b3b2d1971bf2f1abf15432d0207b10d1d08", - "wx": "464f4ff715729cae5072ca3bd801d3195b67aec65e9b01aad20a2943dcbcb584", - "wy": "00b1afd29d31a39a11d570aa1597439b3b2d1971bf2f1abf15432d0207b10d1d08" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : 
"04464f4ff715729cae5072ca3bd801d3195b67aec65e9b01aad20a2943dcbcb584b1afd29d31a39a11d570aa1597439b3b2d1971bf2f1abf15432d0207b10d1d08", + "wx" : "464f4ff715729cae5072ca3bd801d3195b67aec65e9b01aad20a2943dcbcb584", + "wy" : "00b1afd29d31a39a11d570aa1597439b3b2d1971bf2f1abf15432d0207b10d1d08" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004464f4ff715729cae5072ca3bd801d3195b67aec65e9b01aad20a2943dcbcb584b1afd29d31a39a11d570aa1597439b3b2d1971bf2f1abf15432d0207b10d1d08", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAERk9P9xVynK5Qcso72AHTGVtnrsZemwGq\n0gopQ9y8tYSxr9KdMaOaEdVwqhWXQ5s7LRlxvy8avxVDLQIHsQ0dCA==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004464f4ff715729cae5072ca3bd801d3195b67aec65e9b01aad20a2943dcbcb584b1afd29d31a39a11d570aa1597439b3b2d1971bf2f1abf15432d0207b10d1d08", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAERk9P9xVynK5Qcso72AHTGVtnrsZemwGq\n0gopQ9y8tYSxr9KdMaOaEdVwqhWXQ5s7LRlxvy8avxVDLQIHsQ0dCA==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 360, - "comment": "smallish r and s^-1", - "flags": [ + "tcId" : 360, + "comment" : "smallish r and s^-1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "302b02072d9b4d347952cc02200343aefc2f25d98b882e86eb9e30d55a6eb508b516510b34024ae4b6362330b3", - "result": "valid" + "msg" : "313233343030", + "sig" : "302b02072d9b4d347952cc02200343aefc2f25d98b882e86eb9e30d55a6eb508b516510b34024ae4b6362330b3", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04157f8fddf373eb5f49cfcf10d8b853cf91cbcd7d665c3522ba7dd738ddb79a4cdeadf1a5c448ea3c9f4191a8999abfcc757ac6d64567ef072c47fec613443b8f", - "wx": "157f8fddf373eb5f49cfcf10d8b853cf91cbcd7d665c3522ba7dd738ddb79a4c", - "wy": "00deadf1a5c448ea3c9f4191a8999abfcc757ac6d64567ef072c47fec613443b8f" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04157f8fddf373eb5f49cfcf10d8b853cf91cbcd7d665c3522ba7dd738ddb79a4cdeadf1a5c448ea3c9f4191a8999abfcc757ac6d64567ef072c47fec613443b8f", + "wx" : "157f8fddf373eb5f49cfcf10d8b853cf91cbcd7d665c3522ba7dd738ddb79a4c", + "wy" : "00deadf1a5c448ea3c9f4191a8999abfcc757ac6d64567ef072c47fec613443b8f" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004157f8fddf373eb5f49cfcf10d8b853cf91cbcd7d665c3522ba7dd738ddb79a4cdeadf1a5c448ea3c9f4191a8999abfcc757ac6d64567ef072c47fec613443b8f", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEFX+P3fNz619Jz88Q2LhTz5HLzX1mXDUi\nun3XON23mkzerfGlxEjqPJ9BkaiZmr/MdXrG1kVn7wcsR/7GE0Q7jw==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004157f8fddf373eb5f49cfcf10d8b853cf91cbcd7d665c3522ba7dd738ddb79a4cdeadf1a5c448ea3c9f4191a8999abfcc757ac6d64567ef072c47fec613443b8f", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEFX+P3fNz619Jz88Q2LhTz5HLzX1mXDUi\nun3XON23mkzerfGlxEjqPJ9BkaiZmr/MdXrG1kVn7wcsR/7GE0Q7jw==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 361, - "comment": "100-bit r and small s^-1", - "flags": [ + "tcId" : 361, + "comment" : "100-bit r and small s^-1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": 
"3031020d1033e67e37b32b445580bf4efc02206f906f906f906f906f906f906f906f8fe1cab5eefdb214061dce3b22789f1d6f", - "result": "valid" + "msg" : "313233343030", + "sig" : "3031020d1033e67e37b32b445580bf4efc02206f906f906f906f906f906f906f906f8fe1cab5eefdb214061dce3b22789f1d6f", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "040934a537466c07430e2c48feb990bb19fb78cecc9cee424ea4d130291aa237f0d4f92d23b462804b5b68c52558c01c9996dbf727fccabbeedb9621a400535afa", - "wx": "0934a537466c07430e2c48feb990bb19fb78cecc9cee424ea4d130291aa237f0", - "wy": "00d4f92d23b462804b5b68c52558c01c9996dbf727fccabbeedb9621a400535afa" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "040934a537466c07430e2c48feb990bb19fb78cecc9cee424ea4d130291aa237f0d4f92d23b462804b5b68c52558c01c9996dbf727fccabbeedb9621a400535afa", + "wx" : "0934a537466c07430e2c48feb990bb19fb78cecc9cee424ea4d130291aa237f0", + "wy" : "00d4f92d23b462804b5b68c52558c01c9996dbf727fccabbeedb9621a400535afa" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200040934a537466c07430e2c48feb990bb19fb78cecc9cee424ea4d130291aa237f0d4f92d23b462804b5b68c52558c01c9996dbf727fccabbeedb9621a400535afa", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAECTSlN0ZsB0MOLEj+uZC7Gft4zsyc7kJO\npNEwKRqiN/DU+S0jtGKAS1toxSVYwByZltv3J/zKu+7bliGkAFNa+g==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200040934a537466c07430e2c48feb990bb19fb78cecc9cee424ea4d130291aa237f0d4f92d23b462804b5b68c52558c01c9996dbf727fccabbeedb9621a400535afa", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAECTSlN0ZsB0MOLEj+uZC7Gft4zsyc7kJO\npNEwKRqiN/DU+S0jtGKAS1toxSVYwByZltv3J/zKu+7bliGkAFNa+g==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 362, - "comment": "small r and 100 bit s^-1", - "flags": [ + "tcId" : 362, + "comment" : "small r and 100 bit s^-1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "3026020201010220783266e90f43dafe5cd9b3b0be86de22f9de83677d0f50713a468ec72fcf5d57", - "result": "valid" + "msg" : "313233343030", + "sig" : "3026020201010220783266e90f43dafe5cd9b3b0be86de22f9de83677d0f50713a468ec72fcf5d57", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04d6ef20be66c893f741a9bf90d9b74675d1c2a31296397acb3ef174fd0b300c654a0c95478ca00399162d7f0f2dc89efdc2b28a30fbabe285857295a4b0c4e265", - "wx": "00d6ef20be66c893f741a9bf90d9b74675d1c2a31296397acb3ef174fd0b300c65", - "wy": "4a0c95478ca00399162d7f0f2dc89efdc2b28a30fbabe285857295a4b0c4e265" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04d6ef20be66c893f741a9bf90d9b74675d1c2a31296397acb3ef174fd0b300c654a0c95478ca00399162d7f0f2dc89efdc2b28a30fbabe285857295a4b0c4e265", + "wx" : "00d6ef20be66c893f741a9bf90d9b74675d1c2a31296397acb3ef174fd0b300c65", + "wy" : "4a0c95478ca00399162d7f0f2dc89efdc2b28a30fbabe285857295a4b0c4e265" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004d6ef20be66c893f741a9bf90d9b74675d1c2a31296397acb3ef174fd0b300c654a0c95478ca00399162d7f0f2dc89efdc2b28a30fbabe285857295a4b0c4e265", - "publicKeyPem": "-----BEGIN 
PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE1u8gvmbIk/dBqb+Q2bdGddHCoxKWOXrL\nPvF0/QswDGVKDJVHjKADmRYtfw8tyJ79wrKKMPur4oWFcpWksMTiZQ==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004d6ef20be66c893f741a9bf90d9b74675d1c2a31296397acb3ef174fd0b300c654a0c95478ca00399162d7f0f2dc89efdc2b28a30fbabe285857295a4b0c4e265", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE1u8gvmbIk/dBqb+Q2bdGddHCoxKWOXrL\nPvF0/QswDGVKDJVHjKADmRYtfw8tyJ79wrKKMPur4oWFcpWksMTiZQ==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 363, - "comment": "100-bit r and s^-1", - "flags": [ + "tcId" : 363, + "comment" : "100-bit r and s^-1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "3031020d062522bbd3ecbe7c39e93e7c260220783266e90f43dafe5cd9b3b0be86de22f9de83677d0f50713a468ec72fcf5d57", - "result": "valid" + "msg" : "313233343030", + "sig" : "3031020d062522bbd3ecbe7c39e93e7c260220783266e90f43dafe5cd9b3b0be86de22f9de83677d0f50713a468ec72fcf5d57", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04b7291d1404e0c0c07dab9372189f4bd58d2ceaa8d15ede544d9514545ba9ee0629c9a63d5e308769cc30ec276a410e6464a27eeafd9e599db10f053a4fe4a829", - "wx": "00b7291d1404e0c0c07dab9372189f4bd58d2ceaa8d15ede544d9514545ba9ee06", - "wy": "29c9a63d5e308769cc30ec276a410e6464a27eeafd9e599db10f053a4fe4a829" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04b7291d1404e0c0c07dab9372189f4bd58d2ceaa8d15ede544d9514545ba9ee0629c9a63d5e308769cc30ec276a410e6464a27eeafd9e599db10f053a4fe4a829", + "wx" : "00b7291d1404e0c0c07dab9372189f4bd58d2ceaa8d15ede544d9514545ba9ee06", + "wy" : "29c9a63d5e308769cc30ec276a410e6464a27eeafd9e599db10f053a4fe4a829" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004b7291d1404e0c0c07dab9372189f4bd58d2ceaa8d15ede544d9514545ba9ee0629c9a63d5e308769cc30ec276a410e6464a27eeafd9e599db10f053a4fe4a829", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEtykdFATgwMB9q5NyGJ9L1Y0s6qjRXt5U\nTZUUVFup7gYpyaY9XjCHacww7CdqQQ5kZKJ+6v2eWZ2xDwU6T+SoKQ==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004b7291d1404e0c0c07dab9372189f4bd58d2ceaa8d15ede544d9514545ba9ee0629c9a63d5e308769cc30ec276a410e6464a27eeafd9e599db10f053a4fe4a829", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEtykdFATgwMB9q5NyGJ9L1Y0s6qjRXt5U\nTZUUVFup7gYpyaY9XjCHacww7CdqQQ5kZKJ+6v2eWZ2xDwU6T+SoKQ==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 364, - "comment": "r and s^-1 are close to n", - "flags": [ + "tcId" : 364, + "comment" : "r and s^-1 are close to n", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "3045022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03640c1022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c0", - "result": "valid" + "msg" : "313233343030", + "sig" : "3045022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03640c1022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c0", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": 
"046e28303305d642ccb923b722ea86b2a0bc8e3735ecb26e849b19c9f76b2fdbb8186e80d64d8cab164f5238f5318461bf89d4d96ee6544c816c7566947774e0f6", - "wx": "6e28303305d642ccb923b722ea86b2a0bc8e3735ecb26e849b19c9f76b2fdbb8", - "wy": "186e80d64d8cab164f5238f5318461bf89d4d96ee6544c816c7566947774e0f6" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "046e28303305d642ccb923b722ea86b2a0bc8e3735ecb26e849b19c9f76b2fdbb8186e80d64d8cab164f5238f5318461bf89d4d96ee6544c816c7566947774e0f6", + "wx" : "6e28303305d642ccb923b722ea86b2a0bc8e3735ecb26e849b19c9f76b2fdbb8", + "wy" : "186e80d64d8cab164f5238f5318461bf89d4d96ee6544c816c7566947774e0f6" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200046e28303305d642ccb923b722ea86b2a0bc8e3735ecb26e849b19c9f76b2fdbb8186e80d64d8cab164f5238f5318461bf89d4d96ee6544c816c7566947774e0f6", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEbigwMwXWQsy5I7ci6oayoLyONzXssm6E\nmxnJ92sv27gYboDWTYyrFk9SOPUxhGG/idTZbuZUTIFsdWaUd3Tg9g==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200046e28303305d642ccb923b722ea86b2a0bc8e3735ecb26e849b19c9f76b2fdbb8186e80d64d8cab164f5238f5318461bf89d4d96ee6544c816c7566947774e0f6", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEbigwMwXWQsy5I7ci6oayoLyONzXssm6E\nmxnJ92sv27gYboDWTYyrFk9SOPUxhGG/idTZbuZUTIFsdWaUd3Tg9g==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 365, - "comment": "r and s are 64-bit integer", - "flags": [ + "tcId" : 365, + "comment" : "r and s are 64-bit integer", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "30160209009c44febf31c3594d020900839ed28247c2b06b", - "result": "valid" + "msg" : "313233343030", + "sig" : "30160209009c44febf31c3594d020900839ed28247c2b06b", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04375bda93f6af92fb5f8f4b1b5f0534e3bafab34cb7ad9fb9d0b722e4a5c302a9a00b9f387a5a396097aa2162fc5bbcf4a5263372f681c94da51e9799120990fd", - "wx": "375bda93f6af92fb5f8f4b1b5f0534e3bafab34cb7ad9fb9d0b722e4a5c302a9", - "wy": "00a00b9f387a5a396097aa2162fc5bbcf4a5263372f681c94da51e9799120990fd" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04375bda93f6af92fb5f8f4b1b5f0534e3bafab34cb7ad9fb9d0b722e4a5c302a9a00b9f387a5a396097aa2162fc5bbcf4a5263372f681c94da51e9799120990fd", + "wx" : "375bda93f6af92fb5f8f4b1b5f0534e3bafab34cb7ad9fb9d0b722e4a5c302a9", + "wy" : "00a00b9f387a5a396097aa2162fc5bbcf4a5263372f681c94da51e9799120990fd" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004375bda93f6af92fb5f8f4b1b5f0534e3bafab34cb7ad9fb9d0b722e4a5c302a9a00b9f387a5a396097aa2162fc5bbcf4a5263372f681c94da51e9799120990fd", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEN1vak/avkvtfj0sbXwU047r6s0y3rZ+5\n0Lci5KXDAqmgC584elo5YJeqIWL8W7z0pSYzcvaByU2lHpeZEgmQ/Q==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004375bda93f6af92fb5f8f4b1b5f0534e3bafab34cb7ad9fb9d0b722e4a5c302a9a00b9f387a5a396097aa2162fc5bbcf4a5263372f681c94da51e9799120990fd", + "publicKeyPem" : "-----BEGIN PUBLIC 
KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEN1vak/avkvtfj0sbXwU047r6s0y3rZ+5\n0Lci5KXDAqmgC584elo5YJeqIWL8W7z0pSYzcvaByU2lHpeZEgmQ/Q==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 366, - "comment": "r and s are 100-bit integer", - "flags": [ + "tcId" : 366, + "comment" : "r and s are 100-bit integer", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "301e020d09df8b682430beef6f5fd7c7cf020d0fd0a62e13778f4222a0d61c8a", - "result": "valid" + "msg" : "313233343030", + "sig" : "301e020d09df8b682430beef6f5fd7c7cf020d0fd0a62e13778f4222a0d61c8a", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04d75b68216babe03ae257e94b4e3bf1c52f44e3df266d1524ff8c5ea69da73197da4bff9ed1c53f44917a67d7b978598e89df359e3d5913eaea24f3ae259abc44", - "wx": "00d75b68216babe03ae257e94b4e3bf1c52f44e3df266d1524ff8c5ea69da73197", - "wy": "00da4bff9ed1c53f44917a67d7b978598e89df359e3d5913eaea24f3ae259abc44" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04d75b68216babe03ae257e94b4e3bf1c52f44e3df266d1524ff8c5ea69da73197da4bff9ed1c53f44917a67d7b978598e89df359e3d5913eaea24f3ae259abc44", + "wx" : "00d75b68216babe03ae257e94b4e3bf1c52f44e3df266d1524ff8c5ea69da73197", + "wy" : "00da4bff9ed1c53f44917a67d7b978598e89df359e3d5913eaea24f3ae259abc44" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004d75b68216babe03ae257e94b4e3bf1c52f44e3df266d1524ff8c5ea69da73197da4bff9ed1c53f44917a67d7b978598e89df359e3d5913eaea24f3ae259abc44", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE11toIWur4DriV+lLTjvxxS9E498mbRUk\n/4xepp2nMZfaS/+e0cU/RJF6Z9e5eFmOid81nj1ZE+rqJPOuJZq8RA==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004d75b68216babe03ae257e94b4e3bf1c52f44e3df266d1524ff8c5ea69da73197da4bff9ed1c53f44917a67d7b978598e89df359e3d5913eaea24f3ae259abc44", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE11toIWur4DriV+lLTjvxxS9E498mbRUk\n/4xepp2nMZfaS/+e0cU/RJF6Z9e5eFmOid81nj1ZE+rqJPOuJZq8RA==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 367, - "comment": "r and s are 128-bit integer", - "flags": [ + "tcId" : 367, + "comment" : "r and s are 128-bit integer", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "30260211008a598e563a89f526c32ebec8de26367a02110084f633e2042630e99dd0f1e16f7a04bf", - "result": "valid" + "msg" : "313233343030", + "sig" : "30260211008a598e563a89f526c32ebec8de26367a02110084f633e2042630e99dd0f1e16f7a04bf", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "0478bcda140aed23d430cb23c3dc0d01f423db134ee94a3a8cb483f2deac2ac653118114f6f33045d4e9ed9107085007bfbddf8f58fe7a1a2445d66a990045476e", - "wx": "78bcda140aed23d430cb23c3dc0d01f423db134ee94a3a8cb483f2deac2ac653", - "wy": "118114f6f33045d4e9ed9107085007bfbddf8f58fe7a1a2445d66a990045476e" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "0478bcda140aed23d430cb23c3dc0d01f423db134ee94a3a8cb483f2deac2ac653118114f6f33045d4e9ed9107085007bfbddf8f58fe7a1a2445d66a990045476e", + "wx" : 
"78bcda140aed23d430cb23c3dc0d01f423db134ee94a3a8cb483f2deac2ac653", + "wy" : "118114f6f33045d4e9ed9107085007bfbddf8f58fe7a1a2445d66a990045476e" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000478bcda140aed23d430cb23c3dc0d01f423db134ee94a3a8cb483f2deac2ac653118114f6f33045d4e9ed9107085007bfbddf8f58fe7a1a2445d66a990045476e", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEeLzaFArtI9QwyyPD3A0B9CPbE07pSjqM\ntIPy3qwqxlMRgRT28zBF1OntkQcIUAe/vd+PWP56GiRF1mqZAEVHbg==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000478bcda140aed23d430cb23c3dc0d01f423db134ee94a3a8cb483f2deac2ac653118114f6f33045d4e9ed9107085007bfbddf8f58fe7a1a2445d66a990045476e", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEeLzaFArtI9QwyyPD3A0B9CPbE07pSjqM\ntIPy3qwqxlMRgRT28zBF1OntkQcIUAe/vd+PWP56GiRF1mqZAEVHbg==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 368, - "comment": "r and s are 160-bit integer", - "flags": [ + "tcId" : 368, + "comment" : "r and s are 160-bit integer", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "302e021500aa6eeb5823f7fa31b466bb473797f0d0314c0bdf021500e2977c479e6d25703cebbc6bd561938cc9d1bfb9", - "result": "valid" + "msg" : "313233343030", + "sig" : "302e021500aa6eeb5823f7fa31b466bb473797f0d0314c0bdf021500e2977c479e6d25703cebbc6bd561938cc9d1bfb9", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04bb79f61857f743bfa1b6e7111ce4094377256969e4e15159123d9548acc3be6c1f9d9f8860dcffd3eb36dd6c31ff2e7226c2009c4c94d8d7d2b5686bf7abd677", - "wx": "00bb79f61857f743bfa1b6e7111ce4094377256969e4e15159123d9548acc3be6c", - "wy": "1f9d9f8860dcffd3eb36dd6c31ff2e7226c2009c4c94d8d7d2b5686bf7abd677" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04bb79f61857f743bfa1b6e7111ce4094377256969e4e15159123d9548acc3be6c1f9d9f8860dcffd3eb36dd6c31ff2e7226c2009c4c94d8d7d2b5686bf7abd677", + "wx" : "00bb79f61857f743bfa1b6e7111ce4094377256969e4e15159123d9548acc3be6c", + "wy" : "1f9d9f8860dcffd3eb36dd6c31ff2e7226c2009c4c94d8d7d2b5686bf7abd677" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004bb79f61857f743bfa1b6e7111ce4094377256969e4e15159123d9548acc3be6c1f9d9f8860dcffd3eb36dd6c31ff2e7226c2009c4c94d8d7d2b5686bf7abd677", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEu3n2GFf3Q7+htucRHOQJQ3claWnk4VFZ\nEj2VSKzDvmwfnZ+IYNz/0+s23Wwx/y5yJsIAnEyU2NfStWhr96vWdw==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004bb79f61857f743bfa1b6e7111ce4094377256969e4e15159123d9548acc3be6c1f9d9f8860dcffd3eb36dd6c31ff2e7226c2009c4c94d8d7d2b5686bf7abd677", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEu3n2GFf3Q7+htucRHOQJQ3claWnk4VFZ\nEj2VSKzDvmwfnZ+IYNz/0+s23Wwx/y5yJsIAnEyU2NfStWhr96vWdw==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 369, - "comment": "s == 1", - "flags": [ + "tcId" : 369, + "comment" : "s == 1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "3025022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c1020101", - "result": "valid" + "msg" : "313233343030", + "sig" : 
"3025022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c1020101", + "result" : "valid" }, { - "tcId": 370, - "comment": "s == 0", - "flags": [ + "tcId" : 370, + "comment" : "s == 0", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "3025022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c1020100", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3025022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c1020100", + "result" : "invalid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "0493591827d9e6713b4e9faea62c72b28dfefa68e0c05160b5d6aae88fd2e36c36073f5545ad5af410af26afff68654cf72d45e493489311203247347a890f4518", - "wx": "0093591827d9e6713b4e9faea62c72b28dfefa68e0c05160b5d6aae88fd2e36c36", - "wy": "073f5545ad5af410af26afff68654cf72d45e493489311203247347a890f4518" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "0493591827d9e6713b4e9faea62c72b28dfefa68e0c05160b5d6aae88fd2e36c36073f5545ad5af410af26afff68654cf72d45e493489311203247347a890f4518", + "wx" : "0093591827d9e6713b4e9faea62c72b28dfefa68e0c05160b5d6aae88fd2e36c36", + "wy" : "073f5545ad5af410af26afff68654cf72d45e493489311203247347a890f4518" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000493591827d9e6713b4e9faea62c72b28dfefa68e0c05160b5d6aae88fd2e36c36073f5545ad5af410af26afff68654cf72d45e493489311203247347a890f4518", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEk1kYJ9nmcTtOn66mLHKyjf76aODAUWC1\n1qroj9LjbDYHP1VFrVr0EK8mr/9oZUz3LUXkk0iTESAyRzR6iQ9FGA==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000493591827d9e6713b4e9faea62c72b28dfefa68e0c05160b5d6aae88fd2e36c36073f5545ad5af410af26afff68654cf72d45e493489311203247347a890f4518", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEk1kYJ9nmcTtOn66mLHKyjf76aODAUWC1\n1qroj9LjbDYHP1VFrVr0EK8mr/9oZUz3LUXkk0iTESAyRzR6iQ9FGA==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 371, - "comment": "edge case modular inverse", - "flags": [ + "tcId" : 371, + "comment" : "edge case modular inverse", + "flags" : [ "ModularInverse", "ArithmeticError" ], - "msg": "313233343030", - "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c10220419d981c515af8cc82545aac0c85e9e308fbb2eab6acd7ed497e0b4145a18fd9", - "result": "valid" + "msg" : "313233343030", + "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c10220419d981c515af8cc82545aac0c85e9e308fbb2eab6acd7ed497e0b4145a18fd9", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "0431ed3081aefe001eb6402069ee2ccc1862937b85995144dba9503943587bf0dada01b8cc4df34f5ab3b1a359615208946e5ee35f98ee775b8ccecd86ccc1650f", - "wx": "31ed3081aefe001eb6402069ee2ccc1862937b85995144dba9503943587bf0da", - "wy": "00da01b8cc4df34f5ab3b1a359615208946e5ee35f98ee775b8ccecd86ccc1650f" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "0431ed3081aefe001eb6402069ee2ccc1862937b85995144dba9503943587bf0dada01b8cc4df34f5ab3b1a359615208946e5ee35f98ee775b8ccecd86ccc1650f", + "wx" : 
"31ed3081aefe001eb6402069ee2ccc1862937b85995144dba9503943587bf0da", + "wy" : "00da01b8cc4df34f5ab3b1a359615208946e5ee35f98ee775b8ccecd86ccc1650f" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000431ed3081aefe001eb6402069ee2ccc1862937b85995144dba9503943587bf0dada01b8cc4df34f5ab3b1a359615208946e5ee35f98ee775b8ccecd86ccc1650f", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEMe0wga7+AB62QCBp7izMGGKTe4WZUUTb\nqVA5Q1h78NraAbjMTfNPWrOxo1lhUgiUbl7jX5jud1uMzs2GzMFlDw==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000431ed3081aefe001eb6402069ee2ccc1862937b85995144dba9503943587bf0dada01b8cc4df34f5ab3b1a359615208946e5ee35f98ee775b8ccecd86ccc1650f", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEMe0wga7+AB62QCBp7izMGGKTe4WZUUTb\nqVA5Q1h78NraAbjMTfNPWrOxo1lhUgiUbl7jX5jud1uMzs2GzMFlDw==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 372, - "comment": "edge case modular inverse", - "flags": [ + "tcId" : 372, + "comment" : "edge case modular inverse", + "flags" : [ "ModularInverse", "ArithmeticError" ], - "msg": "313233343030", - "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102201b21717ad71d23bbac60a9ad0baf75b063c9fdf52a00ebf99d022172910993c9", - "result": "valid" + "msg" : "313233343030", + "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102201b21717ad71d23bbac60a9ad0baf75b063c9fdf52a00ebf99d022172910993c9", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "047dff66fa98509ff3e2e51045f4390523dccda43a3bc2885e58c248090990eea854c76c2b9adeb6bb571823e07fd7c65c8639cf9d905260064c8e7675ce6d98b4", - "wx": "7dff66fa98509ff3e2e51045f4390523dccda43a3bc2885e58c248090990eea8", - "wy": "54c76c2b9adeb6bb571823e07fd7c65c8639cf9d905260064c8e7675ce6d98b4" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "047dff66fa98509ff3e2e51045f4390523dccda43a3bc2885e58c248090990eea854c76c2b9adeb6bb571823e07fd7c65c8639cf9d905260064c8e7675ce6d98b4", + "wx" : "7dff66fa98509ff3e2e51045f4390523dccda43a3bc2885e58c248090990eea8", + "wy" : "54c76c2b9adeb6bb571823e07fd7c65c8639cf9d905260064c8e7675ce6d98b4" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200047dff66fa98509ff3e2e51045f4390523dccda43a3bc2885e58c248090990eea854c76c2b9adeb6bb571823e07fd7c65c8639cf9d905260064c8e7675ce6d98b4", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEff9m+phQn/Pi5RBF9DkFI9zNpDo7wohe\nWMJICQmQ7qhUx2wrmt62u1cYI+B/18ZchjnPnZBSYAZMjnZ1zm2YtA==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200047dff66fa98509ff3e2e51045f4390523dccda43a3bc2885e58c248090990eea854c76c2b9adeb6bb571823e07fd7c65c8639cf9d905260064c8e7675ce6d98b4", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEff9m+phQn/Pi5RBF9DkFI9zNpDo7wohe\nWMJICQmQ7qhUx2wrmt62u1cYI+B/18ZchjnPnZBSYAZMjnZ1zm2YtA==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 373, - "comment": "edge case modular inverse", - "flags": [ + "tcId" : 373, + "comment" : "edge case modular inverse", + "flags" : [ "ModularInverse", "ArithmeticError" ], - "msg": "313233343030", - "sig": 
"3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102202f588f66018f3dd14db3e28e77996487e32486b521ed8e5a20f06591951777e9", - "result": "valid" + "msg" : "313233343030", + "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102202f588f66018f3dd14db3e28e77996487e32486b521ed8e5a20f06591951777e9", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "044280509aab64edfc0b4a2967e4cbce849cb544e4a77313c8e6ece579fbd7420a2e89fe5cc1927d554e6a3bb14033ea7c922cd75cba2c7415fdab52f20b1860f1", - "wx": "4280509aab64edfc0b4a2967e4cbce849cb544e4a77313c8e6ece579fbd7420a", - "wy": "2e89fe5cc1927d554e6a3bb14033ea7c922cd75cba2c7415fdab52f20b1860f1" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "044280509aab64edfc0b4a2967e4cbce849cb544e4a77313c8e6ece579fbd7420a2e89fe5cc1927d554e6a3bb14033ea7c922cd75cba2c7415fdab52f20b1860f1", + "wx" : "4280509aab64edfc0b4a2967e4cbce849cb544e4a77313c8e6ece579fbd7420a", + "wy" : "2e89fe5cc1927d554e6a3bb14033ea7c922cd75cba2c7415fdab52f20b1860f1" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200044280509aab64edfc0b4a2967e4cbce849cb544e4a77313c8e6ece579fbd7420a2e89fe5cc1927d554e6a3bb14033ea7c922cd75cba2c7415fdab52f20b1860f1", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEQoBQmqtk7fwLSiln5MvOhJy1ROSncxPI\n5uzlefvXQgouif5cwZJ9VU5qO7FAM+p8kizXXLosdBX9q1LyCxhg8Q==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200044280509aab64edfc0b4a2967e4cbce849cb544e4a77313c8e6ece579fbd7420a2e89fe5cc1927d554e6a3bb14033ea7c922cd75cba2c7415fdab52f20b1860f1", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEQoBQmqtk7fwLSiln5MvOhJy1ROSncxPI\n5uzlefvXQgouif5cwZJ9VU5qO7FAM+p8kizXXLosdBX9q1LyCxhg8Q==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 374, - "comment": "edge case modular inverse", - "flags": [ + "tcId" : 374, + "comment" : "edge case modular inverse", + "flags" : [ "ModularInverse", "ArithmeticError" ], - "msg": "313233343030", - "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c10220091a08870ff4daf9123b30c20e8c4fc8505758dcf4074fcaff2170c9bfcf74f4", - "result": "valid" + "msg" : "313233343030", + "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c10220091a08870ff4daf9123b30c20e8c4fc8505758dcf4074fcaff2170c9bfcf74f4", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "044f8df145194e3c4fc3eea26d43ce75b402d6b17472ddcbb254b8a79b0bf3d9cb2aa20d82844cb266344e71ca78f2ad27a75a09e5bc0fa57e4efd9d465a0888db", - "wx": "4f8df145194e3c4fc3eea26d43ce75b402d6b17472ddcbb254b8a79b0bf3d9cb", - "wy": "2aa20d82844cb266344e71ca78f2ad27a75a09e5bc0fa57e4efd9d465a0888db" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "044f8df145194e3c4fc3eea26d43ce75b402d6b17472ddcbb254b8a79b0bf3d9cb2aa20d82844cb266344e71ca78f2ad27a75a09e5bc0fa57e4efd9d465a0888db", + "wx" : "4f8df145194e3c4fc3eea26d43ce75b402d6b17472ddcbb254b8a79b0bf3d9cb", + "wy" : "2aa20d82844cb266344e71ca78f2ad27a75a09e5bc0fa57e4efd9d465a0888db" }, - "publicKeyDer": 
"3056301006072a8648ce3d020106052b8104000a034200044f8df145194e3c4fc3eea26d43ce75b402d6b17472ddcbb254b8a79b0bf3d9cb2aa20d82844cb266344e71ca78f2ad27a75a09e5bc0fa57e4efd9d465a0888db", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAET43xRRlOPE/D7qJtQ851tALWsXRy3cuy\nVLinmwvz2csqog2ChEyyZjROccp48q0np1oJ5bwPpX5O/Z1GWgiI2w==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200044f8df145194e3c4fc3eea26d43ce75b402d6b17472ddcbb254b8a79b0bf3d9cb2aa20d82844cb266344e71ca78f2ad27a75a09e5bc0fa57e4efd9d465a0888db", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAET43xRRlOPE/D7qJtQ851tALWsXRy3cuy\nVLinmwvz2csqog2ChEyyZjROccp48q0np1oJ5bwPpX5O/Z1GWgiI2w==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 375, - "comment": "edge case modular inverse", - "flags": [ + "tcId" : 375, + "comment" : "edge case modular inverse", + "flags" : [ "ModularInverse", "ArithmeticError" ], - "msg": "313233343030", - "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102207c370dc0ce8c59a8b273cba44a7c1191fc3186dc03cab96b0567312df0d0b250", - "result": "valid" + "msg" : "313233343030", + "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102207c370dc0ce8c59a8b273cba44a7c1191fc3186dc03cab96b0567312df0d0b250", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "049598a57dd67ec3e16b587a338aa3a10a3a3913b41a3af32e3ed3ff01358c6b14122819edf8074bbc521f7d4cdce82fef7a516706affba1d93d9dea9ccae1a207", - "wx": "009598a57dd67ec3e16b587a338aa3a10a3a3913b41a3af32e3ed3ff01358c6b14", - "wy": "122819edf8074bbc521f7d4cdce82fef7a516706affba1d93d9dea9ccae1a207" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "049598a57dd67ec3e16b587a338aa3a10a3a3913b41a3af32e3ed3ff01358c6b14122819edf8074bbc521f7d4cdce82fef7a516706affba1d93d9dea9ccae1a207", + "wx" : "009598a57dd67ec3e16b587a338aa3a10a3a3913b41a3af32e3ed3ff01358c6b14", + "wy" : "122819edf8074bbc521f7d4cdce82fef7a516706affba1d93d9dea9ccae1a207" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200049598a57dd67ec3e16b587a338aa3a10a3a3913b41a3af32e3ed3ff01358c6b14122819edf8074bbc521f7d4cdce82fef7a516706affba1d93d9dea9ccae1a207", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAElZilfdZ+w+FrWHoziqOhCjo5E7QaOvMu\nPtP/ATWMaxQSKBnt+AdLvFIffUzc6C/velFnBq/7odk9neqcyuGiBw==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200049598a57dd67ec3e16b587a338aa3a10a3a3913b41a3af32e3ed3ff01358c6b14122819edf8074bbc521f7d4cdce82fef7a516706affba1d93d9dea9ccae1a207", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAElZilfdZ+w+FrWHoziqOhCjo5E7QaOvMu\nPtP/ATWMaxQSKBnt+AdLvFIffUzc6C/velFnBq/7odk9neqcyuGiBw==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 376, - "comment": "edge case modular inverse", - "flags": [ + "tcId" : 376, + "comment" : "edge case modular inverse", + "flags" : [ "ModularInverse", "ArithmeticError" ], - "msg": "313233343030", - "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c1022070b59a7d1ee77a2f9e0491c2a7cfcd0ed04df4a35192f6132dcc668c79a6160e", - "result": "valid" + "msg" : 
"313233343030", + "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c1022070b59a7d1ee77a2f9e0491c2a7cfcd0ed04df4a35192f6132dcc668c79a6160e", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "049171fec3ca20806bc084f12f0760911b60990bd80e5b2a71ca03a048b20f837e634fd17863761b2958d2be4e149f8d3d7abbdc18be03f451ab6c17fa0a1f8330", - "wx": "009171fec3ca20806bc084f12f0760911b60990bd80e5b2a71ca03a048b20f837e", - "wy": "634fd17863761b2958d2be4e149f8d3d7abbdc18be03f451ab6c17fa0a1f8330" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "049171fec3ca20806bc084f12f0760911b60990bd80e5b2a71ca03a048b20f837e634fd17863761b2958d2be4e149f8d3d7abbdc18be03f451ab6c17fa0a1f8330", + "wx" : "009171fec3ca20806bc084f12f0760911b60990bd80e5b2a71ca03a048b20f837e", + "wy" : "634fd17863761b2958d2be4e149f8d3d7abbdc18be03f451ab6c17fa0a1f8330" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200049171fec3ca20806bc084f12f0760911b60990bd80e5b2a71ca03a048b20f837e634fd17863761b2958d2be4e149f8d3d7abbdc18be03f451ab6c17fa0a1f8330", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEkXH+w8oggGvAhPEvB2CRG2CZC9gOWypx\nygOgSLIPg35jT9F4Y3YbKVjSvk4Un409ervcGL4D9FGrbBf6Ch+DMA==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200049171fec3ca20806bc084f12f0760911b60990bd80e5b2a71ca03a048b20f837e634fd17863761b2958d2be4e149f8d3d7abbdc18be03f451ab6c17fa0a1f8330", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEkXH+w8oggGvAhPEvB2CRG2CZC9gOWypx\nygOgSLIPg35jT9F4Y3YbKVjSvk4Un409ervcGL4D9FGrbBf6Ch+DMA==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 377, - "comment": "edge case modular inverse", - "flags": [ + "tcId" : 377, + "comment" : "edge case modular inverse", + "flags" : [ "ModularInverse", "ArithmeticError" ], - "msg": "313233343030", - "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102202736d76e412246e097148e2bf62915614eb7c428913a58eb5e9cd4674a9423de", - "result": "valid" + "msg" : "313233343030", + "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102202736d76e412246e097148e2bf62915614eb7c428913a58eb5e9cd4674a9423de", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04777c8930b6e1d271100fe68ce93f163fa37612c5fff67f4a62fc3bafaf3d17a9ed73d86f60a51b5ed91353a3b054edc0aa92c9ebcbd0b75d188fdc882791d68d", - "wx": "777c8930b6e1d271100fe68ce93f163fa37612c5fff67f4a62fc3bafaf3d17a9", - "wy": "00ed73d86f60a51b5ed91353a3b054edc0aa92c9ebcbd0b75d188fdc882791d68d" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04777c8930b6e1d271100fe68ce93f163fa37612c5fff67f4a62fc3bafaf3d17a9ed73d86f60a51b5ed91353a3b054edc0aa92c9ebcbd0b75d188fdc882791d68d", + "wx" : "777c8930b6e1d271100fe68ce93f163fa37612c5fff67f4a62fc3bafaf3d17a9", + "wy" : "00ed73d86f60a51b5ed91353a3b054edc0aa92c9ebcbd0b75d188fdc882791d68d" }, - "publicKeyDer": 
"3056301006072a8648ce3d020106052b8104000a03420004777c8930b6e1d271100fe68ce93f163fa37612c5fff67f4a62fc3bafaf3d17a9ed73d86f60a51b5ed91353a3b054edc0aa92c9ebcbd0b75d188fdc882791d68d", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEd3yJMLbh0nEQD+aM6T8WP6N2EsX/9n9K\nYvw7r689F6ntc9hvYKUbXtkTU6OwVO3AqpLJ68vQt10Yj9yIJ5HWjQ==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004777c8930b6e1d271100fe68ce93f163fa37612c5fff67f4a62fc3bafaf3d17a9ed73d86f60a51b5ed91353a3b054edc0aa92c9ebcbd0b75d188fdc882791d68d", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEd3yJMLbh0nEQD+aM6T8WP6N2EsX/9n9K\nYvw7r689F6ntc9hvYKUbXtkTU6OwVO3AqpLJ68vQt10Yj9yIJ5HWjQ==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 378, - "comment": "edge case modular inverse", - "flags": [ + "tcId" : 378, + "comment" : "edge case modular inverse", + "flags" : [ "ModularInverse", "ArithmeticError" ], - "msg": "313233343030", - "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102204a1e12831fbe93627b02d6e7f24bccdd6ef4b2d0f46739eaf3b1eaf0ca117770", - "result": "valid" + "msg" : "313233343030", + "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102204a1e12831fbe93627b02d6e7f24bccdd6ef4b2d0f46739eaf3b1eaf0ca117770", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04eabc248f626e0a63e1eb81c43d461a39a1dba881eb6ee2152b07c32d71bcf4700603caa8b9d33db13af44c6efbec8a198ed6124ac9eb17eaafd2824a545ec000", - "wx": "00eabc248f626e0a63e1eb81c43d461a39a1dba881eb6ee2152b07c32d71bcf470", - "wy": "0603caa8b9d33db13af44c6efbec8a198ed6124ac9eb17eaafd2824a545ec000" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04eabc248f626e0a63e1eb81c43d461a39a1dba881eb6ee2152b07c32d71bcf4700603caa8b9d33db13af44c6efbec8a198ed6124ac9eb17eaafd2824a545ec000", + "wx" : "00eabc248f626e0a63e1eb81c43d461a39a1dba881eb6ee2152b07c32d71bcf470", + "wy" : "0603caa8b9d33db13af44c6efbec8a198ed6124ac9eb17eaafd2824a545ec000" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004eabc248f626e0a63e1eb81c43d461a39a1dba881eb6ee2152b07c32d71bcf4700603caa8b9d33db13af44c6efbec8a198ed6124ac9eb17eaafd2824a545ec000", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE6rwkj2JuCmPh64HEPUYaOaHbqIHrbuIV\nKwfDLXG89HAGA8qoudM9sTr0TG777IoZjtYSSsnrF+qv0oJKVF7AAA==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004eabc248f626e0a63e1eb81c43d461a39a1dba881eb6ee2152b07c32d71bcf4700603caa8b9d33db13af44c6efbec8a198ed6124ac9eb17eaafd2824a545ec000", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE6rwkj2JuCmPh64HEPUYaOaHbqIHrbuIV\nKwfDLXG89HAGA8qoudM9sTr0TG777IoZjtYSSsnrF+qv0oJKVF7AAA==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 379, - "comment": "edge case modular inverse", - "flags": [ + "tcId" : 379, + "comment" : "edge case modular inverse", + "flags" : [ "ModularInverse", "ArithmeticError" ], - "msg": "313233343030", - "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c1022006c778d4dfff7dee06ed88bc4e0ed34fc553aad67caf796f2a1c6487c1b2e877", - "result": "valid" + "msg" : 
"313233343030", + "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c1022006c778d4dfff7dee06ed88bc4e0ed34fc553aad67caf796f2a1c6487c1b2e877", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "049f7a13ada158a55f9ddf1a45f044f073d9b80030efdcfc9f9f58418fbceaf001f8ada0175090f80d47227d6713b6740f9a0091d88a837d0a1cd77b58a8f28d73", - "wx": "009f7a13ada158a55f9ddf1a45f044f073d9b80030efdcfc9f9f58418fbceaf001", - "wy": "00f8ada0175090f80d47227d6713b6740f9a0091d88a837d0a1cd77b58a8f28d73" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "049f7a13ada158a55f9ddf1a45f044f073d9b80030efdcfc9f9f58418fbceaf001f8ada0175090f80d47227d6713b6740f9a0091d88a837d0a1cd77b58a8f28d73", + "wx" : "009f7a13ada158a55f9ddf1a45f044f073d9b80030efdcfc9f9f58418fbceaf001", + "wy" : "00f8ada0175090f80d47227d6713b6740f9a0091d88a837d0a1cd77b58a8f28d73" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200049f7a13ada158a55f9ddf1a45f044f073d9b80030efdcfc9f9f58418fbceaf001f8ada0175090f80d47227d6713b6740f9a0091d88a837d0a1cd77b58a8f28d73", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEn3oTraFYpV+d3xpF8ETwc9m4ADDv3Pyf\nn1hBj7zq8AH4raAXUJD4DUcifWcTtnQPmgCR2IqDfQoc13tYqPKNcw==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200049f7a13ada158a55f9ddf1a45f044f073d9b80030efdcfc9f9f58418fbceaf001f8ada0175090f80d47227d6713b6740f9a0091d88a837d0a1cd77b58a8f28d73", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEn3oTraFYpV+d3xpF8ETwc9m4ADDv3Pyf\nn1hBj7zq8AH4raAXUJD4DUcifWcTtnQPmgCR2IqDfQoc13tYqPKNcw==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 380, - "comment": "edge case modular inverse", - "flags": [ + "tcId" : 380, + "comment" : "edge case modular inverse", + "flags" : [ "ModularInverse", "ArithmeticError" ], - "msg": "313233343030", - "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102204de459ef9159afa057feb3ec40fef01c45b809f4ab296ea48c206d4249a2b451", - "result": "valid" + "msg" : "313233343030", + "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102204de459ef9159afa057feb3ec40fef01c45b809f4ab296ea48c206d4249a2b451", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "0411c4f3e461cd019b5c06ea0cea4c4090c3cc3e3c5d9f3c6d65b436826da9b4dbbbeb7a77e4cbfda207097c43423705f72c80476da3dac40a483b0ab0f2ead1cb", - "wx": "11c4f3e461cd019b5c06ea0cea4c4090c3cc3e3c5d9f3c6d65b436826da9b4db", - "wy": "00bbeb7a77e4cbfda207097c43423705f72c80476da3dac40a483b0ab0f2ead1cb" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "0411c4f3e461cd019b5c06ea0cea4c4090c3cc3e3c5d9f3c6d65b436826da9b4dbbbeb7a77e4cbfda207097c43423705f72c80476da3dac40a483b0ab0f2ead1cb", + "wx" : "11c4f3e461cd019b5c06ea0cea4c4090c3cc3e3c5d9f3c6d65b436826da9b4db", + "wy" : "00bbeb7a77e4cbfda207097c43423705f72c80476da3dac40a483b0ab0f2ead1cb" }, - "publicKeyDer": 
"3056301006072a8648ce3d020106052b8104000a0342000411c4f3e461cd019b5c06ea0cea4c4090c3cc3e3c5d9f3c6d65b436826da9b4dbbbeb7a77e4cbfda207097c43423705f72c80476da3dac40a483b0ab0f2ead1cb", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEEcTz5GHNAZtcBuoM6kxAkMPMPjxdnzxt\nZbQ2gm2ptNu763p35Mv9ogcJfENCNwX3LIBHbaPaxApIOwqw8urRyw==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000411c4f3e461cd019b5c06ea0cea4c4090c3cc3e3c5d9f3c6d65b436826da9b4dbbbeb7a77e4cbfda207097c43423705f72c80476da3dac40a483b0ab0f2ead1cb", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEEcTz5GHNAZtcBuoM6kxAkMPMPjxdnzxt\nZbQ2gm2ptNu763p35Mv9ogcJfENCNwX3LIBHbaPaxApIOwqw8urRyw==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 381, - "comment": "edge case modular inverse", - "flags": [ + "tcId" : 381, + "comment" : "edge case modular inverse", + "flags" : [ "ModularInverse", "ArithmeticError" ], - "msg": "313233343030", - "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c10220745d294978007302033502e1acc48b63ae6500be43adbea1b258d6b423dbb416", - "result": "valid" + "msg" : "313233343030", + "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c10220745d294978007302033502e1acc48b63ae6500be43adbea1b258d6b423dbb416", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04e2e18682d53123aa01a6c5d00b0c623d671b462ea80bddd65227fd5105988aa4161907b3fd25044a949ea41c8e2ea8459dc6f1654856b8b61b31543bb1b45bdb", - "wx": "00e2e18682d53123aa01a6c5d00b0c623d671b462ea80bddd65227fd5105988aa4", - "wy": "161907b3fd25044a949ea41c8e2ea8459dc6f1654856b8b61b31543bb1b45bdb" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04e2e18682d53123aa01a6c5d00b0c623d671b462ea80bddd65227fd5105988aa4161907b3fd25044a949ea41c8e2ea8459dc6f1654856b8b61b31543bb1b45bdb", + "wx" : "00e2e18682d53123aa01a6c5d00b0c623d671b462ea80bddd65227fd5105988aa4", + "wy" : "161907b3fd25044a949ea41c8e2ea8459dc6f1654856b8b61b31543bb1b45bdb" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004e2e18682d53123aa01a6c5d00b0c623d671b462ea80bddd65227fd5105988aa4161907b3fd25044a949ea41c8e2ea8459dc6f1654856b8b61b31543bb1b45bdb", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE4uGGgtUxI6oBpsXQCwxiPWcbRi6oC93W\nUif9UQWYiqQWGQez/SUESpSepByOLqhFncbxZUhWuLYbMVQ7sbRb2w==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004e2e18682d53123aa01a6c5d00b0c623d671b462ea80bddd65227fd5105988aa4161907b3fd25044a949ea41c8e2ea8459dc6f1654856b8b61b31543bb1b45bdb", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE4uGGgtUxI6oBpsXQCwxiPWcbRi6oC93W\nUif9UQWYiqQWGQez/SUESpSepByOLqhFncbxZUhWuLYbMVQ7sbRb2w==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 382, - "comment": "edge case modular inverse", - "flags": [ + "tcId" : 382, + "comment" : "edge case modular inverse", + "flags" : [ "ModularInverse", "ArithmeticError" ], - "msg": "313233343030", - "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102207b2a785e3896f59b2d69da57648e80ad3c133a750a2847fd2098ccd902042b6c", - "result": "valid" + "msg" : 
"313233343030", + "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102207b2a785e3896f59b2d69da57648e80ad3c133a750a2847fd2098ccd902042b6c", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "0490f8d4ca73de08a6564aaf005247b6f0ffe978504dce52605f46b7c3e56197dafadbe528eb70d9ee7ea0e70702db54f721514c7b8604ac2cb214f1decb7e383d", - "wx": "0090f8d4ca73de08a6564aaf005247b6f0ffe978504dce52605f46b7c3e56197da", - "wy": "00fadbe528eb70d9ee7ea0e70702db54f721514c7b8604ac2cb214f1decb7e383d" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "0490f8d4ca73de08a6564aaf005247b6f0ffe978504dce52605f46b7c3e56197dafadbe528eb70d9ee7ea0e70702db54f721514c7b8604ac2cb214f1decb7e383d", + "wx" : "0090f8d4ca73de08a6564aaf005247b6f0ffe978504dce52605f46b7c3e56197da", + "wy" : "00fadbe528eb70d9ee7ea0e70702db54f721514c7b8604ac2cb214f1decb7e383d" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000490f8d4ca73de08a6564aaf005247b6f0ffe978504dce52605f46b7c3e56197dafadbe528eb70d9ee7ea0e70702db54f721514c7b8604ac2cb214f1decb7e383d", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEkPjUynPeCKZWSq8AUke28P/peFBNzlJg\nX0a3w+Vhl9r62+Uo63DZ7n6g5wcC21T3IVFMe4YErCyyFPHey344PQ==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000490f8d4ca73de08a6564aaf005247b6f0ffe978504dce52605f46b7c3e56197dafadbe528eb70d9ee7ea0e70702db54f721514c7b8604ac2cb214f1decb7e383d", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEkPjUynPeCKZWSq8AUke28P/peFBNzlJg\nX0a3w+Vhl9r62+Uo63DZ7n6g5wcC21T3IVFMe4YErCyyFPHey344PQ==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 383, - "comment": "edge case modular inverse", - "flags": [ + "tcId" : 383, + "comment" : "edge case modular inverse", + "flags" : [ "ModularInverse", "ArithmeticError" ], - "msg": "313233343030", - "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c1022071ae94a72ca896875e7aa4a4c3d29afdb4b35b6996273e63c47ac519256c5eb1", - "result": "valid" + "msg" : "313233343030", + "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c1022071ae94a72ca896875e7aa4a4c3d29afdb4b35b6996273e63c47ac519256c5eb1", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04824c195c73cffdf038d101bce1687b5c3b6146f395c885976f7753b2376b948e3cdefa6fc347d13e4dcbc63a0b03a165180cd2be1431a0cf74ce1ea25082d2bc", - "wx": "00824c195c73cffdf038d101bce1687b5c3b6146f395c885976f7753b2376b948e", - "wy": "3cdefa6fc347d13e4dcbc63a0b03a165180cd2be1431a0cf74ce1ea25082d2bc" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04824c195c73cffdf038d101bce1687b5c3b6146f395c885976f7753b2376b948e3cdefa6fc347d13e4dcbc63a0b03a165180cd2be1431a0cf74ce1ea25082d2bc", + "wx" : "00824c195c73cffdf038d101bce1687b5c3b6146f395c885976f7753b2376b948e", + "wy" : "3cdefa6fc347d13e4dcbc63a0b03a165180cd2be1431a0cf74ce1ea25082d2bc" }, - "publicKeyDer": 
"3056301006072a8648ce3d020106052b8104000a03420004824c195c73cffdf038d101bce1687b5c3b6146f395c885976f7753b2376b948e3cdefa6fc347d13e4dcbc63a0b03a165180cd2be1431a0cf74ce1ea25082d2bc", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEgkwZXHPP/fA40QG84Wh7XDthRvOVyIWX\nb3dTsjdrlI483vpvw0fRPk3LxjoLA6FlGAzSvhQxoM90zh6iUILSvA==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004824c195c73cffdf038d101bce1687b5c3b6146f395c885976f7753b2376b948e3cdefa6fc347d13e4dcbc63a0b03a165180cd2be1431a0cf74ce1ea25082d2bc", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEgkwZXHPP/fA40QG84Wh7XDthRvOVyIWX\nb3dTsjdrlI483vpvw0fRPk3LxjoLA6FlGAzSvhQxoM90zh6iUILSvA==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 384, - "comment": "edge case modular inverse", - "flags": [ + "tcId" : 384, + "comment" : "edge case modular inverse", + "flags" : [ "ModularInverse", "ArithmeticError" ], - "msg": "313233343030", - "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102200fa527fa7343c0bc9ec35a6278bfbff4d83301b154fc4bd14aee7eb93445b5f9", - "result": "valid" + "msg" : "313233343030", + "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102200fa527fa7343c0bc9ec35a6278bfbff4d83301b154fc4bd14aee7eb93445b5f9", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "042788a52f078eb3f202c4fa73e0d3386faf3df6be856003636f599922d4f5268f30b4f207c919bbdf5e67a8be4265a8174754b3aba8f16e575b77ff4d5a7eb64f", - "wx": "2788a52f078eb3f202c4fa73e0d3386faf3df6be856003636f599922d4f5268f", - "wy": "30b4f207c919bbdf5e67a8be4265a8174754b3aba8f16e575b77ff4d5a7eb64f" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "042788a52f078eb3f202c4fa73e0d3386faf3df6be856003636f599922d4f5268f30b4f207c919bbdf5e67a8be4265a8174754b3aba8f16e575b77ff4d5a7eb64f", + "wx" : "2788a52f078eb3f202c4fa73e0d3386faf3df6be856003636f599922d4f5268f", + "wy" : "30b4f207c919bbdf5e67a8be4265a8174754b3aba8f16e575b77ff4d5a7eb64f" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200042788a52f078eb3f202c4fa73e0d3386faf3df6be856003636f599922d4f5268f30b4f207c919bbdf5e67a8be4265a8174754b3aba8f16e575b77ff4d5a7eb64f", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEJ4ilLweOs/ICxPpz4NM4b6899r6FYANj\nb1mZItT1Jo8wtPIHyRm7315nqL5CZagXR1Szq6jxbldbd/9NWn62Tw==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200042788a52f078eb3f202c4fa73e0d3386faf3df6be856003636f599922d4f5268f30b4f207c919bbdf5e67a8be4265a8174754b3aba8f16e575b77ff4d5a7eb64f", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEJ4ilLweOs/ICxPpz4NM4b6899r6FYANj\nb1mZItT1Jo8wtPIHyRm7315nqL5CZagXR1Szq6jxbldbd/9NWn62Tw==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 385, - "comment": "edge case modular inverse", - "flags": [ + "tcId" : 385, + "comment" : "edge case modular inverse", + "flags" : [ "ModularInverse", "ArithmeticError" ], - "msg": "313233343030", - "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102206539c0adadd0525ff42622164ce9314348bd0863b4c80e936b23ca0414264671", - "result": "valid" + "msg" : 
"313233343030", + "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c102206539c0adadd0525ff42622164ce9314348bd0863b4c80e936b23ca0414264671", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04d533b789a4af890fa7a82a1fae58c404f9a62a50b49adafab349c513b415087401b4171b803e76b34a9861e10f7bc289a066fd01bd29f84c987a10a5fb18c2d4", - "wx": "00d533b789a4af890fa7a82a1fae58c404f9a62a50b49adafab349c513b4150874", - "wy": "01b4171b803e76b34a9861e10f7bc289a066fd01bd29f84c987a10a5fb18c2d4" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04d533b789a4af890fa7a82a1fae58c404f9a62a50b49adafab349c513b415087401b4171b803e76b34a9861e10f7bc289a066fd01bd29f84c987a10a5fb18c2d4", + "wx" : "00d533b789a4af890fa7a82a1fae58c404f9a62a50b49adafab349c513b4150874", + "wy" : "01b4171b803e76b34a9861e10f7bc289a066fd01bd29f84c987a10a5fb18c2d4" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004d533b789a4af890fa7a82a1fae58c404f9a62a50b49adafab349c513b415087401b4171b803e76b34a9861e10f7bc289a066fd01bd29f84c987a10a5fb18c2d4", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE1TO3iaSviQ+nqCofrljEBPmmKlC0mtr6\ns0nFE7QVCHQBtBcbgD52s0qYYeEPe8KJoGb9Ab0p+EyYehCl+xjC1A==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004d533b789a4af890fa7a82a1fae58c404f9a62a50b49adafab349c513b415087401b4171b803e76b34a9861e10f7bc289a066fd01bd29f84c987a10a5fb18c2d4", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE1TO3iaSviQ+nqCofrljEBPmmKlC0mtr6\ns0nFE7QVCHQBtBcbgD52s0qYYeEPe8KJoGb9Ab0p+EyYehCl+xjC1A==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 386, - "comment": "point at infinity during verify", - "flags": [ + "tcId" : 386, + "comment" : "point at infinity during verify", + "flags" : [ "PointDuplication", "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a0022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c0", - "result": "invalid" + "msg" : "313233343030", + "sig" : "304402207fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a0022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c0", + "result" : "invalid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "043a3150798c8af69d1e6e981f3a45402ba1d732f4be8330c5164f49e10ec555b4221bd842bc5e4d97eff37165f60e3998a424d72a450cf95ea477c78287d0343a", - "wx": "3a3150798c8af69d1e6e981f3a45402ba1d732f4be8330c5164f49e10ec555b4", - "wy": "221bd842bc5e4d97eff37165f60e3998a424d72a450cf95ea477c78287d0343a" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "043a3150798c8af69d1e6e981f3a45402ba1d732f4be8330c5164f49e10ec555b4221bd842bc5e4d97eff37165f60e3998a424d72a450cf95ea477c78287d0343a", + "wx" : "3a3150798c8af69d1e6e981f3a45402ba1d732f4be8330c5164f49e10ec555b4", + "wy" : "221bd842bc5e4d97eff37165f60e3998a424d72a450cf95ea477c78287d0343a" }, - "publicKeyDer": 
"3056301006072a8648ce3d020106052b8104000a034200043a3150798c8af69d1e6e981f3a45402ba1d732f4be8330c5164f49e10ec555b4221bd842bc5e4d97eff37165f60e3998a424d72a450cf95ea477c78287d0343a", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEOjFQeYyK9p0ebpgfOkVAK6HXMvS+gzDF\nFk9J4Q7FVbQiG9hCvF5Nl+/zcWX2DjmYpCTXKkUM+V6kd8eCh9A0Og==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200043a3150798c8af69d1e6e981f3a45402ba1d732f4be8330c5164f49e10ec555b4221bd842bc5e4d97eff37165f60e3998a424d72a450cf95ea477c78287d0343a", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEOjFQeYyK9p0ebpgfOkVAK6HXMvS+gzDF\nFk9J4Q7FVbQiG9hCvF5Nl+/zcWX2DjmYpCTXKkUM+V6kd8eCh9A0Og==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 387, - "comment": "edge case for signature malleability", - "flags": [ + "tcId" : 387, + "comment" : "edge case for signature malleability", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a002207fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a0", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a002207fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a0", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "043b37df5fb347c69a0f17d85c0c7ca83736883a825e13143d0fcfc8101e851e800de3c090b6ca21ba543517330c04b12f948c6badf14a63abffdf4ef8c7537026", - "wx": "3b37df5fb347c69a0f17d85c0c7ca83736883a825e13143d0fcfc8101e851e80", - "wy": "0de3c090b6ca21ba543517330c04b12f948c6badf14a63abffdf4ef8c7537026" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "043b37df5fb347c69a0f17d85c0c7ca83736883a825e13143d0fcfc8101e851e800de3c090b6ca21ba543517330c04b12f948c6badf14a63abffdf4ef8c7537026", + "wx" : "3b37df5fb347c69a0f17d85c0c7ca83736883a825e13143d0fcfc8101e851e80", + "wy" : "0de3c090b6ca21ba543517330c04b12f948c6badf14a63abffdf4ef8c7537026" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200043b37df5fb347c69a0f17d85c0c7ca83736883a825e13143d0fcfc8101e851e800de3c090b6ca21ba543517330c04b12f948c6badf14a63abffdf4ef8c7537026", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEOzffX7NHxpoPF9hcDHyoNzaIOoJeExQ9\nD8/IEB6FHoAN48CQtsohulQ1FzMMBLEvlIxrrfFKY6v/3074x1NwJg==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200043b37df5fb347c69a0f17d85c0c7ca83736883a825e13143d0fcfc8101e851e800de3c090b6ca21ba543517330c04b12f948c6badf14a63abffdf4ef8c7537026", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEOzffX7NHxpoPF9hcDHyoNzaIOoJeExQ9\nD8/IEB6FHoAN48CQtsohulQ1FzMMBLEvlIxrrfFKY6v/3074x1NwJg==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 388, - "comment": "edge case for signature malleability", - "flags": [ + "tcId" : 388, + "comment" : "edge case for signature malleability", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a002207fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a1", - "result": "invalid" + "msg" 
: "313233343030", + "sig" : "304402207fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a002207fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a1", + "result" : "invalid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04feb5163b0ece30ff3e03c7d55c4380fa2fa81ee2c0354942ff6f08c99d0cd82ce87de05ee1bda089d3e4e248fa0f721102acfffdf50e654be281433999df897e", - "wx": "00feb5163b0ece30ff3e03c7d55c4380fa2fa81ee2c0354942ff6f08c99d0cd82c", - "wy": "00e87de05ee1bda089d3e4e248fa0f721102acfffdf50e654be281433999df897e" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04feb5163b0ece30ff3e03c7d55c4380fa2fa81ee2c0354942ff6f08c99d0cd82ce87de05ee1bda089d3e4e248fa0f721102acfffdf50e654be281433999df897e", + "wx" : "00feb5163b0ece30ff3e03c7d55c4380fa2fa81ee2c0354942ff6f08c99d0cd82c", + "wy" : "00e87de05ee1bda089d3e4e248fa0f721102acfffdf50e654be281433999df897e" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004feb5163b0ece30ff3e03c7d55c4380fa2fa81ee2c0354942ff6f08c99d0cd82ce87de05ee1bda089d3e4e248fa0f721102acfffdf50e654be281433999df897e", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE/rUWOw7OMP8+A8fVXEOA+i+oHuLANUlC\n/28IyZ0M2CzofeBe4b2gidPk4kj6D3IRAqz//fUOZUvigUM5md+Jfg==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004feb5163b0ece30ff3e03c7d55c4380fa2fa81ee2c0354942ff6f08c99d0cd82ce87de05ee1bda089d3e4e248fa0f721102acfffdf50e654be281433999df897e", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE/rUWOw7OMP8+A8fVXEOA+i+oHuLANUlC\n/28IyZ0M2CzofeBe4b2gidPk4kj6D3IRAqz//fUOZUvigUM5md+Jfg==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 389, - "comment": "u1 == 1", - "flags": [ + "tcId" : 389, + "comment" : "u1 == 1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215b8022044a5ad0bd0636d9e12bc9e0a6bdd5e1bba77f523842193b3b82e448e05d5f11e", - "result": "valid" + "msg" : "313233343030", + "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215b8022044a5ad0bd0636d9e12bc9e0a6bdd5e1bba77f523842193b3b82e448e05d5f11e", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04238ced001cf22b8853e02edc89cbeca5050ba7e042a7a77f9382cd414922897640683d3094643840f295890aa4c18aa39b41d77dd0fb3bb2700e4f9ec284ffc2", - "wx": "238ced001cf22b8853e02edc89cbeca5050ba7e042a7a77f9382cd4149228976", - "wy": "40683d3094643840f295890aa4c18aa39b41d77dd0fb3bb2700e4f9ec284ffc2" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04238ced001cf22b8853e02edc89cbeca5050ba7e042a7a77f9382cd414922897640683d3094643840f295890aa4c18aa39b41d77dd0fb3bb2700e4f9ec284ffc2", + "wx" : "238ced001cf22b8853e02edc89cbeca5050ba7e042a7a77f9382cd4149228976", + "wy" : "40683d3094643840f295890aa4c18aa39b41d77dd0fb3bb2700e4f9ec284ffc2" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004238ced001cf22b8853e02edc89cbeca5050ba7e042a7a77f9382cd414922897640683d3094643840f295890aa4c18aa39b41d77dd0fb3bb2700e4f9ec284ffc2", - "publicKeyPem": "-----BEGIN PUBLIC 
KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEI4ztABzyK4hT4C7cicvspQULp+BCp6d/\nk4LNQUkiiXZAaD0wlGQ4QPKViQqkwYqjm0HXfdD7O7JwDk+ewoT/wg==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004238ced001cf22b8853e02edc89cbeca5050ba7e042a7a77f9382cd414922897640683d3094643840f295890aa4c18aa39b41d77dd0fb3bb2700e4f9ec284ffc2", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEI4ztABzyK4hT4C7cicvspQULp+BCp6d/\nk4LNQUkiiXZAaD0wlGQ4QPKViQqkwYqjm0HXfdD7O7JwDk+ewoT/wg==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 390, - "comment": "u1 == n - 1", - "flags": [ + "tcId" : 390, + "comment" : "u1 == n - 1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215b8022044a5ad0bd0636d9e12bc9e0a6bdd5e1bba77f523842193b3b82e448e05d5f11e", - "result": "valid" + "msg" : "313233343030", + "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215b8022044a5ad0bd0636d9e12bc9e0a6bdd5e1bba77f523842193b3b82e448e05d5f11e", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04961cf64817c06c0e51b3c2736c922fde18bd8c4906fcd7f5ef66c4678508f35ed2c5d18168cfbe70f2f123bd7419232bb92dd69113e2941061889481c5a027bf", - "wx": "00961cf64817c06c0e51b3c2736c922fde18bd8c4906fcd7f5ef66c4678508f35e", - "wy": "00d2c5d18168cfbe70f2f123bd7419232bb92dd69113e2941061889481c5a027bf" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04961cf64817c06c0e51b3c2736c922fde18bd8c4906fcd7f5ef66c4678508f35ed2c5d18168cfbe70f2f123bd7419232bb92dd69113e2941061889481c5a027bf", + "wx" : "00961cf64817c06c0e51b3c2736c922fde18bd8c4906fcd7f5ef66c4678508f35e", + "wy" : "00d2c5d18168cfbe70f2f123bd7419232bb92dd69113e2941061889481c5a027bf" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004961cf64817c06c0e51b3c2736c922fde18bd8c4906fcd7f5ef66c4678508f35ed2c5d18168cfbe70f2f123bd7419232bb92dd69113e2941061889481c5a027bf", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAElhz2SBfAbA5Rs8JzbJIv3hi9jEkG/Nf1\n72bEZ4UI817SxdGBaM++cPLxI710GSMruS3WkRPilBBhiJSBxaAnvw==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004961cf64817c06c0e51b3c2736c922fde18bd8c4906fcd7f5ef66c4678508f35ed2c5d18168cfbe70f2f123bd7419232bb92dd69113e2941061889481c5a027bf", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAElhz2SBfAbA5Rs8JzbJIv3hi9jEkG/Nf1\n72bEZ4UI817SxdGBaM++cPLxI710GSMruS3WkRPilBBhiJSBxaAnvw==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 391, - "comment": "u2 == 1", - "flags": [ + "tcId" : 391, + "comment" : "u2 == 1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215b8022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215b8", - "result": "valid" + "msg" : "313233343030", + "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215b8022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215b8", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, 
- "uncompressed": "0413681eae168cd4ea7cf2e2a45d052742d10a9f64e796867dbdcb829fe0b1028816528760d177376c09df79de39557c329cc1753517acffe8fa2ec298026b8384", - "wx": "13681eae168cd4ea7cf2e2a45d052742d10a9f64e796867dbdcb829fe0b10288", - "wy": "16528760d177376c09df79de39557c329cc1753517acffe8fa2ec298026b8384" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "0413681eae168cd4ea7cf2e2a45d052742d10a9f64e796867dbdcb829fe0b1028816528760d177376c09df79de39557c329cc1753517acffe8fa2ec298026b8384", + "wx" : "13681eae168cd4ea7cf2e2a45d052742d10a9f64e796867dbdcb829fe0b10288", + "wy" : "16528760d177376c09df79de39557c329cc1753517acffe8fa2ec298026b8384" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000413681eae168cd4ea7cf2e2a45d052742d10a9f64e796867dbdcb829fe0b1028816528760d177376c09df79de39557c329cc1753517acffe8fa2ec298026b8384", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEE2gerhaM1Op88uKkXQUnQtEKn2TnloZ9\nvcuCn+CxAogWUodg0Xc3bAnfed45VXwynMF1NRes/+j6LsKYAmuDhA==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000413681eae168cd4ea7cf2e2a45d052742d10a9f64e796867dbdcb829fe0b1028816528760d177376c09df79de39557c329cc1753517acffe8fa2ec298026b8384", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEE2gerhaM1Op88uKkXQUnQtEKn2TnloZ9\nvcuCn+CxAogWUodg0Xc3bAnfed45VXwynMF1NRes/+j6LsKYAmuDhA==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 392, - "comment": "u2 == n - 1", - "flags": [ + "tcId" : 392, + "comment" : "u2 == n - 1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215b8022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215b8", - "result": "valid" + "msg" : "313233343030", + "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215b8022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215b8", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "045aa7abfdb6b4086d543325e5d79c6e95ce42f866d2bb84909633a04bb1aa31c291c80088794905e1da33336d874e2f91ccf45cc59185bede5dd6f3f7acaae18b", - "wx": "5aa7abfdb6b4086d543325e5d79c6e95ce42f866d2bb84909633a04bb1aa31c2", - "wy": "0091c80088794905e1da33336d874e2f91ccf45cc59185bede5dd6f3f7acaae18b" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "045aa7abfdb6b4086d543325e5d79c6e95ce42f866d2bb84909633a04bb1aa31c291c80088794905e1da33336d874e2f91ccf45cc59185bede5dd6f3f7acaae18b", + "wx" : "5aa7abfdb6b4086d543325e5d79c6e95ce42f866d2bb84909633a04bb1aa31c2", + "wy" : "0091c80088794905e1da33336d874e2f91ccf45cc59185bede5dd6f3f7acaae18b" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200045aa7abfdb6b4086d543325e5d79c6e95ce42f866d2bb84909633a04bb1aa31c291c80088794905e1da33336d874e2f91ccf45cc59185bede5dd6f3f7acaae18b", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEWqer/ba0CG1UMyXl15xulc5C+GbSu4SQ\nljOgS7GqMcKRyACIeUkF4dozM22HTi+RzPRcxZGFvt5d1vP3rKrhiw==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : 
"3056301006072a8648ce3d020106052b8104000a034200045aa7abfdb6b4086d543325e5d79c6e95ce42f866d2bb84909633a04bb1aa31c291c80088794905e1da33336d874e2f91ccf45cc59185bede5dd6f3f7acaae18b", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEWqer/ba0CG1UMyXl15xulc5C+GbSu4SQ\nljOgS7GqMcKRyACIeUkF4dozM22HTi+RzPRcxZGFvt5d1vP3rKrhiw==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 393, - "comment": "edge case for u1", - "flags": [ + "tcId" : 393, + "comment" : "edge case for u1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022016e1e459457679df5b9434ae23f474b3e8d2a70bd6b5dbe692ba16da01f1fb0a", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022016e1e459457679df5b9434ae23f474b3e8d2a70bd6b5dbe692ba16da01f1fb0a", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "0400277791b305a45b2b39590b2f05d3392a6c8182cef4eb540120e0f5c206c3e464108233fb0b8c3ac892d79ef8e0fbf92ed133addb4554270132584dc52eef41", - "wx": "277791b305a45b2b39590b2f05d3392a6c8182cef4eb540120e0f5c206c3e4", - "wy": "64108233fb0b8c3ac892d79ef8e0fbf92ed133addb4554270132584dc52eef41" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "0400277791b305a45b2b39590b2f05d3392a6c8182cef4eb540120e0f5c206c3e464108233fb0b8c3ac892d79ef8e0fbf92ed133addb4554270132584dc52eef41", + "wx" : "277791b305a45b2b39590b2f05d3392a6c8182cef4eb540120e0f5c206c3e4", + "wy" : "64108233fb0b8c3ac892d79ef8e0fbf92ed133addb4554270132584dc52eef41" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000400277791b305a45b2b39590b2f05d3392a6c8182cef4eb540120e0f5c206c3e464108233fb0b8c3ac892d79ef8e0fbf92ed133addb4554270132584dc52eef41", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEACd3kbMFpFsrOVkLLwXTOSpsgYLO9OtU\nASDg9cIGw+RkEIIz+wuMOsiS15744Pv5LtEzrdtFVCcBMlhNxS7vQQ==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000400277791b305a45b2b39590b2f05d3392a6c8182cef4eb540120e0f5c206c3e464108233fb0b8c3ac892d79ef8e0fbf92ed133addb4554270132584dc52eef41", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEACd3kbMFpFsrOVkLLwXTOSpsgYLO9OtU\nASDg9cIGw+RkEIIz+wuMOsiS15744Pv5LtEzrdtFVCcBMlhNxS7vQQ==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 394, - "comment": "edge case for u1", - "flags": [ + "tcId" : 394, + "comment" : "edge case for u1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02201c940f313f92647be257eccd7ed08b0baef3f0478f25871b53635302c5f6314a", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02201c940f313f92647be257eccd7ed08b0baef3f0478f25871b53635302c5f6314a", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "046efa092b68de9460f0bcc919005a5f6e80e19de98968be3cd2c770a9949bfb1ac75e6e5087d6550d5f9beb1e79e5029307bc255235e2d5dc99241ac3ab886c49", - "wx": 
"6efa092b68de9460f0bcc919005a5f6e80e19de98968be3cd2c770a9949bfb1a", - "wy": "00c75e6e5087d6550d5f9beb1e79e5029307bc255235e2d5dc99241ac3ab886c49" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "046efa092b68de9460f0bcc919005a5f6e80e19de98968be3cd2c770a9949bfb1ac75e6e5087d6550d5f9beb1e79e5029307bc255235e2d5dc99241ac3ab886c49", + "wx" : "6efa092b68de9460f0bcc919005a5f6e80e19de98968be3cd2c770a9949bfb1a", + "wy" : "00c75e6e5087d6550d5f9beb1e79e5029307bc255235e2d5dc99241ac3ab886c49" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200046efa092b68de9460f0bcc919005a5f6e80e19de98968be3cd2c770a9949bfb1ac75e6e5087d6550d5f9beb1e79e5029307bc255235e2d5dc99241ac3ab886c49", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEbvoJK2jelGDwvMkZAFpfboDhnemJaL48\n0sdwqZSb+xrHXm5Qh9ZVDV+b6x555QKTB7wlUjXi1dyZJBrDq4hsSQ==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200046efa092b68de9460f0bcc919005a5f6e80e19de98968be3cd2c770a9949bfb1ac75e6e5087d6550d5f9beb1e79e5029307bc255235e2d5dc99241ac3ab886c49", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEbvoJK2jelGDwvMkZAFpfboDhnemJaL48\n0sdwqZSb+xrHXm5Qh9ZVDV+b6x555QKTB7wlUjXi1dyZJBrDq4hsSQ==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 395, - "comment": "edge case for u1", - "flags": [ + "tcId" : 395, + "comment" : "edge case for u1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022015d94a85077b493f91cb7101ec63e1b01be58b594e855f45050a8c14062d689b", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022015d94a85077b493f91cb7101ec63e1b01be58b594e855f45050a8c14062d689b", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "0472d4a19c4f9d2cf5848ea40445b70d4696b5f02d632c0c654cc7d7eeb0c6d058e8c4cd9943e459174c7ac01fa742198e47e6c19a6bdb0c4f6c237831c1b3f942", - "wx": "72d4a19c4f9d2cf5848ea40445b70d4696b5f02d632c0c654cc7d7eeb0c6d058", - "wy": "00e8c4cd9943e459174c7ac01fa742198e47e6c19a6bdb0c4f6c237831c1b3f942" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "0472d4a19c4f9d2cf5848ea40445b70d4696b5f02d632c0c654cc7d7eeb0c6d058e8c4cd9943e459174c7ac01fa742198e47e6c19a6bdb0c4f6c237831c1b3f942", + "wx" : "72d4a19c4f9d2cf5848ea40445b70d4696b5f02d632c0c654cc7d7eeb0c6d058", + "wy" : "00e8c4cd9943e459174c7ac01fa742198e47e6c19a6bdb0c4f6c237831c1b3f942" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000472d4a19c4f9d2cf5848ea40445b70d4696b5f02d632c0c654cc7d7eeb0c6d058e8c4cd9943e459174c7ac01fa742198e47e6c19a6bdb0c4f6c237831c1b3f942", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEctShnE+dLPWEjqQERbcNRpa18C1jLAxl\nTMfX7rDG0FjoxM2ZQ+RZF0x6wB+nQhmOR+bBmmvbDE9sI3gxwbP5Qg==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000472d4a19c4f9d2cf5848ea40445b70d4696b5f02d632c0c654cc7d7eeb0c6d058e8c4cd9943e459174c7ac01fa742198e47e6c19a6bdb0c4f6c237831c1b3f942", + "publicKeyPem" : "-----BEGIN PUBLIC 
KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEctShnE+dLPWEjqQERbcNRpa18C1jLAxl\nTMfX7rDG0FjoxM2ZQ+RZF0x6wB+nQhmOR+bBmmvbDE9sI3gxwbP5Qg==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 396, - "comment": "edge case for u1", - "flags": [ + "tcId" : 396, + "comment" : "edge case for u1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02205b1d27a7694c146244a5ad0bd0636d9d9ef3b9fb58385418d9c982105077d1b7", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02205b1d27a7694c146244a5ad0bd0636d9d9ef3b9fb58385418d9c982105077d1b7", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "042a8ea2f50dcced0c217575bdfa7cd47d1c6f100041ec0e35512794c1be7e740258f8c17122ed303fda7143eb58bede70295b653266013b0b0ebd3f053137f6ec", - "wx": "2a8ea2f50dcced0c217575bdfa7cd47d1c6f100041ec0e35512794c1be7e7402", - "wy": "58f8c17122ed303fda7143eb58bede70295b653266013b0b0ebd3f053137f6ec" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "042a8ea2f50dcced0c217575bdfa7cd47d1c6f100041ec0e35512794c1be7e740258f8c17122ed303fda7143eb58bede70295b653266013b0b0ebd3f053137f6ec", + "wx" : "2a8ea2f50dcced0c217575bdfa7cd47d1c6f100041ec0e35512794c1be7e7402", + "wy" : "58f8c17122ed303fda7143eb58bede70295b653266013b0b0ebd3f053137f6ec" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200042a8ea2f50dcced0c217575bdfa7cd47d1c6f100041ec0e35512794c1be7e740258f8c17122ed303fda7143eb58bede70295b653266013b0b0ebd3f053137f6ec", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEKo6i9Q3M7QwhdXW9+nzUfRxvEABB7A41\nUSeUwb5+dAJY+MFxIu0wP9pxQ+tYvt5wKVtlMmYBOwsOvT8FMTf27A==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200042a8ea2f50dcced0c217575bdfa7cd47d1c6f100041ec0e35512794c1be7e740258f8c17122ed303fda7143eb58bede70295b653266013b0b0ebd3f053137f6ec", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEKo6i9Q3M7QwhdXW9+nzUfRxvEABB7A41\nUSeUwb5+dAJY+MFxIu0wP9pxQ+tYvt5wKVtlMmYBOwsOvT8FMTf27A==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 397, - "comment": "edge case for u1", - "flags": [ + "tcId" : 397, + "comment" : "edge case for u1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02202d85896b3eb9dbb5a52f42f9c9261ed3fc46644ec65f06ade3fd78f257e43432", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02202d85896b3eb9dbb5a52f42f9c9261ed3fc46644ec65f06ade3fd78f257e43432", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "0488de689ce9af1e94be6a2089c8a8b1253ffdbb6c8e9c86249ba220001a4ad3b80c4998e54842f413b9edb1825acbb6335e81e4d184b2b01c8bebdc85d1f28946", - "wx": "0088de689ce9af1e94be6a2089c8a8b1253ffdbb6c8e9c86249ba220001a4ad3b8", - "wy": "0c4998e54842f413b9edb1825acbb6335e81e4d184b2b01c8bebdc85d1f28946" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + 
"keySize" : 256, + "uncompressed" : "0488de689ce9af1e94be6a2089c8a8b1253ffdbb6c8e9c86249ba220001a4ad3b80c4998e54842f413b9edb1825acbb6335e81e4d184b2b01c8bebdc85d1f28946", + "wx" : "0088de689ce9af1e94be6a2089c8a8b1253ffdbb6c8e9c86249ba220001a4ad3b8", + "wy" : "0c4998e54842f413b9edb1825acbb6335e81e4d184b2b01c8bebdc85d1f28946" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000488de689ce9af1e94be6a2089c8a8b1253ffdbb6c8e9c86249ba220001a4ad3b80c4998e54842f413b9edb1825acbb6335e81e4d184b2b01c8bebdc85d1f28946", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEiN5onOmvHpS+aiCJyKixJT/9u2yOnIYk\nm6IgABpK07gMSZjlSEL0E7ntsYJay7YzXoHk0YSysByL69yF0fKJRg==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000488de689ce9af1e94be6a2089c8a8b1253ffdbb6c8e9c86249ba220001a4ad3b80c4998e54842f413b9edb1825acbb6335e81e4d184b2b01c8bebdc85d1f28946", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEiN5onOmvHpS+aiCJyKixJT/9u2yOnIYk\nm6IgABpK07gMSZjlSEL0E7ntsYJay7YzXoHk0YSysByL69yF0fKJRg==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 398, - "comment": "edge case for u1", - "flags": [ + "tcId" : 398, + "comment" : "edge case for u1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02205b0b12d67d73b76b4a5e85f3924c3da7f88cc89d8cbe0d5bc7faf1e4afc86864", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02205b0b12d67d73b76b4a5e85f3924c3da7f88cc89d8cbe0d5bc7faf1e4afc86864", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04fea2d31f70f90d5fb3e00e186ac42ab3c1615cee714e0b4e1131b3d4d8225bf7b037a18df2ac15343f30f74067ddf29e817d5f77f8dce05714da59c094f0cda9", - "wx": "00fea2d31f70f90d5fb3e00e186ac42ab3c1615cee714e0b4e1131b3d4d8225bf7", - "wy": "00b037a18df2ac15343f30f74067ddf29e817d5f77f8dce05714da59c094f0cda9" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04fea2d31f70f90d5fb3e00e186ac42ab3c1615cee714e0b4e1131b3d4d8225bf7b037a18df2ac15343f30f74067ddf29e817d5f77f8dce05714da59c094f0cda9", + "wx" : "00fea2d31f70f90d5fb3e00e186ac42ab3c1615cee714e0b4e1131b3d4d8225bf7", + "wy" : "00b037a18df2ac15343f30f74067ddf29e817d5f77f8dce05714da59c094f0cda9" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004fea2d31f70f90d5fb3e00e186ac42ab3c1615cee714e0b4e1131b3d4d8225bf7b037a18df2ac15343f30f74067ddf29e817d5f77f8dce05714da59c094f0cda9", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE/qLTH3D5DV+z4A4YasQqs8FhXO5xTgtO\nETGz1NgiW/ewN6GN8qwVND8w90Bn3fKegX1fd/jc4FcU2lnAlPDNqQ==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004fea2d31f70f90d5fb3e00e186ac42ab3c1615cee714e0b4e1131b3d4d8225bf7b037a18df2ac15343f30f74067ddf29e817d5f77f8dce05714da59c094f0cda9", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE/qLTH3D5DV+z4A4YasQqs8FhXO5xTgtO\nETGz1NgiW/ewN6GN8qwVND8w90Bn3fKegX1fd/jc4FcU2lnAlPDNqQ==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 399, - "comment": "edge case for u1", - "flags": [ + "tcId" : 399, + 
"comment" : "edge case for u1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc0220694c146244a5ad0bd0636d9e12bc9e09e60e68b90d0b5e6c5dddd0cb694d8799", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc0220694c146244a5ad0bd0636d9e12bc9e09e60e68b90d0b5e6c5dddd0cb694d8799", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "047258911e3d423349166479dbe0b8341af7fbd03d0a7e10edccb36b6ceea5a3db17ac2b8992791128fa3b96dc2fbd4ca3bfa782ef2832fc6656943db18e7346b0", - "wx": "7258911e3d423349166479dbe0b8341af7fbd03d0a7e10edccb36b6ceea5a3db", - "wy": "17ac2b8992791128fa3b96dc2fbd4ca3bfa782ef2832fc6656943db18e7346b0" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "047258911e3d423349166479dbe0b8341af7fbd03d0a7e10edccb36b6ceea5a3db17ac2b8992791128fa3b96dc2fbd4ca3bfa782ef2832fc6656943db18e7346b0", + "wx" : "7258911e3d423349166479dbe0b8341af7fbd03d0a7e10edccb36b6ceea5a3db", + "wy" : "17ac2b8992791128fa3b96dc2fbd4ca3bfa782ef2832fc6656943db18e7346b0" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200047258911e3d423349166479dbe0b8341af7fbd03d0a7e10edccb36b6ceea5a3db17ac2b8992791128fa3b96dc2fbd4ca3bfa782ef2832fc6656943db18e7346b0", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEcliRHj1CM0kWZHnb4Lg0Gvf70D0KfhDt\nzLNrbO6lo9sXrCuJknkRKPo7ltwvvUyjv6eC7ygy/GZWlD2xjnNGsA==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200047258911e3d423349166479dbe0b8341af7fbd03d0a7e10edccb36b6ceea5a3db17ac2b8992791128fa3b96dc2fbd4ca3bfa782ef2832fc6656943db18e7346b0", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEcliRHj1CM0kWZHnb4Lg0Gvf70D0KfhDt\nzLNrbO6lo9sXrCuJknkRKPo7ltwvvUyjv6eC7ygy/GZWlD2xjnNGsA==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 400, - "comment": "edge case for u1", - "flags": [ + "tcId" : 400, + "comment" : "edge case for u1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02203d7f487c07bfc5f30846938a3dcef696444707cf9677254a92b06c63ab867d22", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02203d7f487c07bfc5f30846938a3dcef696444707cf9677254a92b06c63ab867d22", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "044f28461dea64474d6bb34d1499c97d37b9e95633df1ceeeaacd45016c98b3914c8818810b8cc06ddb40e8a1261c528faa589455d5a6df93b77bc5e0e493c7470", - "wx": "4f28461dea64474d6bb34d1499c97d37b9e95633df1ceeeaacd45016c98b3914", - "wy": "00c8818810b8cc06ddb40e8a1261c528faa589455d5a6df93b77bc5e0e493c7470" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "044f28461dea64474d6bb34d1499c97d37b9e95633df1ceeeaacd45016c98b3914c8818810b8cc06ddb40e8a1261c528faa589455d5a6df93b77bc5e0e493c7470", + "wx" : "4f28461dea64474d6bb34d1499c97d37b9e95633df1ceeeaacd45016c98b3914", + "wy" : 
"00c8818810b8cc06ddb40e8a1261c528faa589455d5a6df93b77bc5e0e493c7470" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200044f28461dea64474d6bb34d1499c97d37b9e95633df1ceeeaacd45016c98b3914c8818810b8cc06ddb40e8a1261c528faa589455d5a6df93b77bc5e0e493c7470", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAETyhGHepkR01rs00Umcl9N7npVjPfHO7q\nrNRQFsmLORTIgYgQuMwG3bQOihJhxSj6pYlFXVpt+Tt3vF4OSTx0cA==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200044f28461dea64474d6bb34d1499c97d37b9e95633df1ceeeaacd45016c98b3914c8818810b8cc06ddb40e8a1261c528faa589455d5a6df93b77bc5e0e493c7470", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAETyhGHepkR01rs00Umcl9N7npVjPfHO7q\nrNRQFsmLORTIgYgQuMwG3bQOihJhxSj6pYlFXVpt+Tt3vF4OSTx0cA==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 401, - "comment": "edge case for u1", - "flags": [ + "tcId" : 401, + "comment" : "edge case for u1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02206c7648fc0fbf8a06adb8b839f97b4ff7a800f11b1e37c593b261394599792ba4", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02206c7648fc0fbf8a06adb8b839f97b4ff7a800f11b1e37c593b261394599792ba4", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "0474f2a814fb5d8eca91a69b5e60712732b3937de32829be974ed7b68c5c2f5d66eff0f07c56f987a657f42196205f588c0f1d96fd8a63a5f238b48f478788fe3b", - "wx": "74f2a814fb5d8eca91a69b5e60712732b3937de32829be974ed7b68c5c2f5d66", - "wy": "00eff0f07c56f987a657f42196205f588c0f1d96fd8a63a5f238b48f478788fe3b" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "0474f2a814fb5d8eca91a69b5e60712732b3937de32829be974ed7b68c5c2f5d66eff0f07c56f987a657f42196205f588c0f1d96fd8a63a5f238b48f478788fe3b", + "wx" : "74f2a814fb5d8eca91a69b5e60712732b3937de32829be974ed7b68c5c2f5d66", + "wy" : "00eff0f07c56f987a657f42196205f588c0f1d96fd8a63a5f238b48f478788fe3b" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000474f2a814fb5d8eca91a69b5e60712732b3937de32829be974ed7b68c5c2f5d66eff0f07c56f987a657f42196205f588c0f1d96fd8a63a5f238b48f478788fe3b", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEdPKoFPtdjsqRppteYHEnMrOTfeMoKb6X\nTte2jFwvXWbv8PB8VvmHplf0IZYgX1iMDx2W/YpjpfI4tI9Hh4j+Ow==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000474f2a814fb5d8eca91a69b5e60712732b3937de32829be974ed7b68c5c2f5d66eff0f07c56f987a657f42196205f588c0f1d96fd8a63a5f238b48f478788fe3b", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEdPKoFPtdjsqRppteYHEnMrOTfeMoKb6X\nTte2jFwvXWbv8PB8VvmHplf0IZYgX1iMDx2W/YpjpfI4tI9Hh4j+Ow==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 402, - "comment": "edge case for u1", - "flags": [ + "tcId" : 402, + "comment" : "edge case for u1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc0220641c9c5d790dc09cdd3dfabb62cdf453e69747a7e3d7aa1a714189ef53171a99", - "result": 
"valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc0220641c9c5d790dc09cdd3dfabb62cdf453e69747a7e3d7aa1a714189ef53171a99", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04195b51a7cc4a21b8274a70a90de779814c3c8ca358328208c09a29f336b82d6ab2416b7c92fffdc29c3b1282dd2a77a4d04df7f7452047393d849989c5cee9ad", - "wx": "195b51a7cc4a21b8274a70a90de779814c3c8ca358328208c09a29f336b82d6a", - "wy": "00b2416b7c92fffdc29c3b1282dd2a77a4d04df7f7452047393d849989c5cee9ad" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04195b51a7cc4a21b8274a70a90de779814c3c8ca358328208c09a29f336b82d6ab2416b7c92fffdc29c3b1282dd2a77a4d04df7f7452047393d849989c5cee9ad", + "wx" : "195b51a7cc4a21b8274a70a90de779814c3c8ca358328208c09a29f336b82d6a", + "wy" : "00b2416b7c92fffdc29c3b1282dd2a77a4d04df7f7452047393d849989c5cee9ad" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004195b51a7cc4a21b8274a70a90de779814c3c8ca358328208c09a29f336b82d6ab2416b7c92fffdc29c3b1282dd2a77a4d04df7f7452047393d849989c5cee9ad", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEGVtRp8xKIbgnSnCpDed5gUw8jKNYMoII\nwJop8za4LWqyQWt8kv/9wpw7EoLdKnek0E3390UgRzk9hJmJxc7prQ==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004195b51a7cc4a21b8274a70a90de779814c3c8ca358328208c09a29f336b82d6ab2416b7c92fffdc29c3b1282dd2a77a4d04df7f7452047393d849989c5cee9ad", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEGVtRp8xKIbgnSnCpDed5gUw8jKNYMoII\nwJop8za4LWqyQWt8kv/9wpw7EoLdKnek0E3390UgRzk9hJmJxc7prQ==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 403, - "comment": "edge case for u1", - "flags": [ + "tcId" : 403, + "comment" : "edge case for u1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022029798c5c45bdf58b4a7b2fdc2c46ab4af1218c7eeb9f0f27a88f1267674de3b0", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022029798c5c45bdf58b4a7b2fdc2c46ab4af1218c7eeb9f0f27a88f1267674de3b0", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04622fc74732034bec2ddf3bc16d34b3d1f7a327dd2a8c19bab4bb4fe3a24b58aa736b2f2fae76f4dfaecc9096333b01328d51eb3fda9c9227e90d0b449983c4f0", - "wx": "622fc74732034bec2ddf3bc16d34b3d1f7a327dd2a8c19bab4bb4fe3a24b58aa", - "wy": "736b2f2fae76f4dfaecc9096333b01328d51eb3fda9c9227e90d0b449983c4f0" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04622fc74732034bec2ddf3bc16d34b3d1f7a327dd2a8c19bab4bb4fe3a24b58aa736b2f2fae76f4dfaecc9096333b01328d51eb3fda9c9227e90d0b449983c4f0", + "wx" : "622fc74732034bec2ddf3bc16d34b3d1f7a327dd2a8c19bab4bb4fe3a24b58aa", + "wy" : "736b2f2fae76f4dfaecc9096333b01328d51eb3fda9c9227e90d0b449983c4f0" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004622fc74732034bec2ddf3bc16d34b3d1f7a327dd2a8c19bab4bb4fe3a24b58aa736b2f2fae76f4dfaecc9096333b01328d51eb3fda9c9227e90d0b449983c4f0", - 
"publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEYi/HRzIDS+wt3zvBbTSz0fejJ90qjBm6\ntLtP46JLWKpzay8vrnb0367MkJYzOwEyjVHrP9qckifpDQtEmYPE8A==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004622fc74732034bec2ddf3bc16d34b3d1f7a327dd2a8c19bab4bb4fe3a24b58aa736b2f2fae76f4dfaecc9096333b01328d51eb3fda9c9227e90d0b449983c4f0", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEYi/HRzIDS+wt3zvBbTSz0fejJ90qjBm6\ntLtP46JLWKpzay8vrnb0367MkJYzOwEyjVHrP9qckifpDQtEmYPE8A==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 404, - "comment": "edge case for u1", - "flags": [ + "tcId" : 404, + "comment" : "edge case for u1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02200b70f22ca2bb3cefadca1a5711fa3a59f4695385eb5aedf3495d0b6d00f8fd85", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02200b70f22ca2bb3cefadca1a5711fa3a59f4695385eb5aedf3495d0b6d00f8fd85", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "041f7f85caf2d7550e7af9b65023ebb4dce3450311692309db269969b834b611c70827f45b78020ecbbaf484fdd5bfaae6870f1184c21581baf6ef82bd7b530f93", - "wx": "1f7f85caf2d7550e7af9b65023ebb4dce3450311692309db269969b834b611c7", - "wy": "0827f45b78020ecbbaf484fdd5bfaae6870f1184c21581baf6ef82bd7b530f93" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "041f7f85caf2d7550e7af9b65023ebb4dce3450311692309db269969b834b611c70827f45b78020ecbbaf484fdd5bfaae6870f1184c21581baf6ef82bd7b530f93", + "wx" : "1f7f85caf2d7550e7af9b65023ebb4dce3450311692309db269969b834b611c7", + "wy" : "0827f45b78020ecbbaf484fdd5bfaae6870f1184c21581baf6ef82bd7b530f93" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200041f7f85caf2d7550e7af9b65023ebb4dce3450311692309db269969b834b611c70827f45b78020ecbbaf484fdd5bfaae6870f1184c21581baf6ef82bd7b530f93", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEH3+FyvLXVQ56+bZQI+u03ONFAxFpIwnb\nJplpuDS2EccIJ/RbeAIOy7r0hP3Vv6rmhw8RhMIVgbr274K9e1MPkw==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200041f7f85caf2d7550e7af9b65023ebb4dce3450311692309db269969b834b611c70827f45b78020ecbbaf484fdd5bfaae6870f1184c21581baf6ef82bd7b530f93", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEH3+FyvLXVQ56+bZQI+u03ONFAxFpIwnb\nJplpuDS2EccIJ/RbeAIOy7r0hP3Vv6rmhw8RhMIVgbr274K9e1MPkw==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 405, - "comment": "edge case for u1", - "flags": [ + "tcId" : 405, + "comment" : "edge case for u1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022016e1e459457679df5b9434ae23f474b3e8d2a70bd6b5dbe692ba16da01f1fb0a", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022016e1e459457679df5b9434ae23f474b3e8d2a70bd6b5dbe692ba16da01f1fb0a", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": 
"EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "0449c197dc80ad1da47a4342b93893e8e1fb0bb94fc33a83e783c00b24c781377aefc20da92bac762951f72474becc734d4cc22ba81b895e282fdac4df7af0f37d", - "wx": "49c197dc80ad1da47a4342b93893e8e1fb0bb94fc33a83e783c00b24c781377a", - "wy": "00efc20da92bac762951f72474becc734d4cc22ba81b895e282fdac4df7af0f37d" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "0449c197dc80ad1da47a4342b93893e8e1fb0bb94fc33a83e783c00b24c781377aefc20da92bac762951f72474becc734d4cc22ba81b895e282fdac4df7af0f37d", + "wx" : "49c197dc80ad1da47a4342b93893e8e1fb0bb94fc33a83e783c00b24c781377a", + "wy" : "00efc20da92bac762951f72474becc734d4cc22ba81b895e282fdac4df7af0f37d" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000449c197dc80ad1da47a4342b93893e8e1fb0bb94fc33a83e783c00b24c781377aefc20da92bac762951f72474becc734d4cc22ba81b895e282fdac4df7af0f37d", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEScGX3ICtHaR6Q0K5OJPo4fsLuU/DOoPn\ng8ALJMeBN3rvwg2pK6x2KVH3JHS+zHNNTMIrqBuJXigv2sTfevDzfQ==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000449c197dc80ad1da47a4342b93893e8e1fb0bb94fc33a83e783c00b24c781377aefc20da92bac762951f72474becc734d4cc22ba81b895e282fdac4df7af0f37d", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEScGX3ICtHaR6Q0K5OJPo4fsLuU/DOoPn\ng8ALJMeBN3rvwg2pK6x2KVH3JHS+zHNNTMIrqBuJXigv2sTfevDzfQ==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 406, - "comment": "edge case for u1", - "flags": [ + "tcId" : 406, + "comment" : "edge case for u1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02202252d685e831b6cf095e4f0535eeaf0ddd3bfa91c210c9d9dc17224702eaf88f", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02202252d685e831b6cf095e4f0535eeaf0ddd3bfa91c210c9d9dc17224702eaf88f", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04d8cb68517b616a56400aa3868635e54b6f699598a2f6167757654980baf6acbe7ec8cf449c849aa03461a30efada41453c57c6e6fbc93bbc6fa49ada6dc0555c", - "wx": "00d8cb68517b616a56400aa3868635e54b6f699598a2f6167757654980baf6acbe", - "wy": "7ec8cf449c849aa03461a30efada41453c57c6e6fbc93bbc6fa49ada6dc0555c" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04d8cb68517b616a56400aa3868635e54b6f699598a2f6167757654980baf6acbe7ec8cf449c849aa03461a30efada41453c57c6e6fbc93bbc6fa49ada6dc0555c", + "wx" : "00d8cb68517b616a56400aa3868635e54b6f699598a2f6167757654980baf6acbe", + "wy" : "7ec8cf449c849aa03461a30efada41453c57c6e6fbc93bbc6fa49ada6dc0555c" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004d8cb68517b616a56400aa3868635e54b6f699598a2f6167757654980baf6acbe7ec8cf449c849aa03461a30efada41453c57c6e6fbc93bbc6fa49ada6dc0555c", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE2MtoUXthalZACqOGhjXlS29plZii9hZ3\nV2VJgLr2rL5+yM9EnISaoDRhow762kFFPFfG5vvJO7xvpJrabcBVXA==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : 
"3056301006072a8648ce3d020106052b8104000a03420004d8cb68517b616a56400aa3868635e54b6f699598a2f6167757654980baf6acbe7ec8cf449c849aa03461a30efada41453c57c6e6fbc93bbc6fa49ada6dc0555c", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE2MtoUXthalZACqOGhjXlS29plZii9hZ3\nV2VJgLr2rL5+yM9EnISaoDRhow762kFFPFfG5vvJO7xvpJrabcBVXA==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 407, - "comment": "edge case for u1", - "flags": [ + "tcId" : 407, + "comment" : "edge case for u1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022075135abd7c425b60371a477f09ce0f274f64a8c6b061a07b5d63e93c65046c53", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022075135abd7c425b60371a477f09ce0f274f64a8c6b061a07b5d63e93c65046c53", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04030713fb63f2aa6fe2cadf1b20efc259c77445dafa87dac398b84065ca347df3b227818de1a39b589cb071d83e5317cccdc2338e51e312fe31d8dc34a4801750", - "wx": "030713fb63f2aa6fe2cadf1b20efc259c77445dafa87dac398b84065ca347df3", - "wy": "00b227818de1a39b589cb071d83e5317cccdc2338e51e312fe31d8dc34a4801750" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04030713fb63f2aa6fe2cadf1b20efc259c77445dafa87dac398b84065ca347df3b227818de1a39b589cb071d83e5317cccdc2338e51e312fe31d8dc34a4801750", + "wx" : "030713fb63f2aa6fe2cadf1b20efc259c77445dafa87dac398b84065ca347df3", + "wy" : "00b227818de1a39b589cb071d83e5317cccdc2338e51e312fe31d8dc34a4801750" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004030713fb63f2aa6fe2cadf1b20efc259c77445dafa87dac398b84065ca347df3b227818de1a39b589cb071d83e5317cccdc2338e51e312fe31d8dc34a4801750", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEAwcT+2Pyqm/iyt8bIO/CWcd0Rdr6h9rD\nmLhAZco0ffOyJ4GN4aObWJywcdg+UxfMzcIzjlHjEv4x2Nw0pIAXUA==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004030713fb63f2aa6fe2cadf1b20efc259c77445dafa87dac398b84065ca347df3b227818de1a39b589cb071d83e5317cccdc2338e51e312fe31d8dc34a4801750", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEAwcT+2Pyqm/iyt8bIO/CWcd0Rdr6h9rD\nmLhAZco0ffOyJ4GN4aObWJywcdg+UxfMzcIzjlHjEv4x2Nw0pIAXUA==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 408, - "comment": "edge case for u2", - "flags": [ + "tcId" : 408, + "comment" : "edge case for u2", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02202aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa3e3a49a23a6d8abe95461f8445676b17", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02202aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa3e3a49a23a6d8abe95461f8445676b17", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04babb3677b0955802d8e929a41355640eaf1ea1353f8a771331c4946e3480afa7252f196c87ed3d2a59d3b1b559137fed0013fecefc19fb5a92682b9bca51b950", - "wx": 
"00babb3677b0955802d8e929a41355640eaf1ea1353f8a771331c4946e3480afa7", - "wy": "252f196c87ed3d2a59d3b1b559137fed0013fecefc19fb5a92682b9bca51b950" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04babb3677b0955802d8e929a41355640eaf1ea1353f8a771331c4946e3480afa7252f196c87ed3d2a59d3b1b559137fed0013fecefc19fb5a92682b9bca51b950", + "wx" : "00babb3677b0955802d8e929a41355640eaf1ea1353f8a771331c4946e3480afa7", + "wy" : "252f196c87ed3d2a59d3b1b559137fed0013fecefc19fb5a92682b9bca51b950" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004babb3677b0955802d8e929a41355640eaf1ea1353f8a771331c4946e3480afa7252f196c87ed3d2a59d3b1b559137fed0013fecefc19fb5a92682b9bca51b950", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEurs2d7CVWALY6SmkE1VkDq8eoTU/incT\nMcSUbjSAr6clLxlsh+09KlnTsbVZE3/tABP+zvwZ+1qSaCubylG5UA==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004babb3677b0955802d8e929a41355640eaf1ea1353f8a771331c4946e3480afa7252f196c87ed3d2a59d3b1b559137fed0013fecefc19fb5a92682b9bca51b950", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEurs2d7CVWALY6SmkE1VkDq8eoTU/incT\nMcSUbjSAr6clLxlsh+09KlnTsbVZE3/tABP+zvwZ+1qSaCubylG5UA==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 409, - "comment": "edge case for u2", - "flags": [ + "tcId" : 409, + "comment" : "edge case for u2", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02203e888377ac6c71ac9dec3fdb9b56c9feaf0cfaca9f827fc5eb65fc3eac811210", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02203e888377ac6c71ac9dec3fdb9b56c9feaf0cfaca9f827fc5eb65fc3eac811210", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "041aab2018793471111a8a0e9b143fde02fc95920796d3a63de329b424396fba60bbe4130705174792441b318d3aa31dfe8577821e9b446ec573d272e036c4ebe9", - "wx": "1aab2018793471111a8a0e9b143fde02fc95920796d3a63de329b424396fba60", - "wy": "00bbe4130705174792441b318d3aa31dfe8577821e9b446ec573d272e036c4ebe9" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "041aab2018793471111a8a0e9b143fde02fc95920796d3a63de329b424396fba60bbe4130705174792441b318d3aa31dfe8577821e9b446ec573d272e036c4ebe9", + "wx" : "1aab2018793471111a8a0e9b143fde02fc95920796d3a63de329b424396fba60", + "wy" : "00bbe4130705174792441b318d3aa31dfe8577821e9b446ec573d272e036c4ebe9" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200041aab2018793471111a8a0e9b143fde02fc95920796d3a63de329b424396fba60bbe4130705174792441b318d3aa31dfe8577821e9b446ec573d272e036c4ebe9", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEGqsgGHk0cREaig6bFD/eAvyVkgeW06Y9\n4ym0JDlvumC75BMHBRdHkkQbMY06ox3+hXeCHptEbsVz0nLgNsTr6Q==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200041aab2018793471111a8a0e9b143fde02fc95920796d3a63de329b424396fba60bbe4130705174792441b318d3aa31dfe8577821e9b446ec573d272e036c4ebe9", + "publicKeyPem" : "-----BEGIN PUBLIC 
KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEGqsgGHk0cREaig6bFD/eAvyVkgeW06Y9\n4ym0JDlvumC75BMHBRdHkkQbMY06ox3+hXeCHptEbsVz0nLgNsTr6Q==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 410, - "comment": "edge case for u2", - "flags": [ + "tcId" : 410, + "comment" : "edge case for u2", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022030bbb794db588363b40679f6c182a50d3ce9679acdd3ffbe36d7813dacbdc818", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022030bbb794db588363b40679f6c182a50d3ce9679acdd3ffbe36d7813dacbdc818", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "048cb0b909499c83ea806cd885b1dd467a0119f06a88a0276eb0cfda274535a8ff47b5428833bc3f2c8bf9d9041158cf33718a69961cd01729bc0011d1e586ab75", - "wx": "008cb0b909499c83ea806cd885b1dd467a0119f06a88a0276eb0cfda274535a8ff", - "wy": "47b5428833bc3f2c8bf9d9041158cf33718a69961cd01729bc0011d1e586ab75" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "048cb0b909499c83ea806cd885b1dd467a0119f06a88a0276eb0cfda274535a8ff47b5428833bc3f2c8bf9d9041158cf33718a69961cd01729bc0011d1e586ab75", + "wx" : "008cb0b909499c83ea806cd885b1dd467a0119f06a88a0276eb0cfda274535a8ff", + "wy" : "47b5428833bc3f2c8bf9d9041158cf33718a69961cd01729bc0011d1e586ab75" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200048cb0b909499c83ea806cd885b1dd467a0119f06a88a0276eb0cfda274535a8ff47b5428833bc3f2c8bf9d9041158cf33718a69961cd01729bc0011d1e586ab75", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEjLC5CUmcg+qAbNiFsd1GegEZ8GqIoCdu\nsM/aJ0U1qP9HtUKIM7w/LIv52QQRWM8zcYpplhzQFym8ABHR5YardQ==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200048cb0b909499c83ea806cd885b1dd467a0119f06a88a0276eb0cfda274535a8ff47b5428833bc3f2c8bf9d9041158cf33718a69961cd01729bc0011d1e586ab75", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEjLC5CUmcg+qAbNiFsd1GegEZ8GqIoCdu\nsM/aJ0U1qP9HtUKIM7w/LIv52QQRWM8zcYpplhzQFym8ABHR5YardQ==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 411, - "comment": "edge case for u2", - "flags": [ + "tcId" : 411, + "comment" : "edge case for u2", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02202c37fd995622c4fb7fffffffffffffffc7cee745110cb45ab558ed7c90c15a2f", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02202c37fd995622c4fb7fffffffffffffffc7cee745110cb45ab558ed7c90c15a2f", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "048f03cf1a42272bb1532723093f72e6feeac85e1700e9fbe9a6a2dd642d74bf5d3b89a7189dad8cf75fc22f6f158aa27f9c2ca00daca785be3358f2bda3862ca0", - "wx": "008f03cf1a42272bb1532723093f72e6feeac85e1700e9fbe9a6a2dd642d74bf5d", - "wy": "3b89a7189dad8cf75fc22f6f158aa27f9c2ca00daca785be3358f2bda3862ca0" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + 
"keySize" : 256, + "uncompressed" : "048f03cf1a42272bb1532723093f72e6feeac85e1700e9fbe9a6a2dd642d74bf5d3b89a7189dad8cf75fc22f6f158aa27f9c2ca00daca785be3358f2bda3862ca0", + "wx" : "008f03cf1a42272bb1532723093f72e6feeac85e1700e9fbe9a6a2dd642d74bf5d", + "wy" : "3b89a7189dad8cf75fc22f6f158aa27f9c2ca00daca785be3358f2bda3862ca0" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200048f03cf1a42272bb1532723093f72e6feeac85e1700e9fbe9a6a2dd642d74bf5d3b89a7189dad8cf75fc22f6f158aa27f9c2ca00daca785be3358f2bda3862ca0", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEjwPPGkInK7FTJyMJP3Lm/urIXhcA6fvp\npqLdZC10v107iacYna2M91/CL28ViqJ/nCygDaynhb4zWPK9o4YsoA==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200048f03cf1a42272bb1532723093f72e6feeac85e1700e9fbe9a6a2dd642d74bf5d3b89a7189dad8cf75fc22f6f158aa27f9c2ca00daca785be3358f2bda3862ca0", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEjwPPGkInK7FTJyMJP3Lm/urIXhcA6fvp\npqLdZC10v107iacYna2M91/CL28ViqJ/nCygDaynhb4zWPK9o4YsoA==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 412, - "comment": "edge case for u2", - "flags": [ + "tcId" : 412, + "comment" : "edge case for u2", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02207fd995622c4fb7ffffffffffffffffff5d883ffab5b32652ccdcaa290fccb97d", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02207fd995622c4fb7ffffffffffffffffff5d883ffab5b32652ccdcaa290fccb97d", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "0444de3b9c7a57a8c9e820952753421e7d987bb3d79f71f013805c897e018f8acea2460758c8f98d3fdce121a943659e372c326fff2e5fc2ae7fa3f79daae13c12", - "wx": "44de3b9c7a57a8c9e820952753421e7d987bb3d79f71f013805c897e018f8ace", - "wy": "00a2460758c8f98d3fdce121a943659e372c326fff2e5fc2ae7fa3f79daae13c12" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "0444de3b9c7a57a8c9e820952753421e7d987bb3d79f71f013805c897e018f8acea2460758c8f98d3fdce121a943659e372c326fff2e5fc2ae7fa3f79daae13c12", + "wx" : "44de3b9c7a57a8c9e820952753421e7d987bb3d79f71f013805c897e018f8ace", + "wy" : "00a2460758c8f98d3fdce121a943659e372c326fff2e5fc2ae7fa3f79daae13c12" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000444de3b9c7a57a8c9e820952753421e7d987bb3d79f71f013805c897e018f8acea2460758c8f98d3fdce121a943659e372c326fff2e5fc2ae7fa3f79daae13c12", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAERN47nHpXqMnoIJUnU0IefZh7s9efcfAT\ngFyJfgGPis6iRgdYyPmNP9zhIalDZZ43LDJv/y5fwq5/o/edquE8Eg==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000444de3b9c7a57a8c9e820952753421e7d987bb3d79f71f013805c897e018f8acea2460758c8f98d3fdce121a943659e372c326fff2e5fc2ae7fa3f79daae13c12", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAERN47nHpXqMnoIJUnU0IefZh7s9efcfAT\ngFyJfgGPis6iRgdYyPmNP9zhIalDZZ43LDJv/y5fwq5/o/edquE8Eg==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 413, - "comment": "edge case for u2", - "flags": [ + "tcId" : 413, + 
"comment" : "edge case for u2", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304302207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc021f4cd53ba7608fffffffffffffffffffff9e5cf143e2539626190a3ab09cce47", - "result": "valid" + "msg" : "313233343030", + "sig" : "304302207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc021f4cd53ba7608fffffffffffffffffffff9e5cf143e2539626190a3ab09cce47", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "046fb8b2b48e33031268ad6a517484dc8839ea90f6669ea0c7ac3233e2ac31394a0ac8bbe7f73c2ff4df9978727ac1dfc2fd58647d20f31f99105316b64671f204", - "wx": "6fb8b2b48e33031268ad6a517484dc8839ea90f6669ea0c7ac3233e2ac31394a", - "wy": "0ac8bbe7f73c2ff4df9978727ac1dfc2fd58647d20f31f99105316b64671f204" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "046fb8b2b48e33031268ad6a517484dc8839ea90f6669ea0c7ac3233e2ac31394a0ac8bbe7f73c2ff4df9978727ac1dfc2fd58647d20f31f99105316b64671f204", + "wx" : "6fb8b2b48e33031268ad6a517484dc8839ea90f6669ea0c7ac3233e2ac31394a", + "wy" : "0ac8bbe7f73c2ff4df9978727ac1dfc2fd58647d20f31f99105316b64671f204" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200046fb8b2b48e33031268ad6a517484dc8839ea90f6669ea0c7ac3233e2ac31394a0ac8bbe7f73c2ff4df9978727ac1dfc2fd58647d20f31f99105316b64671f204", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEb7iytI4zAxJorWpRdITciDnqkPZmnqDH\nrDIz4qwxOUoKyLvn9zwv9N+ZeHJ6wd/C/VhkfSDzH5kQUxa2RnHyBA==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200046fb8b2b48e33031268ad6a517484dc8839ea90f6669ea0c7ac3233e2ac31394a0ac8bbe7f73c2ff4df9978727ac1dfc2fd58647d20f31f99105316b64671f204", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEb7iytI4zAxJorWpRdITciDnqkPZmnqDH\nrDIz4qwxOUoKyLvn9zwv9N+ZeHJ6wd/C/VhkfSDzH5kQUxa2RnHyBA==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 414, - "comment": "edge case for u2", - "flags": [ + "tcId" : 414, + "comment" : "edge case for u2", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02205622c4fb7fffffffffffffffffffffff928a8f1c7ac7bec1808b9f61c01ec327", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02205622c4fb7fffffffffffffffffffffff928a8f1c7ac7bec1808b9f61c01ec327", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04bea71122a048693e905ff602b3cf9dd18af69b9fc9d8431d2b1dd26b942c95e6f43c7b8b95eb62082c12db9dbda7fe38e45cbe4a4886907fb81bdb0c5ea9246c", - "wx": "00bea71122a048693e905ff602b3cf9dd18af69b9fc9d8431d2b1dd26b942c95e6", - "wy": "00f43c7b8b95eb62082c12db9dbda7fe38e45cbe4a4886907fb81bdb0c5ea9246c" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04bea71122a048693e905ff602b3cf9dd18af69b9fc9d8431d2b1dd26b942c95e6f43c7b8b95eb62082c12db9dbda7fe38e45cbe4a4886907fb81bdb0c5ea9246c", + "wx" : "00bea71122a048693e905ff602b3cf9dd18af69b9fc9d8431d2b1dd26b942c95e6", + "wy" : 
"00f43c7b8b95eb62082c12db9dbda7fe38e45cbe4a4886907fb81bdb0c5ea9246c" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004bea71122a048693e905ff602b3cf9dd18af69b9fc9d8431d2b1dd26b942c95e6f43c7b8b95eb62082c12db9dbda7fe38e45cbe4a4886907fb81bdb0c5ea9246c", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEvqcRIqBIaT6QX/YCs8+d0Yr2m5/J2EMd\nKx3Sa5Qsleb0PHuLletiCCwS2529p/445Fy+SkiGkH+4G9sMXqkkbA==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004bea71122a048693e905ff602b3cf9dd18af69b9fc9d8431d2b1dd26b942c95e6f43c7b8b95eb62082c12db9dbda7fe38e45cbe4a4886907fb81bdb0c5ea9246c", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEvqcRIqBIaT6QX/YCs8+d0Yr2m5/J2EMd\nKx3Sa5Qsleb0PHuLletiCCwS2529p/445Fy+SkiGkH+4G9sMXqkkbA==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 415, - "comment": "edge case for u2", - "flags": [ + "tcId" : 415, + "comment" : "edge case for u2", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022044104104104104104104104104104103b87853fd3b7d3f8e175125b4382f25ed", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc022044104104104104104104104104104103b87853fd3b7d3f8e175125b4382f25ed", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04da918c731ba06a20cb94ef33b778e981a404a305f1941fe33666b45b03353156e2bb2694f575b45183be78e5c9b5210bf3bf488fd4c8294516d89572ca4f5391", - "wx": "00da918c731ba06a20cb94ef33b778e981a404a305f1941fe33666b45b03353156", - "wy": "00e2bb2694f575b45183be78e5c9b5210bf3bf488fd4c8294516d89572ca4f5391" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04da918c731ba06a20cb94ef33b778e981a404a305f1941fe33666b45b03353156e2bb2694f575b45183be78e5c9b5210bf3bf488fd4c8294516d89572ca4f5391", + "wx" : "00da918c731ba06a20cb94ef33b778e981a404a305f1941fe33666b45b03353156", + "wy" : "00e2bb2694f575b45183be78e5c9b5210bf3bf488fd4c8294516d89572ca4f5391" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004da918c731ba06a20cb94ef33b778e981a404a305f1941fe33666b45b03353156e2bb2694f575b45183be78e5c9b5210bf3bf488fd4c8294516d89572ca4f5391", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE2pGMcxugaiDLlO8zt3jpgaQEowXxlB/j\nNma0WwM1MVbiuyaU9XW0UYO+eOXJtSEL879Ij9TIKUUW2JVyyk9TkQ==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004da918c731ba06a20cb94ef33b778e981a404a305f1941fe33666b45b03353156e2bb2694f575b45183be78e5c9b5210bf3bf488fd4c8294516d89572ca4f5391", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE2pGMcxugaiDLlO8zt3jpgaQEowXxlB/j\nNma0WwM1MVbiuyaU9XW0UYO+eOXJtSEL879Ij9TIKUUW2JVyyk9TkQ==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 416, - "comment": "edge case for u2", - "flags": [ + "tcId" : 416, + "comment" : "edge case for u2", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02202739ce739ce739ce739ce739ce739ce705560298d1f2f08dc419ac273a5b54d9", - "result": 
"valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02202739ce739ce739ce739ce739ce739ce705560298d1f2f08dc419ac273a5b54d9", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "043007e92c3937dade7964dfa35b0eff031f7eb02aed0a0314411106cdeb70fe3d5a7546fc0552997b20e3d6f413e75e2cb66e116322697114b79bac734bfc4dc5", - "wx": "3007e92c3937dade7964dfa35b0eff031f7eb02aed0a0314411106cdeb70fe3d", - "wy": "5a7546fc0552997b20e3d6f413e75e2cb66e116322697114b79bac734bfc4dc5" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "043007e92c3937dade7964dfa35b0eff031f7eb02aed0a0314411106cdeb70fe3d5a7546fc0552997b20e3d6f413e75e2cb66e116322697114b79bac734bfc4dc5", + "wx" : "3007e92c3937dade7964dfa35b0eff031f7eb02aed0a0314411106cdeb70fe3d", + "wy" : "5a7546fc0552997b20e3d6f413e75e2cb66e116322697114b79bac734bfc4dc5" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200043007e92c3937dade7964dfa35b0eff031f7eb02aed0a0314411106cdeb70fe3d5a7546fc0552997b20e3d6f413e75e2cb66e116322697114b79bac734bfc4dc5", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEMAfpLDk32t55ZN+jWw7/Ax9+sCrtCgMU\nQREGzetw/j1adUb8BVKZeyDj1vQT514stm4RYyJpcRS3m6xzS/xNxQ==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200043007e92c3937dade7964dfa35b0eff031f7eb02aed0a0314411106cdeb70fe3d5a7546fc0552997b20e3d6f413e75e2cb66e116322697114b79bac734bfc4dc5", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEMAfpLDk32t55ZN+jWw7/Ax9+sCrtCgMU\nQREGzetw/j1adUb8BVKZeyDj1vQT514stm4RYyJpcRS3m6xzS/xNxQ==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 417, - "comment": "edge case for u2", - "flags": [ + "tcId" : 417, + "comment" : "edge case for u2", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02204888888888888888888888888888888831c83ae82ebe0898776b4c69d11f88de", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02204888888888888888888888888888888831c83ae82ebe0898776b4c69d11f88de", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "0460e734ef5624d3cbf0ddd375011bd663d6d6aebc644eb599fdf98dbdcd18ce9bd2d90b3ac31f139af832cccf6ccbbb2c6ea11fa97370dc9906da474d7d8a7567", - "wx": "60e734ef5624d3cbf0ddd375011bd663d6d6aebc644eb599fdf98dbdcd18ce9b", - "wy": "00d2d90b3ac31f139af832cccf6ccbbb2c6ea11fa97370dc9906da474d7d8a7567" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "0460e734ef5624d3cbf0ddd375011bd663d6d6aebc644eb599fdf98dbdcd18ce9bd2d90b3ac31f139af832cccf6ccbbb2c6ea11fa97370dc9906da474d7d8a7567", + "wx" : "60e734ef5624d3cbf0ddd375011bd663d6d6aebc644eb599fdf98dbdcd18ce9b", + "wy" : "00d2d90b3ac31f139af832cccf6ccbbb2c6ea11fa97370dc9906da474d7d8a7567" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000460e734ef5624d3cbf0ddd375011bd663d6d6aebc644eb599fdf98dbdcd18ce9bd2d90b3ac31f139af832cccf6ccbbb2c6ea11fa97370dc9906da474d7d8a7567", - 
"publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEYOc071Yk08vw3dN1ARvWY9bWrrxkTrWZ\n/fmNvc0YzpvS2Qs6wx8TmvgyzM9sy7ssbqEfqXNw3JkG2kdNfYp1Zw==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000460e734ef5624d3cbf0ddd375011bd663d6d6aebc644eb599fdf98dbdcd18ce9bd2d90b3ac31f139af832cccf6ccbbb2c6ea11fa97370dc9906da474d7d8a7567", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEYOc071Yk08vw3dN1ARvWY9bWrrxkTrWZ\n/fmNvc0YzpvS2Qs6wx8TmvgyzM9sy7ssbqEfqXNw3JkG2kdNfYp1Zw==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 418, - "comment": "edge case for u2", - "flags": [ + "tcId" : 418, + "comment" : "edge case for u2", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02206492492492492492492492492492492406dd3a19b8d5fb875235963c593bd2d3", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02206492492492492492492492492492492406dd3a19b8d5fb875235963c593bd2d3", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "0485a900e97858f693c0b7dfa261e380dad6ea046d1f65ddeeedd5f7d8af0ba33769744d15add4f6c0bc3b0da2aec93b34cb8c65f9340ddf74e7b0009eeeccce3c", - "wx": "0085a900e97858f693c0b7dfa261e380dad6ea046d1f65ddeeedd5f7d8af0ba337", - "wy": "69744d15add4f6c0bc3b0da2aec93b34cb8c65f9340ddf74e7b0009eeeccce3c" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "0485a900e97858f693c0b7dfa261e380dad6ea046d1f65ddeeedd5f7d8af0ba33769744d15add4f6c0bc3b0da2aec93b34cb8c65f9340ddf74e7b0009eeeccce3c", + "wx" : "0085a900e97858f693c0b7dfa261e380dad6ea046d1f65ddeeedd5f7d8af0ba337", + "wy" : "69744d15add4f6c0bc3b0da2aec93b34cb8c65f9340ddf74e7b0009eeeccce3c" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000485a900e97858f693c0b7dfa261e380dad6ea046d1f65ddeeedd5f7d8af0ba33769744d15add4f6c0bc3b0da2aec93b34cb8c65f9340ddf74e7b0009eeeccce3c", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEhakA6XhY9pPAt9+iYeOA2tbqBG0fZd3u\n7dX32K8LozdpdE0VrdT2wLw7DaKuyTs0y4xl+TQN33TnsACe7szOPA==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000485a900e97858f693c0b7dfa261e380dad6ea046d1f65ddeeedd5f7d8af0ba33769744d15add4f6c0bc3b0da2aec93b34cb8c65f9340ddf74e7b0009eeeccce3c", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEhakA6XhY9pPAt9+iYeOA2tbqBG0fZd3u\n7dX32K8LozdpdE0VrdT2wLw7DaKuyTs0y4xl+TQN33TnsACe7szOPA==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 419, - "comment": "edge case for u2", - "flags": [ + "tcId" : 419, + "comment" : "edge case for u2", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02206aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa3e3a49a23a6d8abe95461f8445676b15", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02206aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa3e3a49a23a6d8abe95461f8445676b15", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - 
"type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "0438066f75d88efc4c93de36f49e037b234cc18b1de5608750a62cab0345401046a3e84bed8cfcb819ef4d550444f2ce4b651766b69e2e2901f88836ff90034fed", - "wx": "38066f75d88efc4c93de36f49e037b234cc18b1de5608750a62cab0345401046", - "wy": "00a3e84bed8cfcb819ef4d550444f2ce4b651766b69e2e2901f88836ff90034fed" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "0438066f75d88efc4c93de36f49e037b234cc18b1de5608750a62cab0345401046a3e84bed8cfcb819ef4d550444f2ce4b651766b69e2e2901f88836ff90034fed", + "wx" : "38066f75d88efc4c93de36f49e037b234cc18b1de5608750a62cab0345401046", + "wy" : "00a3e84bed8cfcb819ef4d550444f2ce4b651766b69e2e2901f88836ff90034fed" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000438066f75d88efc4c93de36f49e037b234cc18b1de5608750a62cab0345401046a3e84bed8cfcb819ef4d550444f2ce4b651766b69e2e2901f88836ff90034fed", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEOAZvddiO/EyT3jb0ngN7I0zBix3lYIdQ\npiyrA0VAEEaj6EvtjPy4Ge9NVQRE8s5LZRdmtp4uKQH4iDb/kANP7Q==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000438066f75d88efc4c93de36f49e037b234cc18b1de5608750a62cab0345401046a3e84bed8cfcb819ef4d550444f2ce4b651766b69e2e2901f88836ff90034fed", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEOAZvddiO/EyT3jb0ngN7I0zBix3lYIdQ\npiyrA0VAEEaj6EvtjPy4Ge9NVQRE8s5LZRdmtp4uKQH4iDb/kANP7Q==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 420, - "comment": "edge case for u2", - "flags": [ + "tcId" : 420, + "comment" : "edge case for u2", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02202aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa3e3a49a23a6d8abe95461f8445676b17", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02202aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa3e3a49a23a6d8abe95461f8445676b17", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "0498f68177dc95c1b4cbfa5245488ca523a7d5629470d035d621a443c72f39aabfa33d29546fa1c648f2c7d5ccf70cf1ce4ab79b5db1ac059dbecd068dbdff1b89", - "wx": "0098f68177dc95c1b4cbfa5245488ca523a7d5629470d035d621a443c72f39aabf", - "wy": "00a33d29546fa1c648f2c7d5ccf70cf1ce4ab79b5db1ac059dbecd068dbdff1b89" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "0498f68177dc95c1b4cbfa5245488ca523a7d5629470d035d621a443c72f39aabfa33d29546fa1c648f2c7d5ccf70cf1ce4ab79b5db1ac059dbecd068dbdff1b89", + "wx" : "0098f68177dc95c1b4cbfa5245488ca523a7d5629470d035d621a443c72f39aabf", + "wy" : "00a33d29546fa1c648f2c7d5ccf70cf1ce4ab79b5db1ac059dbecd068dbdff1b89" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000498f68177dc95c1b4cbfa5245488ca523a7d5629470d035d621a443c72f39aabfa33d29546fa1c648f2c7d5ccf70cf1ce4ab79b5db1ac059dbecd068dbdff1b89", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEmPaBd9yVwbTL+lJFSIylI6fVYpRw0DXW\nIaRDxy85qr+jPSlUb6HGSPLH1cz3DPHOSrebXbGsBZ2+zQaNvf8biQ==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : 
"3056301006072a8648ce3d020106052b8104000a0342000498f68177dc95c1b4cbfa5245488ca523a7d5629470d035d621a443c72f39aabfa33d29546fa1c648f2c7d5ccf70cf1ce4ab79b5db1ac059dbecd068dbdff1b89", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEmPaBd9yVwbTL+lJFSIylI6fVYpRw0DXW\nIaRDxy85qr+jPSlUb6HGSPLH1cz3DPHOSrebXbGsBZ2+zQaNvf8biQ==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 421, - "comment": "edge case for u2", - "flags": [ + "tcId" : 421, + "comment" : "edge case for u2", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02203ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc02203ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "045c2bbfa23c9b9ad07f038aa89b4930bf267d9401e4255de9e8da0a5078ec8277e3e882a31d5e6a379e0793983ccded39b95c4353ab2ff01ea5369ba47b0c3191", - "wx": "5c2bbfa23c9b9ad07f038aa89b4930bf267d9401e4255de9e8da0a5078ec8277", - "wy": "00e3e882a31d5e6a379e0793983ccded39b95c4353ab2ff01ea5369ba47b0c3191" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "045c2bbfa23c9b9ad07f038aa89b4930bf267d9401e4255de9e8da0a5078ec8277e3e882a31d5e6a379e0793983ccded39b95c4353ab2ff01ea5369ba47b0c3191", + "wx" : "5c2bbfa23c9b9ad07f038aa89b4930bf267d9401e4255de9e8da0a5078ec8277", + "wy" : "00e3e882a31d5e6a379e0793983ccded39b95c4353ab2ff01ea5369ba47b0c3191" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200045c2bbfa23c9b9ad07f038aa89b4930bf267d9401e4255de9e8da0a5078ec8277e3e882a31d5e6a379e0793983ccded39b95c4353ab2ff01ea5369ba47b0c3191", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEXCu/ojybmtB/A4qom0kwvyZ9lAHkJV3p\n6NoKUHjsgnfj6IKjHV5qN54Hk5g8ze05uVxDU6sv8B6lNpukewwxkQ==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200045c2bbfa23c9b9ad07f038aa89b4930bf267d9401e4255de9e8da0a5078ec8277e3e882a31d5e6a379e0793983ccded39b95c4353ab2ff01ea5369ba47b0c3191", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEXCu/ojybmtB/A4qom0kwvyZ9lAHkJV3p\n6NoKUHjsgnfj6IKjHV5qN54Hk5g8ze05uVxDU6sv8B6lNpukewwxkQ==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 422, - "comment": "edge case for u2", - "flags": [ + "tcId" : 422, + "comment" : "edge case for u2", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc0220185ddbca6dac41b1da033cfb60c152869e74b3cd66e9ffdf1b6bc09ed65ee40c", - "result": "valid" + "msg" : "313233343030", + "sig" : "304402207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc0220185ddbca6dac41b1da033cfb60c152869e74b3cd66e9ffdf1b6bc09ed65ee40c", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "042ea7133432339c69d27f9b267281bd2ddd5f19d6338d400a05cd3647b157a3853547808298448edb5e701ade84cd5fb1ac9567ba5e8fb68a6b933ec4b5cc84cc", - "wx": 
"2ea7133432339c69d27f9b267281bd2ddd5f19d6338d400a05cd3647b157a385", - "wy": "3547808298448edb5e701ade84cd5fb1ac9567ba5e8fb68a6b933ec4b5cc84cc" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "042ea7133432339c69d27f9b267281bd2ddd5f19d6338d400a05cd3647b157a3853547808298448edb5e701ade84cd5fb1ac9567ba5e8fb68a6b933ec4b5cc84cc", + "wx" : "2ea7133432339c69d27f9b267281bd2ddd5f19d6338d400a05cd3647b157a385", + "wy" : "3547808298448edb5e701ade84cd5fb1ac9567ba5e8fb68a6b933ec4b5cc84cc" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200042ea7133432339c69d27f9b267281bd2ddd5f19d6338d400a05cd3647b157a3853547808298448edb5e701ade84cd5fb1ac9567ba5e8fb68a6b933ec4b5cc84cc", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAELqcTNDIznGnSf5smcoG9Ld1fGdYzjUAK\nBc02R7FXo4U1R4CCmESO215wGt6EzV+xrJVnul6Ptoprkz7EtcyEzA==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200042ea7133432339c69d27f9b267281bd2ddd5f19d6338d400a05cd3647b157a3853547808298448edb5e701ade84cd5fb1ac9567ba5e8fb68a6b933ec4b5cc84cc", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAELqcTNDIznGnSf5smcoG9Ld1fGdYzjUAK\nBc02R7FXo4U1R4CCmESO215wGt6EzV+xrJVnul6Ptoprkz7EtcyEzA==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 423, - "comment": "point duplication during verification", - "flags": [ + "tcId" : 423, + "comment" : "point duplication during verification", + "flags" : [ "PointDuplication" ], - "msg": "313233343030", - "sig": "3044022032b0d10d8d0e04bc8d4d064d270699e87cffc9b49c5c20730e1c26f6105ddcda022029ed3d67b3d505be95580d77d5b792b436881179b2b6b2e04c5fe592d38d82d9", - "result": "valid" + "msg" : "313233343030", + "sig" : "3044022032b0d10d8d0e04bc8d4d064d270699e87cffc9b49c5c20730e1c26f6105ddcda022029ed3d67b3d505be95580d77d5b792b436881179b2b6b2e04c5fe592d38d82d9", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "042ea7133432339c69d27f9b267281bd2ddd5f19d6338d400a05cd3647b157a385cab87f7d67bb7124a18fe5217b32a04e536a9845a1704975946cc13a4a337763", - "wx": "2ea7133432339c69d27f9b267281bd2ddd5f19d6338d400a05cd3647b157a385", - "wy": "00cab87f7d67bb7124a18fe5217b32a04e536a9845a1704975946cc13a4a337763" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "042ea7133432339c69d27f9b267281bd2ddd5f19d6338d400a05cd3647b157a385cab87f7d67bb7124a18fe5217b32a04e536a9845a1704975946cc13a4a337763", + "wx" : "2ea7133432339c69d27f9b267281bd2ddd5f19d6338d400a05cd3647b157a385", + "wy" : "00cab87f7d67bb7124a18fe5217b32a04e536a9845a1704975946cc13a4a337763" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200042ea7133432339c69d27f9b267281bd2ddd5f19d6338d400a05cd3647b157a385cab87f7d67bb7124a18fe5217b32a04e536a9845a1704975946cc13a4a337763", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAELqcTNDIznGnSf5smcoG9Ld1fGdYzjUAK\nBc02R7FXo4XKuH99Z7txJKGP5SF7MqBOU2qYRaFwSXWUbME6SjN3Yw==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200042ea7133432339c69d27f9b267281bd2ddd5f19d6338d400a05cd3647b157a385cab87f7d67bb7124a18fe5217b32a04e536a9845a1704975946cc13a4a337763", + "publicKeyPem" : "-----BEGIN 
PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAELqcTNDIznGnSf5smcoG9Ld1fGdYzjUAK\nBc02R7FXo4XKuH99Z7txJKGP5SF7MqBOU2qYRaFwSXWUbME6SjN3Yw==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 424, - "comment": "duplication bug", - "flags": [ + "tcId" : 424, + "comment" : "duplication bug", + "flags" : [ "PointDuplication" ], - "msg": "313233343030", - "sig": "3044022032b0d10d8d0e04bc8d4d064d270699e87cffc9b49c5c20730e1c26f6105ddcda022029ed3d67b3d505be95580d77d5b792b436881179b2b6b2e04c5fe592d38d82d9", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3044022032b0d10d8d0e04bc8d4d064d270699e87cffc9b49c5c20730e1c26f6105ddcda022029ed3d67b3d505be95580d77d5b792b436881179b2b6b2e04c5fe592d38d82d9", + "result" : "invalid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "048aa2c64fa9c6437563abfbcbd00b2048d48c18c152a2a6f49036de7647ebe82e1ce64387995c68a060fa3bc0399b05cc06eec7d598f75041a4917e692b7f51ff", - "wx": "008aa2c64fa9c6437563abfbcbd00b2048d48c18c152a2a6f49036de7647ebe82e", - "wy": "1ce64387995c68a060fa3bc0399b05cc06eec7d598f75041a4917e692b7f51ff" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "048aa2c64fa9c6437563abfbcbd00b2048d48c18c152a2a6f49036de7647ebe82e1ce64387995c68a060fa3bc0399b05cc06eec7d598f75041a4917e692b7f51ff", + "wx" : "008aa2c64fa9c6437563abfbcbd00b2048d48c18c152a2a6f49036de7647ebe82e", + "wy" : "1ce64387995c68a060fa3bc0399b05cc06eec7d598f75041a4917e692b7f51ff" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200048aa2c64fa9c6437563abfbcbd00b2048d48c18c152a2a6f49036de7647ebe82e1ce64387995c68a060fa3bc0399b05cc06eec7d598f75041a4917e692b7f51ff", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEiqLGT6nGQ3Vjq/vL0AsgSNSMGMFSoqb0\nkDbedkfr6C4c5kOHmVxooGD6O8A5mwXMBu7H1Zj3UEGkkX5pK39R/w==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200048aa2c64fa9c6437563abfbcbd00b2048d48c18c152a2a6f49036de7647ebe82e1ce64387995c68a060fa3bc0399b05cc06eec7d598f75041a4917e692b7f51ff", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEiqLGT6nGQ3Vjq/vL0AsgSNSMGMFSoqb0\nkDbedkfr6C4c5kOHmVxooGD6O8A5mwXMBu7H1Zj3UEGkkX5pK39R/w==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 425, - "comment": "comparison with point at infinity ", - "flags": [ + "tcId" : 425, + "comment" : "comparison with point at infinity ", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c0022033333333333333333333333333333332f222f8faefdb533f265d461c29a47373", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3044022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c0022033333333333333333333333333333332f222f8faefdb533f265d461c29a47373", + "result" : "invalid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04391427ff7ee78013c14aec7d96a8a062209298a783835e94fd6549d502fff71fdd6624ec343ad9fcf4d9872181e59f842f9ba4cccae09a6c0972fb6ac6b4c6bd", - "wx": "391427ff7ee78013c14aec7d96a8a062209298a783835e94fd6549d502fff71f", - "wy": "00dd6624ec343ad9fcf4d9872181e59f842f9ba4cccae09a6c0972fb6ac6b4c6bd" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + 
"type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04391427ff7ee78013c14aec7d96a8a062209298a783835e94fd6549d502fff71fdd6624ec343ad9fcf4d9872181e59f842f9ba4cccae09a6c0972fb6ac6b4c6bd", + "wx" : "391427ff7ee78013c14aec7d96a8a062209298a783835e94fd6549d502fff71f", + "wy" : "00dd6624ec343ad9fcf4d9872181e59f842f9ba4cccae09a6c0972fb6ac6b4c6bd" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004391427ff7ee78013c14aec7d96a8a062209298a783835e94fd6549d502fff71fdd6624ec343ad9fcf4d9872181e59f842f9ba4cccae09a6c0972fb6ac6b4c6bd", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEORQn/37ngBPBSux9lqigYiCSmKeDg16U\n/WVJ1QL/9x/dZiTsNDrZ/PTZhyGB5Z+EL5ukzMrgmmwJcvtqxrTGvQ==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004391427ff7ee78013c14aec7d96a8a062209298a783835e94fd6549d502fff71fdd6624ec343ad9fcf4d9872181e59f842f9ba4cccae09a6c0972fb6ac6b4c6bd", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEORQn/37ngBPBSux9lqigYiCSmKeDg16U\n/WVJ1QL/9x/dZiTsNDrZ/PTZhyGB5Z+EL5ukzMrgmmwJcvtqxrTGvQ==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 426, - "comment": "extreme value for k and edgecase s", - "flags": [ + "tcId" : 426, + "comment" : "extreme value for k and edgecase s", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "3045022100c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c0", - "result": "valid" + "msg" : "313233343030", + "sig" : "3045022100c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c0", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04e762b8a219b4f180219cc7a9059245e4961bd191c03899789c7a34b89e8c138ec1533ef0419bb7376e0bfde9319d10a06968791d9ea0eed9c1ce6345aed9759e", - "wx": "00e762b8a219b4f180219cc7a9059245e4961bd191c03899789c7a34b89e8c138e", - "wy": "00c1533ef0419bb7376e0bfde9319d10a06968791d9ea0eed9c1ce6345aed9759e" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04e762b8a219b4f180219cc7a9059245e4961bd191c03899789c7a34b89e8c138ec1533ef0419bb7376e0bfde9319d10a06968791d9ea0eed9c1ce6345aed9759e", + "wx" : "00e762b8a219b4f180219cc7a9059245e4961bd191c03899789c7a34b89e8c138e", + "wy" : "00c1533ef0419bb7376e0bfde9319d10a06968791d9ea0eed9c1ce6345aed9759e" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004e762b8a219b4f180219cc7a9059245e4961bd191c03899789c7a34b89e8c138ec1533ef0419bb7376e0bfde9319d10a06968791d9ea0eed9c1ce6345aed9759e", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE52K4ohm08YAhnMepBZJF5JYb0ZHAOJl4\nnHo0uJ6ME47BUz7wQZu3N24L/ekxnRCgaWh5HZ6g7tnBzmNFrtl1ng==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004e762b8a219b4f180219cc7a9059245e4961bd191c03899789c7a34b89e8c138ec1533ef0419bb7376e0bfde9319d10a06968791d9ea0eed9c1ce6345aed9759e", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE52K4ohm08YAhnMepBZJF5JYb0ZHAOJl4\nnHo0uJ6ME47BUz7wQZu3N24L/ekxnRCgaWh5HZ6g7tnBzmNFrtl1ng==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + 
"tests" : [ { - "tcId": 427, - "comment": "extreme value for k and s^-1", - "flags": [ + "tcId" : 427, + "comment" : "extreme value for k and s^-1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "3045022100c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5022049249249249249249249249249249248c79facd43214c011123c1b03a93412a5", - "result": "valid" + "msg" : "313233343030", + "sig" : "3045022100c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5022049249249249249249249249249249248c79facd43214c011123c1b03a93412a5", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "049aedb0d281db164e130000c5697fae0f305ef848be6fffb43ac593fbb950e952fa6f633359bdcd82b56b0b9f965b037789d46b9a8141b791b2aefa713f96c175", - "wx": "009aedb0d281db164e130000c5697fae0f305ef848be6fffb43ac593fbb950e952", - "wy": "00fa6f633359bdcd82b56b0b9f965b037789d46b9a8141b791b2aefa713f96c175" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "049aedb0d281db164e130000c5697fae0f305ef848be6fffb43ac593fbb950e952fa6f633359bdcd82b56b0b9f965b037789d46b9a8141b791b2aefa713f96c175", + "wx" : "009aedb0d281db164e130000c5697fae0f305ef848be6fffb43ac593fbb950e952", + "wy" : "00fa6f633359bdcd82b56b0b9f965b037789d46b9a8141b791b2aefa713f96c175" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200049aedb0d281db164e130000c5697fae0f305ef848be6fffb43ac593fbb950e952fa6f633359bdcd82b56b0b9f965b037789d46b9a8141b791b2aefa713f96c175", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEmu2w0oHbFk4TAADFaX+uDzBe+Ei+b/+0\nOsWT+7lQ6VL6b2MzWb3NgrVrC5+WWwN3idRrmoFBt5GyrvpxP5bBdQ==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200049aedb0d281db164e130000c5697fae0f305ef848be6fffb43ac593fbb950e952fa6f633359bdcd82b56b0b9f965b037789d46b9a8141b791b2aefa713f96c175", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEmu2w0oHbFk4TAADFaX+uDzBe+Ei+b/+0\nOsWT+7lQ6VL6b2MzWb3NgrVrC5+WWwN3idRrmoFBt5GyrvpxP5bBdQ==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 428, - "comment": "extreme value for k and s^-1", - "flags": [ + "tcId" : 428, + "comment" : "extreme value for k and s^-1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "3045022100c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5022066666666666666666666666666666665e445f1f5dfb6a67e4cba8c385348e6e7", - "result": "valid" + "msg" : "313233343030", + "sig" : "3045022100c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5022066666666666666666666666666666665e445f1f5dfb6a67e4cba8c385348e6e7", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "048ad445db62816260e4e687fd1884e48b9fc0636d031547d63315e792e19bfaee1de64f99d5f1cd8b6ec9cb0f787a654ae86993ba3db1008ef43cff0684cb22bd", - "wx": "008ad445db62816260e4e687fd1884e48b9fc0636d031547d63315e792e19bfaee", - "wy": "1de64f99d5f1cd8b6ec9cb0f787a654ae86993ba3db1008ef43cff0684cb22bd" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : 
"048ad445db62816260e4e687fd1884e48b9fc0636d031547d63315e792e19bfaee1de64f99d5f1cd8b6ec9cb0f787a654ae86993ba3db1008ef43cff0684cb22bd", + "wx" : "008ad445db62816260e4e687fd1884e48b9fc0636d031547d63315e792e19bfaee", + "wy" : "1de64f99d5f1cd8b6ec9cb0f787a654ae86993ba3db1008ef43cff0684cb22bd" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200048ad445db62816260e4e687fd1884e48b9fc0636d031547d63315e792e19bfaee1de64f99d5f1cd8b6ec9cb0f787a654ae86993ba3db1008ef43cff0684cb22bd", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEitRF22KBYmDk5of9GITki5/AY20DFUfW\nMxXnkuGb+u4d5k+Z1fHNi27Jyw94emVK6GmTuj2xAI70PP8GhMsivQ==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200048ad445db62816260e4e687fd1884e48b9fc0636d031547d63315e792e19bfaee1de64f99d5f1cd8b6ec9cb0f787a654ae86993ba3db1008ef43cff0684cb22bd", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEitRF22KBYmDk5of9GITki5/AY20DFUfW\nMxXnkuGb+u4d5k+Z1fHNi27Jyw94emVK6GmTuj2xAI70PP8GhMsivQ==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 429, - "comment": "extreme value for k and s^-1", - "flags": [ + "tcId" : 429, + "comment" : "extreme value for k and s^-1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "3045022100c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5022066666666666666666666666666666665e445f1f5dfb6a67e4cba8c385348e6e7", - "result": "valid" + "msg" : "313233343030", + "sig" : "3045022100c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5022066666666666666666666666666666665e445f1f5dfb6a67e4cba8c385348e6e7", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "041f5799c95be89063b24f26e40cb928c1a868a76fb0094607e8043db409c91c32e75724e813a4191e3a839007f08e2e897388b06d4a00de6de60e536d91fab566", - "wx": "1f5799c95be89063b24f26e40cb928c1a868a76fb0094607e8043db409c91c32", - "wy": "00e75724e813a4191e3a839007f08e2e897388b06d4a00de6de60e536d91fab566" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "041f5799c95be89063b24f26e40cb928c1a868a76fb0094607e8043db409c91c32e75724e813a4191e3a839007f08e2e897388b06d4a00de6de60e536d91fab566", + "wx" : "1f5799c95be89063b24f26e40cb928c1a868a76fb0094607e8043db409c91c32", + "wy" : "00e75724e813a4191e3a839007f08e2e897388b06d4a00de6de60e536d91fab566" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200041f5799c95be89063b24f26e40cb928c1a868a76fb0094607e8043db409c91c32e75724e813a4191e3a839007f08e2e897388b06d4a00de6de60e536d91fab566", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEH1eZyVvokGOyTybkDLkowahop2+wCUYH\n6AQ9tAnJHDLnVyToE6QZHjqDkAfwji6Jc4iwbUoA3m3mDlNtkfq1Zg==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200041f5799c95be89063b24f26e40cb928c1a868a76fb0094607e8043db409c91c32e75724e813a4191e3a839007f08e2e897388b06d4a00de6de60e536d91fab566", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEH1eZyVvokGOyTybkDLkowahop2+wCUYH\n6AQ9tAnJHDLnVyToE6QZHjqDkAfwji6Jc4iwbUoA3m3mDlNtkfq1Zg==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 430, - "comment": "extreme value for k and s^-1", - "flags": [ + "tcId" : 430, + 
"comment" : "extreme value for k and s^-1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "3045022100c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5022049249249249249249249249249249248c79facd43214c011123c1b03a93412a5", - "result": "valid" + "msg" : "313233343030", + "sig" : "3045022100c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5022049249249249249249249249249249248c79facd43214c011123c1b03a93412a5", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04a3331a4e1b4223ec2c027edd482c928a14ed358d93f1d4217d39abf69fcb5ccc28d684d2aaabcd6383775caa6239de26d4c6937bb603ecb4196082f4cffd509d", - "wx": "00a3331a4e1b4223ec2c027edd482c928a14ed358d93f1d4217d39abf69fcb5ccc", - "wy": "28d684d2aaabcd6383775caa6239de26d4c6937bb603ecb4196082f4cffd509d" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04a3331a4e1b4223ec2c027edd482c928a14ed358d93f1d4217d39abf69fcb5ccc28d684d2aaabcd6383775caa6239de26d4c6937bb603ecb4196082f4cffd509d", + "wx" : "00a3331a4e1b4223ec2c027edd482c928a14ed358d93f1d4217d39abf69fcb5ccc", + "wy" : "28d684d2aaabcd6383775caa6239de26d4c6937bb603ecb4196082f4cffd509d" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004a3331a4e1b4223ec2c027edd482c928a14ed358d93f1d4217d39abf69fcb5ccc28d684d2aaabcd6383775caa6239de26d4c6937bb603ecb4196082f4cffd509d", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEozMaThtCI+wsAn7dSCySihTtNY2T8dQh\nfTmr9p/LXMwo1oTSqqvNY4N3XKpiOd4m1MaTe7YD7LQZYIL0z/1QnQ==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004a3331a4e1b4223ec2c027edd482c928a14ed358d93f1d4217d39abf69fcb5ccc28d684d2aaabcd6383775caa6239de26d4c6937bb603ecb4196082f4cffd509d", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEozMaThtCI+wsAn7dSCySihTtNY2T8dQh\nfTmr9p/LXMwo1oTSqqvNY4N3XKpiOd4m1MaTe7YD7LQZYIL0z/1QnQ==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 431, - "comment": "extreme value for k", - "flags": [ + "tcId" : 431, + "comment" : "extreme value for k", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "3045022100c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee502200eb10e5ab95f2f275348d82ad2e4d7949c8193800d8c9c75df58e343f0ebba7b", - "result": "valid" + "msg" : "313233343030", + "sig" : "3045022100c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee502200eb10e5ab95f2f275348d82ad2e4d7949c8193800d8c9c75df58e343f0ebba7b", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "043f3952199774c7cf39b38b66cb1042a6260d8680803845e4d433adba3bb248185ea495b68cbc7ed4173ee63c9042dc502625c7eb7e21fb02ca9a9114e0a3a18d", - "wx": "3f3952199774c7cf39b38b66cb1042a6260d8680803845e4d433adba3bb24818", - "wy": "5ea495b68cbc7ed4173ee63c9042dc502625c7eb7e21fb02ca9a9114e0a3a18d" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "043f3952199774c7cf39b38b66cb1042a6260d8680803845e4d433adba3bb248185ea495b68cbc7ed4173ee63c9042dc502625c7eb7e21fb02ca9a9114e0a3a18d", + "wx" : "3f3952199774c7cf39b38b66cb1042a6260d8680803845e4d433adba3bb24818", + "wy" : 
"5ea495b68cbc7ed4173ee63c9042dc502625c7eb7e21fb02ca9a9114e0a3a18d" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200043f3952199774c7cf39b38b66cb1042a6260d8680803845e4d433adba3bb248185ea495b68cbc7ed4173ee63c9042dc502625c7eb7e21fb02ca9a9114e0a3a18d", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEPzlSGZd0x885s4tmyxBCpiYNhoCAOEXk\n1DOtujuySBhepJW2jLx+1Bc+5jyQQtxQJiXH634h+wLKmpEU4KOhjQ==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200043f3952199774c7cf39b38b66cb1042a6260d8680803845e4d433adba3bb248185ea495b68cbc7ed4173ee63c9042dc502625c7eb7e21fb02ca9a9114e0a3a18d", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEPzlSGZd0x885s4tmyxBCpiYNhoCAOEXk\n1DOtujuySBhepJW2jLx+1Bc+5jyQQtxQJiXH634h+wLKmpEU4KOhjQ==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 432, - "comment": "extreme value for k and edgecase s", - "flags": [ + "tcId" : 432, + "comment" : "extreme value for k and edgecase s", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "3044022079be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c0", - "result": "valid" + "msg" : "313233343030", + "sig" : "3044022079be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798022055555555555555555555555555555554e8e4f44ce51835693ff0ca2ef01215c0", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04cdfb8c0f422e144e137c2412c86c171f5fe3fa3f5bbb544e9076288f3ced786e054fd0721b77c11c79beacb3c94211b0a19bda08652efeaf92513a3b0a163698", - "wx": "00cdfb8c0f422e144e137c2412c86c171f5fe3fa3f5bbb544e9076288f3ced786e", - "wy": "054fd0721b77c11c79beacb3c94211b0a19bda08652efeaf92513a3b0a163698" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04cdfb8c0f422e144e137c2412c86c171f5fe3fa3f5bbb544e9076288f3ced786e054fd0721b77c11c79beacb3c94211b0a19bda08652efeaf92513a3b0a163698", + "wx" : "00cdfb8c0f422e144e137c2412c86c171f5fe3fa3f5bbb544e9076288f3ced786e", + "wy" : "054fd0721b77c11c79beacb3c94211b0a19bda08652efeaf92513a3b0a163698" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004cdfb8c0f422e144e137c2412c86c171f5fe3fa3f5bbb544e9076288f3ced786e054fd0721b77c11c79beacb3c94211b0a19bda08652efeaf92513a3b0a163698", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEzfuMD0IuFE4TfCQSyGwXH1/j+j9bu1RO\nkHYojzzteG4FT9ByG3fBHHm+rLPJQhGwoZvaCGUu/q+SUTo7ChY2mA==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004cdfb8c0f422e144e137c2412c86c171f5fe3fa3f5bbb544e9076288f3ced786e054fd0721b77c11c79beacb3c94211b0a19bda08652efeaf92513a3b0a163698", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEzfuMD0IuFE4TfCQSyGwXH1/j+j9bu1RO\nkHYojzzteG4FT9ByG3fBHHm+rLPJQhGwoZvaCGUu/q+SUTo7ChY2mA==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 433, - "comment": "extreme value for k and s^-1", - "flags": [ + "tcId" : 433, + "comment" : "extreme value for k and s^-1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": 
"3044022079be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798022049249249249249249249249249249248c79facd43214c011123c1b03a93412a5", - "result": "valid" + "msg" : "313233343030", + "sig" : "3044022079be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798022049249249249249249249249249249248c79facd43214c011123c1b03a93412a5", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "0473598a6a1c68278fa6bfd0ce4064e68235bc1c0f6b20a928108be336730f87e3cbae612519b5032ecc85aed811271a95fe7939d5d3460140ba318f4d14aba31d", - "wx": "73598a6a1c68278fa6bfd0ce4064e68235bc1c0f6b20a928108be336730f87e3", - "wy": "00cbae612519b5032ecc85aed811271a95fe7939d5d3460140ba318f4d14aba31d" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "0473598a6a1c68278fa6bfd0ce4064e68235bc1c0f6b20a928108be336730f87e3cbae612519b5032ecc85aed811271a95fe7939d5d3460140ba318f4d14aba31d", + "wx" : "73598a6a1c68278fa6bfd0ce4064e68235bc1c0f6b20a928108be336730f87e3", + "wy" : "00cbae612519b5032ecc85aed811271a95fe7939d5d3460140ba318f4d14aba31d" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000473598a6a1c68278fa6bfd0ce4064e68235bc1c0f6b20a928108be336730f87e3cbae612519b5032ecc85aed811271a95fe7939d5d3460140ba318f4d14aba31d", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEc1mKahxoJ4+mv9DOQGTmgjW8HA9rIKko\nEIvjNnMPh+PLrmElGbUDLsyFrtgRJxqV/nk51dNGAUC6MY9NFKujHQ==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000473598a6a1c68278fa6bfd0ce4064e68235bc1c0f6b20a928108be336730f87e3cbae612519b5032ecc85aed811271a95fe7939d5d3460140ba318f4d14aba31d", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEc1mKahxoJ4+mv9DOQGTmgjW8HA9rIKko\nEIvjNnMPh+PLrmElGbUDLsyFrtgRJxqV/nk51dNGAUC6MY9NFKujHQ==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 434, - "comment": "extreme value for k and s^-1", - "flags": [ + "tcId" : 434, + "comment" : "extreme value for k and s^-1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "3044022079be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798022066666666666666666666666666666665e445f1f5dfb6a67e4cba8c385348e6e7", - "result": "valid" + "msg" : "313233343030", + "sig" : "3044022079be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798022066666666666666666666666666666665e445f1f5dfb6a67e4cba8c385348e6e7", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "0458debd9a7ee2c9d59132478a5440ae4d5d7ed437308369f92ea86c82183f10a16773e76f5edbf4da0e4f1bdffac0f57257e1dfa465842931309a24245fda6a5d", - "wx": "58debd9a7ee2c9d59132478a5440ae4d5d7ed437308369f92ea86c82183f10a1", - "wy": "6773e76f5edbf4da0e4f1bdffac0f57257e1dfa465842931309a24245fda6a5d" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "0458debd9a7ee2c9d59132478a5440ae4d5d7ed437308369f92ea86c82183f10a16773e76f5edbf4da0e4f1bdffac0f57257e1dfa465842931309a24245fda6a5d", + "wx" : "58debd9a7ee2c9d59132478a5440ae4d5d7ed437308369f92ea86c82183f10a1", + "wy" : "6773e76f5edbf4da0e4f1bdffac0f57257e1dfa465842931309a24245fda6a5d" }, - "publicKeyDer": 
"3056301006072a8648ce3d020106052b8104000a0342000458debd9a7ee2c9d59132478a5440ae4d5d7ed437308369f92ea86c82183f10a16773e76f5edbf4da0e4f1bdffac0f57257e1dfa465842931309a24245fda6a5d", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEWN69mn7iydWRMkeKVECuTV1+1Dcwg2n5\nLqhsghg/EKFnc+dvXtv02g5PG9/6wPVyV+HfpGWEKTEwmiQkX9pqXQ==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000458debd9a7ee2c9d59132478a5440ae4d5d7ed437308369f92ea86c82183f10a16773e76f5edbf4da0e4f1bdffac0f57257e1dfa465842931309a24245fda6a5d", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEWN69mn7iydWRMkeKVECuTV1+1Dcwg2n5\nLqhsghg/EKFnc+dvXtv02g5PG9/6wPVyV+HfpGWEKTEwmiQkX9pqXQ==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 435, - "comment": "extreme value for k and s^-1", - "flags": [ + "tcId" : 435, + "comment" : "extreme value for k and s^-1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "3044022079be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798022066666666666666666666666666666665e445f1f5dfb6a67e4cba8c385348e6e7", - "result": "valid" + "msg" : "313233343030", + "sig" : "3044022079be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798022066666666666666666666666666666665e445f1f5dfb6a67e4cba8c385348e6e7", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "048b904de47967340c5f8c3572a720924ef7578637feab1949acb241a5a6ac3f5b950904496f9824b1d63f3313bae21b89fae89afdfc811b5ece03fd5aa301864f", - "wx": "008b904de47967340c5f8c3572a720924ef7578637feab1949acb241a5a6ac3f5b", - "wy": "00950904496f9824b1d63f3313bae21b89fae89afdfc811b5ece03fd5aa301864f" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "048b904de47967340c5f8c3572a720924ef7578637feab1949acb241a5a6ac3f5b950904496f9824b1d63f3313bae21b89fae89afdfc811b5ece03fd5aa301864f", + "wx" : "008b904de47967340c5f8c3572a720924ef7578637feab1949acb241a5a6ac3f5b", + "wy" : "00950904496f9824b1d63f3313bae21b89fae89afdfc811b5ece03fd5aa301864f" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200048b904de47967340c5f8c3572a720924ef7578637feab1949acb241a5a6ac3f5b950904496f9824b1d63f3313bae21b89fae89afdfc811b5ece03fd5aa301864f", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEi5BN5HlnNAxfjDVypyCSTvdXhjf+qxlJ\nrLJBpaasP1uVCQRJb5gksdY/MxO64huJ+uia/fyBG17OA/1aowGGTw==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200048b904de47967340c5f8c3572a720924ef7578637feab1949acb241a5a6ac3f5b950904496f9824b1d63f3313bae21b89fae89afdfc811b5ece03fd5aa301864f", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEi5BN5HlnNAxfjDVypyCSTvdXhjf+qxlJ\nrLJBpaasP1uVCQRJb5gksdY/MxO64huJ+uia/fyBG17OA/1aowGGTw==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 436, - "comment": "extreme value for k and s^-1", - "flags": [ + "tcId" : 436, + "comment" : "extreme value for k and s^-1", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "3044022079be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798022049249249249249249249249249249248c79facd43214c011123c1b03a93412a5", - "result": "valid" + "msg" : "313233343030", + "sig" 
: "3044022079be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798022049249249249249249249249249249248c79facd43214c011123c1b03a93412a5", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04f4892b6d525c771e035f2a252708f3784e48238604b4f94dc56eaa1e546d941a346b1aa0bce68b1c50e5b52f509fb5522e5c25e028bc8f863402edb7bcad8b1b", - "wx": "00f4892b6d525c771e035f2a252708f3784e48238604b4f94dc56eaa1e546d941a", - "wy": "346b1aa0bce68b1c50e5b52f509fb5522e5c25e028bc8f863402edb7bcad8b1b" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04f4892b6d525c771e035f2a252708f3784e48238604b4f94dc56eaa1e546d941a346b1aa0bce68b1c50e5b52f509fb5522e5c25e028bc8f863402edb7bcad8b1b", + "wx" : "00f4892b6d525c771e035f2a252708f3784e48238604b4f94dc56eaa1e546d941a", + "wy" : "346b1aa0bce68b1c50e5b52f509fb5522e5c25e028bc8f863402edb7bcad8b1b" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004f4892b6d525c771e035f2a252708f3784e48238604b4f94dc56eaa1e546d941a346b1aa0bce68b1c50e5b52f509fb5522e5c25e028bc8f863402edb7bcad8b1b", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE9IkrbVJcdx4DXyolJwjzeE5II4YEtPlN\nxW6qHlRtlBo0axqgvOaLHFDltS9Qn7VSLlwl4Ci8j4Y0Au23vK2LGw==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004f4892b6d525c771e035f2a252708f3784e48238604b4f94dc56eaa1e546d941a346b1aa0bce68b1c50e5b52f509fb5522e5c25e028bc8f863402edb7bcad8b1b", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE9IkrbVJcdx4DXyolJwjzeE5II4YEtPlN\nxW6qHlRtlBo0axqgvOaLHFDltS9Qn7VSLlwl4Ci8j4Y0Au23vK2LGw==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 437, - "comment": "extreme value for k", - "flags": [ + "tcId" : 437, + "comment" : "extreme value for k", + "flags" : [ "ArithmeticError" ], - "msg": "313233343030", - "sig": "3044022079be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f8179802200eb10e5ab95f2f275348d82ad2e4d7949c8193800d8c9c75df58e343f0ebba7b", - "result": "valid" + "msg" : "313233343030", + "sig" : "3044022079be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f8179802200eb10e5ab95f2f275348d82ad2e4d7949c8193800d8c9c75df58e343f0ebba7b", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "0479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8", - "wx": "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798", - "wy": "483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "0479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8", + "wx" : "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798", + "wy" : "483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8", - "publicKeyPem": "-----BEGIN PUBLIC 
KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEeb5mfvncu6xVoGKVzocLBwKb/NstzijZ\nWfKBWxb4F5hIOtp3JqPEZV2k+/wOEQio/Re0SKaFVBmcR9CP+xDUuA==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEeb5mfvncu6xVoGKVzocLBwKb/NstzijZ\nWfKBWxb4F5hIOtp3JqPEZV2k+/wOEQio/Re0SKaFVBmcR9CP+xDUuA==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 438, - "comment": "public key shares x-coordinate with generator", - "flags": [ + "tcId" : 438, + "comment" : "public key shares x-coordinate with generator", + "flags" : [ "PointDuplication" ], - "msg": "313233343030", - "sig": "3045022100bb5a52f42f9c9261ed4361f59422a1e30036e7c32b270c8807a419feca60502302202492492492492492492492492492492463cfd66a190a6008891e0d81d49a0952", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3045022100bb5a52f42f9c9261ed4361f59422a1e30036e7c32b270c8807a419feca60502302202492492492492492492492492492492463cfd66a190a6008891e0d81d49a0952", + "result" : "invalid" }, { - "tcId": 439, - "comment": "public key shares x-coordinate with generator", - "flags": [ + "tcId" : 439, + "comment" : "public key shares x-coordinate with generator", + "flags" : [ "PointDuplication" ], - "msg": "313233343030", - "sig": "3044022044a5ad0bd0636d9e12bc9e0a6bdd5e1bba77f523842193b3b82e448e05d5f11e02202492492492492492492492492492492463cfd66a190a6008891e0d81d49a0952", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3044022044a5ad0bd0636d9e12bc9e0a6bdd5e1bba77f523842193b3b82e448e05d5f11e02202492492492492492492492492492492463cfd66a190a6008891e0d81d49a0952", + "result" : "invalid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "0479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798b7c52588d95c3b9aa25b0403f1eef75702e84bb7597aabe663b82f6f04ef2777", - "wx": "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798", - "wy": "00b7c52588d95c3b9aa25b0403f1eef75702e84bb7597aabe663b82f6f04ef2777" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "0479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798b7c52588d95c3b9aa25b0403f1eef75702e84bb7597aabe663b82f6f04ef2777", + "wx" : "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798", + "wy" : "00b7c52588d95c3b9aa25b0403f1eef75702e84bb7597aabe663b82f6f04ef2777" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798b7c52588d95c3b9aa25b0403f1eef75702e84bb7597aabe663b82f6f04ef2777", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEeb5mfvncu6xVoGKVzocLBwKb/NstzijZ\nWfKBWxb4F5i3xSWI2Vw7mqJbBAPx7vdXAuhLt1l6q+ZjuC9vBO8ndw==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798b7c52588d95c3b9aa25b0403f1eef75702e84bb7597aabe663b82f6f04ef2777", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEeb5mfvncu6xVoGKVzocLBwKb/NstzijZ\nWfKBWxb4F5i3xSWI2Vw7mqJbBAPx7vdXAuhLt1l6q+ZjuC9vBO8ndw==\n-----END PUBLIC KEY-----\n", + 
"sha" : "SHA-256", + "tests" : [ { - "tcId": 440, - "comment": "public key shares x-coordinate with generator", - "flags": [ + "tcId" : 440, + "comment" : "public key shares x-coordinate with generator", + "flags" : [ "PointDuplication" ], - "msg": "313233343030", - "sig": "3045022100bb5a52f42f9c9261ed4361f59422a1e30036e7c32b270c8807a419feca60502302202492492492492492492492492492492463cfd66a190a6008891e0d81d49a0952", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3045022100bb5a52f42f9c9261ed4361f59422a1e30036e7c32b270c8807a419feca60502302202492492492492492492492492492492463cfd66a190a6008891e0d81d49a0952", + "result" : "invalid" }, { - "tcId": 441, - "comment": "public key shares x-coordinate with generator", - "flags": [ + "tcId" : 441, + "comment" : "public key shares x-coordinate with generator", + "flags" : [ "PointDuplication" ], - "msg": "313233343030", - "sig": "3044022044a5ad0bd0636d9e12bc9e0a6bdd5e1bba77f523842193b3b82e448e05d5f11e02202492492492492492492492492492492463cfd66a190a6008891e0d81d49a0952", - "result": "invalid" + "msg" : "313233343030", + "sig" : "3044022044a5ad0bd0636d9e12bc9e0a6bdd5e1bba77f523842193b3b82e448e05d5f11e02202492492492492492492492492492492463cfd66a190a6008891e0d81d49a0952", + "result" : "invalid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04782c8ed17e3b2a783b5464f33b09652a71c678e05ec51e84e2bcfc663a3de963af9acb4280b8c7f7c42f4ef9aba6245ec1ec1712fd38a0fa96418d8cd6aa6152", - "wx": "782c8ed17e3b2a783b5464f33b09652a71c678e05ec51e84e2bcfc663a3de963", - "wy": "00af9acb4280b8c7f7c42f4ef9aba6245ec1ec1712fd38a0fa96418d8cd6aa6152" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04782c8ed17e3b2a783b5464f33b09652a71c678e05ec51e84e2bcfc663a3de963af9acb4280b8c7f7c42f4ef9aba6245ec1ec1712fd38a0fa96418d8cd6aa6152", + "wx" : "782c8ed17e3b2a783b5464f33b09652a71c678e05ec51e84e2bcfc663a3de963", + "wy" : "00af9acb4280b8c7f7c42f4ef9aba6245ec1ec1712fd38a0fa96418d8cd6aa6152" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004782c8ed17e3b2a783b5464f33b09652a71c678e05ec51e84e2bcfc663a3de963af9acb4280b8c7f7c42f4ef9aba6245ec1ec1712fd38a0fa96418d8cd6aa6152", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEeCyO0X47Kng7VGTzOwllKnHGeOBexR6E\n4rz8Zjo96WOvmstCgLjH98QvTvmrpiRewewXEv04oPqWQY2M1qphUg==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004782c8ed17e3b2a783b5464f33b09652a71c678e05ec51e84e2bcfc663a3de963af9acb4280b8c7f7c42f4ef9aba6245ec1ec1712fd38a0fa96418d8cd6aa6152", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEeCyO0X47Kng7VGTzOwllKnHGeOBexR6E\n4rz8Zjo96WOvmstCgLjH98QvTvmrpiRewewXEv04oPqWQY2M1qphUg==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 442, - "comment": "pseudorandom signature", - "flags": [ + "tcId" : 442, + "comment" : "pseudorandom signature", + "flags" : [ "ValidSignature" ], - "msg": "", - "sig": "3045022100f80ae4f96cdbc9d853f83d47aae225bf407d51c56b7776cd67d0dc195d99a9dc02204cfc1d941e08cb9aceadde0f4ccead76b30d332fc442115d50e673e28686b70b", - "result": "valid" + "msg" : "", + "sig" : "3045022100f80ae4f96cdbc9d853f83d47aae225bf407d51c56b7776cd67d0dc195d99a9dc02204cfc1d941e08cb9aceadde0f4ccead76b30d332fc442115d50e673e28686b70b", + "result" : "valid" }, { - "tcId": 443, - 
"comment": "pseudorandom signature", - "flags": [ + "tcId" : 443, + "comment" : "pseudorandom signature", + "flags" : [ "ValidSignature" ], - "msg": "4d7367", - "sig": "30440220109cd8ae0374358984a8249c0a843628f2835ffad1df1a9a69aa2fe72355545c02205390ff250ac4274e1cb25cd6ca6491f6b91281e32f5b264d87977aed4a94e77b", - "result": "valid" + "msg" : "4d7367", + "sig" : "30440220109cd8ae0374358984a8249c0a843628f2835ffad1df1a9a69aa2fe72355545c02205390ff250ac4274e1cb25cd6ca6491f6b91281e32f5b264d87977aed4a94e77b", + "result" : "valid" }, { - "tcId": 444, - "comment": "pseudorandom signature", - "flags": [ + "tcId" : 444, + "comment" : "pseudorandom signature", + "flags" : [ "ValidSignature" ], - "msg": "313233343030", - "sig": "3045022100d035ee1f17fdb0b2681b163e33c359932659990af77dca632012b30b27a057b302201939d9f3b2858bc13e3474cb50e6a82be44faa71940f876c1cba4c3e989202b6", - "result": "valid" + "msg" : "313233343030", + "sig" : "3045022100d035ee1f17fdb0b2681b163e33c359932659990af77dca632012b30b27a057b302201939d9f3b2858bc13e3474cb50e6a82be44faa71940f876c1cba4c3e989202b6", + "result" : "valid" }, { - "tcId": 445, - "comment": "pseudorandom signature", - "flags": [ + "tcId" : 445, + "comment" : "pseudorandom signature", + "flags" : [ "ValidSignature" ], - "msg": "0000000000000000000000000000000000000000", - "sig": "304402204f053f563ad34b74fd8c9934ce59e79c2eb8e6eca0fef5b323ca67d5ac7ed23802204d4b05daa0719e773d8617dce5631c5fd6f59c9bdc748e4b55c970040af01be5", - "result": "valid" + "msg" : "0000000000000000000000000000000000000000", + "sig" : "304402204f053f563ad34b74fd8c9934ce59e79c2eb8e6eca0fef5b323ca67d5ac7ed23802204d4b05daa0719e773d8617dce5631c5fd6f59c9bdc748e4b55c970040af01be5", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "046e823555452914099182c6b2c1d6f0b5d28d50ccd005af2ce1bba541aa40caff00000001060492d5a5673e0f25d8d50fb7e58c49d86d46d4216955e0aa3d40e1", - "wx": "6e823555452914099182c6b2c1d6f0b5d28d50ccd005af2ce1bba541aa40caff", - "wy": "01060492d5a5673e0f25d8d50fb7e58c49d86d46d4216955e0aa3d40e1" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "046e823555452914099182c6b2c1d6f0b5d28d50ccd005af2ce1bba541aa40caff00000001060492d5a5673e0f25d8d50fb7e58c49d86d46d4216955e0aa3d40e1", + "wx" : "6e823555452914099182c6b2c1d6f0b5d28d50ccd005af2ce1bba541aa40caff", + "wy" : "01060492d5a5673e0f25d8d50fb7e58c49d86d46d4216955e0aa3d40e1" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200046e823555452914099182c6b2c1d6f0b5d28d50ccd005af2ce1bba541aa40caff00000001060492d5a5673e0f25d8d50fb7e58c49d86d46d4216955e0aa3d40e1", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEboI1VUUpFAmRgsaywdbwtdKNUMzQBa8s\n4bulQapAyv8AAAABBgSS1aVnPg8l2NUPt+WMSdhtRtQhaVXgqj1A4Q==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200046e823555452914099182c6b2c1d6f0b5d28d50ccd005af2ce1bba541aa40caff00000001060492d5a5673e0f25d8d50fb7e58c49d86d46d4216955e0aa3d40e1", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEboI1VUUpFAmRgsaywdbwtdKNUMzQBa8s\n4bulQapAyv8AAAABBgSS1aVnPg8l2NUPt+WMSdhtRtQhaVXgqj1A4Q==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 446, - "comment": "y-coordinate of the public key is small", - "flags": [ + "tcId" : 446, + "comment" : "y-coordinate of 
the public key is small", + "flags" : [ "EdgeCasePublicKey" ], - "msg": "4d657373616765", - "sig": "304402206d6a4f556ccce154e7fb9f19e76c3deca13d59cc2aeb4ecad968aab2ded45965022053b9fa74803ede0fc4441bf683d56c564d3e274e09ccf47390badd1471c05fb7", - "result": "valid" + "msg" : "4d657373616765", + "sig" : "304402206d6a4f556ccce154e7fb9f19e76c3deca13d59cc2aeb4ecad968aab2ded45965022053b9fa74803ede0fc4441bf683d56c564d3e274e09ccf47390badd1471c05fb7", + "result" : "valid" }, { - "tcId": 447, - "comment": "y-coordinate of the public key is small", - "flags": [ + "tcId" : 447, + "comment" : "y-coordinate of the public key is small", + "flags" : [ "EdgeCasePublicKey" ], - "msg": "4d657373616765", - "sig": "3044022100aad503de9b9fd66b948e9acf596f0a0e65e700b28b26ec56e6e45e846489b3c4021f0ddc3a2f89abb817bb85c062ce02f823c63fc26b269e0bc9b84d81a5aa123d", - "result": "valid" + "msg" : "4d657373616765", + "sig" : "3044022100aad503de9b9fd66b948e9acf596f0a0e65e700b28b26ec56e6e45e846489b3c4021f0ddc3a2f89abb817bb85c062ce02f823c63fc26b269e0bc9b84d81a5aa123d", + "result" : "valid" }, { - "tcId": 448, - "comment": "y-coordinate of the public key is small", - "flags": [ + "tcId" : 448, + "comment" : "y-coordinate of the public key is small", + "flags" : [ "EdgeCasePublicKey" ], - "msg": "4d657373616765", - "sig": "30450221009182cebd3bb8ab572e167174397209ef4b1d439af3b200cdf003620089e43225022054477c982ea019d2e1000497fc25fcee1bccae55f2ac27530ae53b29c4b356a4", - "result": "valid" + "msg" : "4d657373616765", + "sig" : "30450221009182cebd3bb8ab572e167174397209ef4b1d439af3b200cdf003620089e43225022054477c982ea019d2e1000497fc25fcee1bccae55f2ac27530ae53b29c4b356a4", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "046e823555452914099182c6b2c1d6f0b5d28d50ccd005af2ce1bba541aa40cafffffffffef9fb6d2a5a98c1f0da272af0481a73b62792b92bde96aa1e55c2bb4e", - "wx": "6e823555452914099182c6b2c1d6f0b5d28d50ccd005af2ce1bba541aa40caff", - "wy": "00fffffffef9fb6d2a5a98c1f0da272af0481a73b62792b92bde96aa1e55c2bb4e" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "046e823555452914099182c6b2c1d6f0b5d28d50ccd005af2ce1bba541aa40cafffffffffef9fb6d2a5a98c1f0da272af0481a73b62792b92bde96aa1e55c2bb4e", + "wx" : "6e823555452914099182c6b2c1d6f0b5d28d50ccd005af2ce1bba541aa40caff", + "wy" : "00fffffffef9fb6d2a5a98c1f0da272af0481a73b62792b92bde96aa1e55c2bb4e" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200046e823555452914099182c6b2c1d6f0b5d28d50ccd005af2ce1bba541aa40cafffffffffef9fb6d2a5a98c1f0da272af0481a73b62792b92bde96aa1e55c2bb4e", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEboI1VUUpFAmRgsaywdbwtdKNUMzQBa8s\n4bulQapAyv/////++fttKlqYwfDaJyrwSBpztieSuSvelqoeVcK7Tg==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200046e823555452914099182c6b2c1d6f0b5d28d50ccd005af2ce1bba541aa40cafffffffffef9fb6d2a5a98c1f0da272af0481a73b62792b92bde96aa1e55c2bb4e", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEboI1VUUpFAmRgsaywdbwtdKNUMzQBa8s\n4bulQapAyv/////++fttKlqYwfDaJyrwSBpztieSuSvelqoeVcK7Tg==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 449, - "comment": "y-coordinate of the public key is large", - "flags": [ + "tcId" : 449, + "comment" : "y-coordinate of the public key is 
large", + "flags" : [ "EdgeCasePublicKey" ], - "msg": "4d657373616765", - "sig": "304402203854a3998aebdf2dbc28adac4181462ccac7873907ab7f212c42db0e69b56ed802203ed3f6b8a388d02f3e4df9f2ae9c1bd2c3916a686460dffcd42909cd7f82058e", - "result": "valid" + "msg" : "4d657373616765", + "sig" : "304402203854a3998aebdf2dbc28adac4181462ccac7873907ab7f212c42db0e69b56ed802203ed3f6b8a388d02f3e4df9f2ae9c1bd2c3916a686460dffcd42909cd7f82058e", + "result" : "valid" }, { - "tcId": 450, - "comment": "y-coordinate of the public key is large", - "flags": [ + "tcId" : 450, + "comment" : "y-coordinate of the public key is large", + "flags" : [ "EdgeCasePublicKey" ], - "msg": "4d657373616765", - "sig": "3045022100e94dbdc38795fe5c904d8f16d969d3b587f0a25d2de90b6d8c5c53ff887e360702207a947369c164972521bb8af406813b2d9f94d2aeaa53d4c215aaa0a2578a2c5d", - "result": "valid" + "msg" : "4d657373616765", + "sig" : "3045022100e94dbdc38795fe5c904d8f16d969d3b587f0a25d2de90b6d8c5c53ff887e360702207a947369c164972521bb8af406813b2d9f94d2aeaa53d4c215aaa0a2578a2c5d", + "result" : "valid" }, { - "tcId": 451, - "comment": "y-coordinate of the public key is large", - "flags": [ + "tcId" : 451, + "comment" : "y-coordinate of the public key is large", + "flags" : [ "EdgeCasePublicKey" ], - "msg": "4d657373616765", - "sig": "3044022049fc102a08ca47b60e0858cd0284d22cddd7233f94aaffbb2db1dd2cf08425e102205b16fca5a12cdb39701697ad8e39ffd6bdec0024298afaa2326aea09200b14d6", - "result": "valid" + "msg" : "4d657373616765", + "sig" : "3044022049fc102a08ca47b60e0858cd0284d22cddd7233f94aaffbb2db1dd2cf08425e102205b16fca5a12cdb39701697ad8e39ffd6bdec0024298afaa2326aea09200b14d6", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04000000013fd22248d64d95f73c29b48ab48631850be503fd00f8468b5f0f70e0f6ee7aa43bc2c6fd25b1d8269241cbdd9dbb0dac96dc96231f430705f838717d", - "wx": "013fd22248d64d95f73c29b48ab48631850be503fd00f8468b5f0f70e0", - "wy": "00f6ee7aa43bc2c6fd25b1d8269241cbdd9dbb0dac96dc96231f430705f838717d" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04000000013fd22248d64d95f73c29b48ab48631850be503fd00f8468b5f0f70e0f6ee7aa43bc2c6fd25b1d8269241cbdd9dbb0dac96dc96231f430705f838717d", + "wx" : "013fd22248d64d95f73c29b48ab48631850be503fd00f8468b5f0f70e0", + "wy" : "00f6ee7aa43bc2c6fd25b1d8269241cbdd9dbb0dac96dc96231f430705f838717d" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004000000013fd22248d64d95f73c29b48ab48631850be503fd00f8468b5f0f70e0f6ee7aa43bc2c6fd25b1d8269241cbdd9dbb0dac96dc96231f430705f838717d", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEAAAAAT/SIkjWTZX3PCm0irSGMYUL5QP9\nAPhGi18PcOD27nqkO8LG/SWx2CaSQcvdnbsNrJbcliMfQwcF+DhxfQ==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004000000013fd22248d64d95f73c29b48ab48631850be503fd00f8468b5f0f70e0f6ee7aa43bc2c6fd25b1d8269241cbdd9dbb0dac96dc96231f430705f838717d", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEAAAAAT/SIkjWTZX3PCm0irSGMYUL5QP9\nAPhGi18PcOD27nqkO8LG/SWx2CaSQcvdnbsNrJbcliMfQwcF+DhxfQ==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 452, - "comment": "x-coordinate of the public key is small", - "flags": [ + "tcId" : 452, + "comment" : "x-coordinate of the public key is small", + "flags" : [ 
"EdgeCasePublicKey" ], - "msg": "4d657373616765", - "sig": "3044022041efa7d3f05a0010675fcb918a45c693da4b348df21a59d6f9cd73e0d831d67a02204454ada693e5e26b7bd693236d340f80545c834577b6f73d378c7bcc534244da", - "result": "valid" + "msg" : "4d657373616765", + "sig" : "3044022041efa7d3f05a0010675fcb918a45c693da4b348df21a59d6f9cd73e0d831d67a02204454ada693e5e26b7bd693236d340f80545c834577b6f73d378c7bcc534244da", + "result" : "valid" }, { - "tcId": 453, - "comment": "x-coordinate of the public key is small", - "flags": [ + "tcId" : 453, + "comment" : "x-coordinate of the public key is small", + "flags" : [ "EdgeCasePublicKey" ], - "msg": "4d657373616765", - "sig": "3045022100b615698c358b35920dd883eca625a6c5f7563970cdfc378f8fe0cee17092144c022025f47b326b5be1fb610b885153ea84d41eb4716be66a994e8779989df1c863d4", - "result": "valid" + "msg" : "4d657373616765", + "sig" : "3045022100b615698c358b35920dd883eca625a6c5f7563970cdfc378f8fe0cee17092144c022025f47b326b5be1fb610b885153ea84d41eb4716be66a994e8779989df1c863d4", + "result" : "valid" }, { - "tcId": 454, - "comment": "x-coordinate of the public key is small", - "flags": [ + "tcId" : 454, + "comment" : "x-coordinate of the public key is small", + "flags" : [ "EdgeCasePublicKey" ], - "msg": "4d657373616765", - "sig": "304502210087cf8c0eb82d44f69c60a2ff5457d3aaa322e7ec61ae5aecfd678ae1c1932b0e02203add3b115815047d6eb340a3e008989eaa0f8708d1794814729094d08d2460d3", - "result": "valid" + "msg" : "4d657373616765", + "sig" : "304502210087cf8c0eb82d44f69c60a2ff5457d3aaa322e7ec61ae5aecfd678ae1c1932b0e02203add3b115815047d6eb340a3e008989eaa0f8708d1794814729094d08d2460d3", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "0425afd689acabaed67c1f296de59406f8c550f57146a0b4ec2c97876dfffffffffa46a76e520322dfbc491ec4f0cc197420fc4ea5883d8f6dd53c354bc4f67c35", - "wx": "25afd689acabaed67c1f296de59406f8c550f57146a0b4ec2c97876dffffffff", - "wy": "00fa46a76e520322dfbc491ec4f0cc197420fc4ea5883d8f6dd53c354bc4f67c35" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "0425afd689acabaed67c1f296de59406f8c550f57146a0b4ec2c97876dfffffffffa46a76e520322dfbc491ec4f0cc197420fc4ea5883d8f6dd53c354bc4f67c35", + "wx" : "25afd689acabaed67c1f296de59406f8c550f57146a0b4ec2c97876dffffffff", + "wy" : "00fa46a76e520322dfbc491ec4f0cc197420fc4ea5883d8f6dd53c354bc4f67c35" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a0342000425afd689acabaed67c1f296de59406f8c550f57146a0b4ec2c97876dfffffffffa46a76e520322dfbc491ec4f0cc197420fc4ea5883d8f6dd53c354bc4f67c35", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEJa/WiayrrtZ8Hylt5ZQG+MVQ9XFGoLTs\nLJeHbf/////6RqduUgMi37xJHsTwzBl0IPxOpYg9j23VPDVLxPZ8NQ==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a0342000425afd689acabaed67c1f296de59406f8c550f57146a0b4ec2c97876dfffffffffa46a76e520322dfbc491ec4f0cc197420fc4ea5883d8f6dd53c354bc4f67c35", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEJa/WiayrrtZ8Hylt5ZQG+MVQ9XFGoLTs\nLJeHbf/////6RqduUgMi37xJHsTwzBl0IPxOpYg9j23VPDVLxPZ8NQ==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 455, - "comment": "x-coordinate of the public key has many trailing 1's", - "flags": [ + "tcId" : 455, + "comment" : "x-coordinate of the public key has many trailing 1's", + 
"flags" : [ "EdgeCasePublicKey" ], - "msg": "4d657373616765", - "sig": "3044022062f48ef71ace27bf5a01834de1f7e3f948b9dce1ca1e911d5e13d3b104471d8202205ea8f33f0c778972c4582080deda9b341857dd64514f0849a05f6964c2e34022", - "result": "valid" + "msg" : "4d657373616765", + "sig" : "3044022062f48ef71ace27bf5a01834de1f7e3f948b9dce1ca1e911d5e13d3b104471d8202205ea8f33f0c778972c4582080deda9b341857dd64514f0849a05f6964c2e34022", + "result" : "valid" }, { - "tcId": 456, - "comment": "x-coordinate of the public key has many trailing 1's", - "flags": [ + "tcId" : 456, + "comment" : "x-coordinate of the public key has many trailing 1's", + "flags" : [ "EdgeCasePublicKey" ], - "msg": "4d657373616765", - "sig": "3045022100f6b0e2f6fe020cf7c0c20137434344ed7add6c4be51861e2d14cbda472a6ffb402206416c8dd3e5c5282b306e8dc8ff34ab64cc99549232d678d714402eb6ca7aa0f", - "result": "valid" + "msg" : "4d657373616765", + "sig" : "3045022100f6b0e2f6fe020cf7c0c20137434344ed7add6c4be51861e2d14cbda472a6ffb402206416c8dd3e5c5282b306e8dc8ff34ab64cc99549232d678d714402eb6ca7aa0f", + "result" : "valid" }, { - "tcId": 457, - "comment": "x-coordinate of the public key has many trailing 1's", - "flags": [ + "tcId" : 457, + "comment" : "x-coordinate of the public key has many trailing 1's", + "flags" : [ "EdgeCasePublicKey" ], - "msg": "4d657373616765", - "sig": "3045022100db09d8460f05eff23bc7e436b67da563fa4b4edb58ac24ce201fa8a358125057022046da116754602940c8999c8d665f786c50f5772c0a3cdbda075e77eabc64df16", - "result": "valid" + "msg" : "4d657373616765", + "sig" : "3045022100db09d8460f05eff23bc7e436b67da563fa4b4edb58ac24ce201fa8a358125057022046da116754602940c8999c8d665f786c50f5772c0a3cdbda075e77eabc64df16", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "04d12e6c66b67734c3c84d2601cf5d35dc097e27637f0aca4a4fdb74b6aadd3bb93f5bdff88bd5736df898e699006ed750f11cf07c5866cd7ad70c7121ffffffff", - "wx": "00d12e6c66b67734c3c84d2601cf5d35dc097e27637f0aca4a4fdb74b6aadd3bb9", - "wy": "3f5bdff88bd5736df898e699006ed750f11cf07c5866cd7ad70c7121ffffffff" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "04d12e6c66b67734c3c84d2601cf5d35dc097e27637f0aca4a4fdb74b6aadd3bb93f5bdff88bd5736df898e699006ed750f11cf07c5866cd7ad70c7121ffffffff", + "wx" : "00d12e6c66b67734c3c84d2601cf5d35dc097e27637f0aca4a4fdb74b6aadd3bb9", + "wy" : "3f5bdff88bd5736df898e699006ed750f11cf07c5866cd7ad70c7121ffffffff" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a03420004d12e6c66b67734c3c84d2601cf5d35dc097e27637f0aca4a4fdb74b6aadd3bb93f5bdff88bd5736df898e699006ed750f11cf07c5866cd7ad70c7121ffffffff", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE0S5sZrZ3NMPITSYBz1013Al+J2N/CspK\nT9t0tqrdO7k/W9/4i9VzbfiY5pkAbtdQ8RzwfFhmzXrXDHEh/////w==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a03420004d12e6c66b67734c3c84d2601cf5d35dc097e27637f0aca4a4fdb74b6aadd3bb93f5bdff88bd5736df898e699006ed750f11cf07c5866cd7ad70c7121ffffffff", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAE0S5sZrZ3NMPITSYBz1013Al+J2N/CspK\nT9t0tqrdO7k/W9/4i9VzbfiY5pkAbtdQ8RzwfFhmzXrXDHEh/////w==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 458, - "comment": "y-coordinate of the public key has many trailing 1's", - "flags": [ + "tcId" : 458, + 
"comment" : "y-coordinate of the public key has many trailing 1's", + "flags" : [ "EdgeCasePublicKey" ], - "msg": "4d657373616765", - "sig": "30440220592c41e16517f12fcabd98267674f974b588e9f35d35406c1a7bb2ed1d19b7b802203e65a06bd9f83caaeb7b00f2368d7e0dece6b12221269a9b5b765198f840a3a1", - "result": "valid" + "msg" : "4d657373616765", + "sig" : "30440220592c41e16517f12fcabd98267674f974b588e9f35d35406c1a7bb2ed1d19b7b802203e65a06bd9f83caaeb7b00f2368d7e0dece6b12221269a9b5b765198f840a3a1", + "result" : "valid" }, { - "tcId": 459, - "comment": "y-coordinate of the public key has many trailing 1's", - "flags": [ + "tcId" : 459, + "comment" : "y-coordinate of the public key has many trailing 1's", + "flags" : [ "EdgeCasePublicKey" ], - "msg": "4d657373616765", - "sig": "3045022100be0d70887d5e40821a61b68047de4ea03debfdf51cdf4d4b195558b959a032b202207d994b2d8f1dbbeb13534eb3f6e5dccd85f5c4133c27d9e64271b1826ce1f67d", - "result": "valid" + "msg" : "4d657373616765", + "sig" : "3045022100be0d70887d5e40821a61b68047de4ea03debfdf51cdf4d4b195558b959a032b202207d994b2d8f1dbbeb13534eb3f6e5dccd85f5c4133c27d9e64271b1826ce1f67d", + "result" : "valid" }, { - "tcId": 460, - "comment": "y-coordinate of the public key has many trailing 1's", - "flags": [ + "tcId" : 460, + "comment" : "y-coordinate of the public key has many trailing 1's", + "flags" : [ "EdgeCasePublicKey" ], - "msg": "4d657373616765", - "sig": "3045022100fae92dfcb2ee392d270af3a5739faa26d4f97bfd39ed3cbee4d29e26af3b206a02206c9ba37f9faa6a1fd3f65f23b4e853d4692a7274240a12db7ba3884830630d16", - "result": "valid" + "msg" : "4d657373616765", + "sig" : "3045022100fae92dfcb2ee392d270af3a5739faa26d4f97bfd39ed3cbee4d29e26af3b206a02206c9ba37f9faa6a1fd3f65f23b4e853d4692a7274240a12db7ba3884830630d16", + "result" : "valid" } ] }, { - "type": "EcdsaBitcoinVerify", - "publicKey": { - "type": "EcPublicKey", - "curve": "secp256k1", - "keySize": 256, - "uncompressed": "046d4a7f60d4774a4f0aa8bbdedb953c7eea7909407e3164755664bc2800000000e659d34e4df38d9e8c9eaadfba36612c769195be86c77aac3f36e78b538680fb", - "wx": "6d4a7f60d4774a4f0aa8bbdedb953c7eea7909407e3164755664bc2800000000", - "wy": "00e659d34e4df38d9e8c9eaadfba36612c769195be86c77aac3f36e78b538680fb" + "type" : "EcdsaBitcoinVerify", + "publicKey" : { + "type" : "EcPublicKey", + "curve" : "secp256k1", + "keySize" : 256, + "uncompressed" : "046d4a7f60d4774a4f0aa8bbdedb953c7eea7909407e3164755664bc2800000000e659d34e4df38d9e8c9eaadfba36612c769195be86c77aac3f36e78b538680fb", + "wx" : "6d4a7f60d4774a4f0aa8bbdedb953c7eea7909407e3164755664bc2800000000", + "wy" : "00e659d34e4df38d9e8c9eaadfba36612c769195be86c77aac3f36e78b538680fb" }, - "publicKeyDer": "3056301006072a8648ce3d020106052b8104000a034200046d4a7f60d4774a4f0aa8bbdedb953c7eea7909407e3164755664bc2800000000e659d34e4df38d9e8c9eaadfba36612c769195be86c77aac3f36e78b538680fb", - "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEbUp/YNR3Sk8KqLve25U8fup5CUB+MWR1\nVmS8KAAAAADmWdNOTfONnoyeqt+6NmEsdpGVvobHeqw/NueLU4aA+w==\n-----END PUBLIC KEY-----\n", - "sha": "SHA-256", - "tests": [ + "publicKeyDer" : "3056301006072a8648ce3d020106052b8104000a034200046d4a7f60d4774a4f0aa8bbdedb953c7eea7909407e3164755664bc2800000000e659d34e4df38d9e8c9eaadfba36612c769195be86c77aac3f36e78b538680fb", + "publicKeyPem" : "-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEbUp/YNR3Sk8KqLve25U8fup5CUB+MWR1\nVmS8KAAAAADmWdNOTfONnoyeqt+6NmEsdpGVvobHeqw/NueLU4aA+w==\n-----END PUBLIC KEY-----\n", + "sha" : "SHA-256", + "tests" : [ { - "tcId": 461, - "comment": "x-coordinate of the 
public key has many trailing 0's", - "flags": [ + "tcId" : 461, + "comment" : "x-coordinate of the public key has many trailing 0's", + "flags" : [ "EdgeCasePublicKey" ], - "msg": "4d657373616765", - "sig": "30440220176a2557566ffa518b11226694eb9802ed2098bfe278e5570fe1d5d7af18a94302201291df6a0ed5fc0d15098e70bcf13a009284dfd0689d3bb4be6ceeb9be1487c4", - "result": "valid" + "msg" : "4d657373616765", + "sig" : "30440220176a2557566ffa518b11226694eb9802ed2098bfe278e5570fe1d5d7af18a94302201291df6a0ed5fc0d15098e70bcf13a009284dfd0689d3bb4be6ceeb9be1487c4", + "result" : "valid" }, { - "tcId": 462, - "comment": "x-coordinate of the public key has many trailing 0's", - "flags": [ + "tcId" : 462, + "comment" : "x-coordinate of the public key has many trailing 0's", + "flags" : [ "EdgeCasePublicKey" ], - "msg": "4d657373616765", - "sig": "3044022060be20c3dbc162dd34d26780621c104bbe5dace630171b2daef0d826409ee5c20220427f7e4d889d549170bda6a9409fb1cb8b0e763d13eea7bd97f64cf41dc6e497", - "result": "valid" + "msg" : "4d657373616765", + "sig" : "3044022060be20c3dbc162dd34d26780621c104bbe5dace630171b2daef0d826409ee5c20220427f7e4d889d549170bda6a9409fb1cb8b0e763d13eea7bd97f64cf41dc6e497", + "result" : "valid" }, { - "tcId": 463, - "comment": "x-coordinate of the public key has many trailing 0's", - "flags": [ + "tcId" : 463, + "comment" : "x-coordinate of the public key has many trailing 0's", + "flags" : [ "EdgeCasePublicKey" ], - "msg": "4d657373616765", - "sig": "3045022100edf03cf63f658883289a1a593d1007895b9f236d27c9c1f1313089aaed6b16ae02201a4dd6fc0814dc523d1fefa81c64fbf5e618e651e7096fccadbb94cd48e5e0cd", - "result": "valid" + "msg" : "4d657373616765", + "sig" : "3045022100edf03cf63f658883289a1a593d1007895b9f236d27c9c1f1313089aaed6b16ae02201a4dd6fc0814dc523d1fefa81c64fbf5e618e651e7096fccadbb94cd48e5e0cd", + "result" : "valid" } ] } diff --git a/include/xrpl/proto/org/xrpl/rpc/v1/get_ledger.proto b/include/xrpl/proto/org/xrpl/rpc/v1/get_ledger.proto index 0df9ca5ceb..59c9f51609 100644 --- a/include/xrpl/proto/org/xrpl/rpc/v1/get_ledger.proto +++ b/include/xrpl/proto/org/xrpl/rpc/v1/get_ledger.proto @@ -6,89 +6,81 @@ option java_multiple_files = true; import "org/xrpl/rpc/v1/ledger.proto"; -message GetLedgerRequest -{ +message GetLedgerRequest { + LedgerSpecifier ledger = 1; - LedgerSpecifier ledger = 1; + // If true, include transactions contained in this ledger + bool transactions = 2; - // If true, include transactions contained in this ledger - bool transactions = 2; + // If true and transactions, include full transactions and metadata + // If false and transactions, include only transaction hashes + bool expand = 3; - // If true and transactions, include full transactions and metadata - // If false and transactions, include only transaction hashes - bool expand = 3; + // If true, include state map difference between this ledger and the + // previous ledger. This includes all added, modified or deleted ledger + // objects + bool get_objects = 4; - // If true, include state map difference between this ledger and the - // previous ledger. This includes all added, modified or deleted ledger - // objects - bool get_objects = 4; - - // If the request needs to be forwarded from a reporting node to a p2p node, - // the reporting node will set this field. Clients should not set this - // field. - string client_ip = 5; + // If the request needs to be forwarded from a reporting node to a p2p node, + // the reporting node will set this field. Clients should not set this + // field. 
+ string client_ip = 5; - // Identifying string. If user is set, client_ip is not set, and request is - // coming from a secure_gateway host, then the client is not subject to - // resource controls - string user = 6; + // Identifying string. If user is set, client_ip is not set, and request is + // coming from a secure_gateway host, then the client is not subject to + // resource controls + string user = 6; - // For every object in the diff, get the object's predecessor and successor - // in the state map. Only used if get_objects is also true. - bool get_object_neighbors = 7; + // For every object in the diff, get the object's predecessor and successor + // in the state map. Only used if get_objects is also true. + bool get_object_neighbors = 7; } -message GetLedgerResponse -{ - bytes ledger_header = 1; +message GetLedgerResponse { + bytes ledger_header = 1; - oneof transactions - { - // Just the hashes - TransactionHashList hashes_list = 2; - - // Full transactions and metadata - TransactionAndMetadataList transactions_list = 3; - } + oneof transactions { + // Just the hashes + TransactionHashList hashes_list = 2; - // True if the ledger has been validated - bool validated = 4; + // Full transactions and metadata + TransactionAndMetadataList transactions_list = 3; + } - // State map difference between this ledger and the previous ledger - RawLedgerObjects ledger_objects = 5; + // True if the ledger has been validated + bool validated = 4; - // True if the skiplist object is included in ledger_objects - bool skiplist_included = 6; + // State map difference between this ledger and the previous ledger + RawLedgerObjects ledger_objects = 5; - // True if request was exempt from resource controls - bool is_unlimited = 7; + // True if the skiplist object is included in ledger_objects + bool skiplist_included = 6; - // True if the response contains the state map diff - bool objects_included = 8; + // True if request was exempt from resource controls + bool is_unlimited = 7; - // True if the response contains key of objects adjacent to objects in state - // map diff - bool object_neighbors_included = 9; + // True if the response contains the state map diff + bool objects_included = 8; + // True if the response contains key of objects adjacent to objects in state + // map diff + bool object_neighbors_included = 9; - // Successor information for book directories modified as part of this - // ledger - repeated BookSuccessor book_successors = 10; + // Successor information for book directories modified as part of this + // ledger + repeated BookSuccessor book_successors = 10; } -message TransactionHashList -{ - repeated bytes hashes = 1; +message TransactionHashList { + repeated bytes hashes = 1; } -message TransactionAndMetadata -{ - bytes transaction_blob = 1; +message TransactionAndMetadata { + bytes transaction_blob = 1; - bytes metadata_blob = 2; + bytes metadata_blob = 2; } -message TransactionAndMetadataList -{ - repeated TransactionAndMetadata transactions = 1; +message TransactionAndMetadataList { + repeated TransactionAndMetadata transactions = 1; } diff --git a/include/xrpl/proto/org/xrpl/rpc/v1/get_ledger_data.proto b/include/xrpl/proto/org/xrpl/rpc/v1/get_ledger_data.proto index c311994ac2..a9e93c743c 100644 --- a/include/xrpl/proto/org/xrpl/rpc/v1/get_ledger_data.proto +++ b/include/xrpl/proto/org/xrpl/rpc/v1/get_ledger_data.proto @@ -8,46 +8,43 @@ import "org/xrpl/rpc/v1/ledger.proto"; // Get ledger objects for a specific ledger. 
You can iterate through several // calls to retrieve the entire contents of a single ledger version. -message GetLedgerDataRequest -{ - // If set, only objects with a key greater than marker are returned. - // This can be used to pick up where a previous call left off. - // Set marker to the value of marker in the previous response. - bytes marker = 1; +message GetLedgerDataRequest { + // If set, only objects with a key greater than marker are returned. + // This can be used to pick up where a previous call left off. + // Set marker to the value of marker in the previous response. + bytes marker = 1; - LedgerSpecifier ledger = 2; + LedgerSpecifier ledger = 2; - // If set, only objects with a key less than end_marker are returned - bytes end_marker = 3; + // If set, only objects with a key less than end_marker are returned + bytes end_marker = 3; - // If the request needs to be forwarded from a reporting node to a p2p node, - // the reporting node will set this field. Clients should not set this - // field. - string client_ip = 4; + // If the request needs to be forwarded from a reporting node to a p2p node, + // the reporting node will set this field. Clients should not set this + // field. + string client_ip = 4; - // Identifying string. If user is set, client_ip is not set, and request is - // coming from a secure_gateway host, then the client is not subject to - // resource controls - string user = 6; + // Identifying string. If user is set, client_ip is not set, and request is + // coming from a secure_gateway host, then the client is not subject to + // resource controls + string user = 6; } -message GetLedgerDataResponse -{ - // Sequence of the ledger containing the returned ledger objects - uint32 ledger_index = 1; +message GetLedgerDataResponse { + // Sequence of the ledger containing the returned ledger objects + uint32 ledger_index = 1; - // Hash of the ledger containing the returned ledger objects - bytes ledger_hash = 2; - - // Ledger objects - RawLedgerObjects ledger_objects = 3; + // Hash of the ledger containing the returned ledger objects + bytes ledger_hash = 2; - // Key to be passed into a subsequent call to continue iteration. If not - // set, there are no more objects left in the ledger, or no more objects - // with key less than end_marker (if end_marker was set in the request) - bytes marker = 4; + // Ledger objects + RawLedgerObjects ledger_objects = 3; - // True if request was exempt from resource controls - bool is_unlimited = 7; + // Key to be passed into a subsequent call to continue iteration. If not + // set, there are no more objects left in the ledger, or no more objects + // with key less than end_marker (if end_marker was set in the request) + bytes marker = 4; + + // True if request was exempt from resource controls + bool is_unlimited = 7; } - diff --git a/include/xrpl/proto/org/xrpl/rpc/v1/get_ledger_diff.proto b/include/xrpl/proto/org/xrpl/rpc/v1/get_ledger_diff.proto index 218cbeb61f..ab6d5551fa 100644 --- a/include/xrpl/proto/org/xrpl/rpc/v1/get_ledger_diff.proto +++ b/include/xrpl/proto/org/xrpl/rpc/v1/get_ledger_diff.proto @@ -6,27 +6,23 @@ option java_multiple_files = true; import "org/xrpl/rpc/v1/ledger.proto"; - // Get the state map difference between the two specified ledgers -message GetLedgerDiffRequest -{ - LedgerSpecifier base_ledger = 1; +message GetLedgerDiffRequest { + LedgerSpecifier base_ledger = 1; - LedgerSpecifier desired_ledger = 2; + LedgerSpecifier desired_ledger = 2; - // If true, include the full ledger object. 
If false, only keys are included. - bool include_blobs = 3; + // If true, include the full ledger object. If false, only keys are included. + bool include_blobs = 3; - // If the request needs to be forwarded from a reporting node to a p2p node, - // the reporting node will set this field. Clients should not set this - // field. - string client_ip = 4; + // If the request needs to be forwarded from a reporting node to a p2p node, + // the reporting node will set this field. Clients should not set this + // field. + string client_ip = 4; } -message GetLedgerDiffResponse -{ - // All ledger objects that were added, modified or deleted between - // base_ledger and desired_ledger - RawLedgerObjects ledger_objects = 1; +message GetLedgerDiffResponse { + // All ledger objects that were added, modified or deleted between + // base_ledger and desired_ledger + RawLedgerObjects ledger_objects = 1; } - diff --git a/include/xrpl/proto/org/xrpl/rpc/v1/get_ledger_entry.proto b/include/xrpl/proto/org/xrpl/rpc/v1/get_ledger_entry.proto index a894c7729f..4da6420e35 100644 --- a/include/xrpl/proto/org/xrpl/rpc/v1/get_ledger_entry.proto +++ b/include/xrpl/proto/org/xrpl/rpc/v1/get_ledger_entry.proto @@ -7,25 +7,23 @@ option java_multiple_files = true; import "org/xrpl/rpc/v1/ledger.proto"; // Get a single ledger object -message GetLedgerEntryRequest -{ - // Key of the desired object - bytes key = 1; +message GetLedgerEntryRequest { + // Key of the desired object + bytes key = 1; - // Ledger containing the object - LedgerSpecifier ledger = 2; - - // If the request needs to be forwarded from a reporting node to a p2p node, - // the reporting node will set this field. Clients should not set this - // field. - string client_ip = 3; + // Ledger containing the object + LedgerSpecifier ledger = 2; + + // If the request needs to be forwarded from a reporting node to a p2p node, + // the reporting node will set this field. Clients should not set this + // field. + string client_ip = 3; } -message GetLedgerEntryResponse -{ - RawLedgerObject ledger_object = 1; +message GetLedgerEntryResponse { + RawLedgerObject ledger_object = 1; - // Ledger containing the object. Will match the value specified in the - // request. - LedgerSpecifier ledger = 2; + // Ledger containing the object. Will match the value specified in the + // request. + LedgerSpecifier ledger = 2; } diff --git a/include/xrpl/proto/org/xrpl/rpc/v1/ledger.proto b/include/xrpl/proto/org/xrpl/rpc/v1/ledger.proto index 3bb199de22..63ce86b51c 100644 --- a/include/xrpl/proto/org/xrpl/rpc/v1/ledger.proto +++ b/include/xrpl/proto/org/xrpl/rpc/v1/ledger.proto @@ -5,71 +5,61 @@ option java_package = "org.xrpl.rpc.v1"; option java_multiple_files = true; // Next field: 4 -message LedgerSpecifier -{ - // Next field: 4 - enum Shortcut - { - SHORTCUT_UNSPECIFIED = 0; - SHORTCUT_VALIDATED = 1; - SHORTCUT_CLOSED = 2; - SHORTCUT_CURRENT = 3; - } +message LedgerSpecifier { + // Next field: 4 + enum Shortcut { + SHORTCUT_UNSPECIFIED = 0; + SHORTCUT_VALIDATED = 1; + SHORTCUT_CLOSED = 2; + SHORTCUT_CURRENT = 3; + } - oneof ledger - { - Shortcut shortcut = 1; - uint32 sequence = 2; - // 32 bytes - bytes hash = 3; - } + oneof ledger { + Shortcut shortcut = 1; + uint32 sequence = 2; + // 32 bytes + bytes hash = 3; + } } - // Next field: 3 -message RawLedgerObject -{ - // Raw data of the ledger object. In GetLedgerResponse and - // GetLedgerDiffResponse, data will be empty if the object was deleted. - bytes data = 1; +message RawLedgerObject { + // Raw data of the ledger object. 
In GetLedgerResponse and + // GetLedgerDiffResponse, data will be empty if the object was deleted. + bytes data = 1; - // Key of the ledger object - bytes key = 2; + // Key of the ledger object + bytes key = 2; - enum ModificationType - { - UNSPECIFIED = 0; - CREATED = 1; - MODIFIED = 2; - DELETED = 3; - } + enum ModificationType { + UNSPECIFIED = 0; + CREATED = 1; + MODIFIED = 2; + DELETED = 3; + } - // Whether the object was created, modified or deleted - ModificationType mod_type = 3; + // Whether the object was created, modified or deleted + ModificationType mod_type = 3; - // Key of the object preceding this object in the desired ledger - bytes predecessor = 4; + // Key of the object preceding this object in the desired ledger + bytes predecessor = 4; - // Key of the object succeeding this object in the desired ledger - bytes successor = 5; + // Key of the object succeeding this object in the desired ledger + bytes successor = 5; } -message RawLedgerObjects -{ - repeated RawLedgerObject objects = 1; +message RawLedgerObjects { + repeated RawLedgerObject objects = 1; } // Successor information for book directories. The book base is (usually) not // an actual object, yet we need to be able to ask for the successor to the // book base. message BookSuccessor { + // Base of the book in question + bytes book_base = 1; - // Base of the book in question - bytes book_base = 1; - - // First book directory in the book. An empty value here means the entire - // book is deleted - bytes first_book = 2; - + // First book directory in the book. An empty value here means the entire + // book is deleted + bytes first_book = 2; }; - diff --git a/include/xrpl/proto/org/xrpl/rpc/v1/xrp_ledger.proto b/include/xrpl/proto/org/xrpl/rpc/v1/xrp_ledger.proto index 01a23fbe37..2b8dc471ce 100644 --- a/include/xrpl/proto/org/xrpl/rpc/v1/xrp_ledger.proto +++ b/include/xrpl/proto/org/xrpl/rpc/v1/xrp_ledger.proto @@ -9,13 +9,11 @@ import "org/xrpl/rpc/v1/get_ledger_entry.proto"; import "org/xrpl/rpc/v1/get_ledger_data.proto"; import "org/xrpl/rpc/v1/get_ledger_diff.proto"; - // These methods are binary only methods for retrieiving arbitrary ledger state // via gRPC. These methods are used by clio, but can also be // used by any client that wants to extract ledger state in an efficient manner. // They do not directly mimic the JSON equivalent methods. service XRPLedgerAPIService { - // Get a specific ledger, optionally including transactions and any modified, // added or deleted ledger objects rpc GetLedger(GetLedgerRequest) returns (GetLedgerResponse); @@ -29,5 +27,4 @@ service XRPLedgerAPIService { // Get all ledger objects that are different between the two specified // ledgers. Note, this method has no JSON equivalent. rpc GetLedgerDiff(GetLedgerDiffRequest) returns (GetLedgerDiffResponse); - } diff --git a/include/xrpl/proto/ripple.proto b/include/xrpl/proto/ripple.proto index a06bbd9a31..f93ebbc72c 100644 --- a/include/xrpl/proto/ripple.proto +++ b/include/xrpl/proto/ripple.proto @@ -4,29 +4,28 @@ package protocol; // Unused numbers in the list below may have been used previously. Please don't // reassign them for reuse unless you are 100% certain that there won't be a // conflict. Even if you're sure, it's probably best to assign a new type. 
-enum MessageType -{ - mtMANIFESTS = 2; - mtPING = 3; - mtCLUSTER = 5; - mtENDPOINTS = 15; - mtTRANSACTION = 30; - mtGET_LEDGER = 31; - mtLEDGER_DATA = 32; - mtPROPOSE_LEDGER = 33; - mtSTATUS_CHANGE = 34; - mtHAVE_SET = 35; - mtVALIDATION = 41; - mtGET_OBJECTS = 42; - mtVALIDATORLIST = 54; - mtSQUELCH = 55; - mtVALIDATORLISTCOLLECTION = 56; - mtPROOF_PATH_REQ = 57; - mtPROOF_PATH_RESPONSE = 58; - mtREPLAY_DELTA_REQ = 59; - mtREPLAY_DELTA_RESPONSE = 60; - mtHAVE_TRANSACTIONS = 63; - mtTRANSACTIONS = 64; +enum MessageType { + mtMANIFESTS = 2; + mtPING = 3; + mtCLUSTER = 5; + mtENDPOINTS = 15; + mtTRANSACTION = 30; + mtGET_LEDGER = 31; + mtLEDGER_DATA = 32; + mtPROPOSE_LEDGER = 33; + mtSTATUS_CHANGE = 34; + mtHAVE_SET = 35; + mtVALIDATION = 41; + mtGET_OBJECTS = 42; + mtVALIDATORLIST = 54; + mtSQUELCH = 55; + mtVALIDATORLISTCOLLECTION = 56; + mtPROOF_PATH_REQ = 57; + mtPROOF_PATH_RESPONSE = 58; + mtREPLAY_DELTA_REQ = 59; + mtREPLAY_DELTA_RESPONSE = 60; + mtHAVE_TRANSACTIONS = 63; + mtTRANSACTIONS = 64; } // token, iterations, target, challenge = issue demand for proof of work @@ -36,352 +35,309 @@ enum MessageType //------------------------------------------------------------------------------ /* Provides the current ephemeral key for a validator. */ -message TMManifest -{ - // A Manifest object in the Ripple serialization format. - required bytes stobject = 1; +message TMManifest { + // A Manifest object in the Ripple serialization format. + required bytes stobject = 1; } -message TMManifests -{ - repeated TMManifest list = 1; +message TMManifests { + repeated TMManifest list = 1; - // The manifests sent when a peer first connects to another peer are `history`. - optional bool history = 2 [deprecated=true]; + // The manifests sent when a peer first connects to another peer are `history`. + optional bool history = 2 [deprecated = true]; } //------------------------------------------------------------------------------ // The status of a node in our cluster -message TMClusterNode -{ - required string publicKey = 1; - required uint32 reportTime = 2; - required uint32 nodeLoad = 3; - optional string nodeName = 4; - optional string address = 5; +message TMClusterNode { + required string publicKey = 1; + required uint32 reportTime = 2; + required uint32 nodeLoad = 3; + optional string nodeName = 4; + optional string address = 5; } // Sources that are placing load on the server -message TMLoadSource -{ - required string name = 1; - required uint32 cost = 2; - optional uint32 count = 3; // number of connections +message TMLoadSource { + required string name = 1; + required uint32 cost = 2; + optional uint32 count = 3; // number of connections } // The status of all nodes in the cluster -message TMCluster -{ - repeated TMClusterNode clusterNodes = 1; - repeated TMLoadSource loadSources = 2; +message TMCluster { + repeated TMClusterNode clusterNodes = 1; + repeated TMLoadSource loadSources = 2; } // Node public key -message TMLink -{ - required bytes nodePubKey = 1 [deprecated=true]; // node public key +message TMLink { + required bytes nodePubKey = 1 [deprecated = true]; // node public key } // Peer public key -message TMPublicKey -{ - required bytes publicKey = 1; +message TMPublicKey { + required bytes publicKey = 1; } // A transaction can have only one input and one output. // If you want to send an amount that is greater than any single address of yours // you must first combine coins from one address to another. 
-enum TransactionStatus -{ - tsNEW = 1; // origin node did/could not validate - tsCURRENT = 2; // scheduled to go in this ledger - tsCOMMITED = 3; // in a closed ledger - tsREJECT_CONFLICT = 4; - tsREJECT_INVALID = 5; - tsREJECT_FUNDS = 6; - tsHELD_SEQ = 7; - tsHELD_LEDGER = 8; // held for future ledger +enum TransactionStatus { + tsNEW = 1; // origin node did/could not validate + tsCURRENT = 2; // scheduled to go in this ledger + tsCOMMITED = 3; // in a closed ledger + tsREJECT_CONFLICT = 4; + tsREJECT_INVALID = 5; + tsREJECT_FUNDS = 6; + tsHELD_SEQ = 7; + tsHELD_LEDGER = 8; // held for future ledger } -message TMTransaction -{ - required bytes rawTransaction = 1; - required TransactionStatus status = 2; - optional uint64 receiveTimestamp = 3; - optional bool deferred = 4; // not applied to open ledger +message TMTransaction { + required bytes rawTransaction = 1; + required TransactionStatus status = 2; + optional uint64 receiveTimestamp = 3; + optional bool deferred = 4; // not applied to open ledger } -message TMTransactions -{ - repeated TMTransaction transactions = 1; +message TMTransactions { + repeated TMTransaction transactions = 1; } - -enum NodeStatus -{ - nsCONNECTING = 1; // acquiring connections - nsCONNECTED = 2; // convinced we are connected to the real network - nsMONITORING = 3; // we know what the previous ledger is - nsVALIDATING = 4; // we have the full ledger contents - nsSHUTTING = 5; // node is shutting down +enum NodeStatus { + nsCONNECTING = 1; // acquiring connections + nsCONNECTED = 2; // convinced we are connected to the real network + nsMONITORING = 3; // we know what the previous ledger is + nsVALIDATING = 4; // we have the full ledger contents + nsSHUTTING = 5; // node is shutting down } -enum NodeEvent -{ - neCLOSING_LEDGER = 1; // closing a ledger because its close time has come - neACCEPTED_LEDGER = 2; // accepting a closed ledger, we have finished computing it - neSWITCHED_LEDGER = 3; // changing due to network consensus - neLOST_SYNC = 4; +enum NodeEvent { + neCLOSING_LEDGER = 1; // closing a ledger because its close time has come + neACCEPTED_LEDGER = 2; // accepting a closed ledger, we have finished computing it + neSWITCHED_LEDGER = 3; // changing due to network consensus + neLOST_SYNC = 4; } -message TMStatusChange -{ - optional NodeStatus newStatus = 1; - optional NodeEvent newEvent = 2; - optional uint32 ledgerSeq = 3; - optional bytes ledgerHash = 4; - optional bytes ledgerHashPrevious = 5; - optional uint64 networkTime = 6; - optional uint32 firstSeq = 7; - optional uint32 lastSeq = 8; +message TMStatusChange { + optional NodeStatus newStatus = 1; + optional NodeEvent newEvent = 2; + optional uint32 ledgerSeq = 3; + optional bytes ledgerHash = 4; + optional bytes ledgerHashPrevious = 5; + optional uint64 networkTime = 6; + optional uint32 firstSeq = 7; + optional uint32 lastSeq = 8; } - // Announce to the network our position on a closing ledger -message TMProposeSet -{ - required uint32 proposeSeq = 1; - required bytes currentTxHash = 2; // the hash of the ledger we are proposing - required bytes nodePubKey = 3; - required uint32 closeTime = 4; - required bytes signature = 5; // signature of above fields - required bytes previousledger = 6; - repeated bytes addedTransactions = 10; // not required if number is large - repeated bytes removedTransactions = 11; // not required if number is large +message TMProposeSet { + required uint32 proposeSeq = 1; + required bytes currentTxHash = 2; // the hash of the ledger we are proposing + required bytes 
nodePubKey = 3; + required uint32 closeTime = 4; + required bytes signature = 5; // signature of above fields + required bytes previousledger = 6; + repeated bytes addedTransactions = 10; // not required if number is large + repeated bytes removedTransactions = 11; // not required if number is large - // node vouches signature is correct - optional bool checkedSignature = 7 [deprecated=true]; + // node vouches signature is correct + optional bool checkedSignature = 7 [deprecated = true]; - // Number of hops traveled - optional uint32 hops = 12 [deprecated=true]; + // Number of hops traveled + optional uint32 hops = 12 [deprecated = true]; } -enum TxSetStatus -{ - tsHAVE = 1; // We have this set locally - tsCAN_GET = 2; // We have a peer with this set - tsNEED = 3; // We need this set and can't get it +enum TxSetStatus { + tsHAVE = 1; // We have this set locally + tsCAN_GET = 2; // We have a peer with this set + tsNEED = 3; // We need this set and can't get it } -message TMHaveTransactionSet -{ - required TxSetStatus status = 1; - required bytes hash = 2; +message TMHaveTransactionSet { + required TxSetStatus status = 1; + required bytes hash = 2; } // Validator list (UNL) -message TMValidatorList -{ - required bytes manifest = 1; - required bytes blob = 2; - required bytes signature = 3; - required uint32 version = 4; +message TMValidatorList { + required bytes manifest = 1; + required bytes blob = 2; + required bytes signature = 3; + required uint32 version = 4; } // Validator List v2 -message ValidatorBlobInfo -{ - optional bytes manifest = 1; - required bytes blob = 2; - required bytes signature = 3; +message ValidatorBlobInfo { + optional bytes manifest = 1; + required bytes blob = 2; + required bytes signature = 3; } // Collection of Validator List v2 (UNL) -message TMValidatorListCollection -{ - required uint32 version = 1; - required bytes manifest = 2; - repeated ValidatorBlobInfo blobs = 3; +message TMValidatorListCollection { + required uint32 version = 1; + required bytes manifest = 2; + repeated ValidatorBlobInfo blobs = 3; } // Used to sign a final closed ledger after reprocessing -message TMValidation -{ - // The serialized validation - required bytes validation = 1; +message TMValidation { + // The serialized validation + required bytes validation = 1; - // node vouches signature is correct - optional bool checkedSignature = 2 [deprecated = true]; + // node vouches signature is correct + optional bool checkedSignature = 2 [deprecated = true]; - // Number of hops traveled - optional uint32 hops = 3 [deprecated = true]; + // Number of hops traveled + optional uint32 hops = 3 [deprecated = true]; } // An array of Endpoint messages -message TMEndpoints -{ - // Previously used - don't reuse. - reserved 2; +message TMEndpoints { + // Previously used - don't reuse. + reserved 2; - // This field is used to allow the TMEndpoints message format to be - // modified as necessary in the future. - required uint32 version = 1; + // This field is used to allow the TMEndpoints message format to be + // modified as necessary in the future. 
+ required uint32 version = 1; - // An update to the Endpoint type that uses a string - // to represent endpoints, thus allowing ipv6 or ipv4 addresses - message TMEndpointv2 - { - required string endpoint = 1; - required uint32 hops = 2; - } - repeated TMEndpointv2 endpoints_v2 = 3; + // An update to the Endpoint type that uses a string + // to represent endpoints, thus allowing ipv6 or ipv4 addresses + message TMEndpointv2 { + required string endpoint = 1; + required uint32 hops = 2; + } + repeated TMEndpointv2 endpoints_v2 = 3; }; -message TMIndexedObject -{ - optional bytes hash = 1; - optional bytes nodeID = 2; - optional bytes index = 3; - optional bytes data = 4; - optional uint32 ledgerSeq = 5; +message TMIndexedObject { + optional bytes hash = 1; + optional bytes nodeID = 2; + optional bytes index = 3; + optional bytes data = 4; + optional uint32 ledgerSeq = 5; } -message TMGetObjectByHash -{ - enum ObjectType { - otUNKNOWN = 0; - otLEDGER = 1; - otTRANSACTION = 2; - otTRANSACTION_NODE = 3; - otSTATE_NODE = 4; - otCAS_OBJECT = 5; - otFETCH_PACK = 6; - otTRANSACTIONS = 7; - } +message TMGetObjectByHash { + enum ObjectType { + otUNKNOWN = 0; + otLEDGER = 1; + otTRANSACTION = 2; + otTRANSACTION_NODE = 3; + otSTATE_NODE = 4; + otCAS_OBJECT = 5; + otFETCH_PACK = 6; + otTRANSACTIONS = 7; + } - required ObjectType type = 1; - required bool query = 2; // is this a query or a reply? - optional uint32 seq = 3; // used to match replies to queries - optional bytes ledgerHash = 4; // the hash of the ledger these queries are for - optional bool fat = 5; // return related nodes - repeated TMIndexedObject objects = 6; // the specific objects requested + required ObjectType type = 1; + required bool query = 2; // is this a query or a reply? + optional uint32 seq = 3; // used to match replies to queries + optional bytes ledgerHash = 4; // the hash of the ledger these queries are for + optional bool fat = 5; // return related nodes + repeated TMIndexedObject objects = 6; // the specific objects requested } - -message TMLedgerNode -{ - required bytes nodedata = 1; - optional bytes nodeid = 2; // missing for ledger base data +message TMLedgerNode { + required bytes nodedata = 1; + optional bytes nodeid = 2; // missing for ledger base data } -enum TMLedgerInfoType -{ - liBASE = 0; // basic ledger info - liTX_NODE = 1; // transaction node - liAS_NODE = 2; // account state node - liTS_CANDIDATE = 3; // candidate transaction set +enum TMLedgerInfoType { + liBASE = 0; // basic ledger info + liTX_NODE = 1; // transaction node + liAS_NODE = 2; // account state node + liTS_CANDIDATE = 3; // candidate transaction set } -enum TMLedgerType -{ - ltACCEPTED = 0; - ltCURRENT = 1; // no longer supported - ltCLOSED = 2; +enum TMLedgerType { + ltACCEPTED = 0; + ltCURRENT = 1; // no longer supported + ltCLOSED = 2; } -enum TMQueryType -{ - qtINDIRECT = 0; +enum TMQueryType { + qtINDIRECT = 0; } -message TMGetLedger -{ - required TMLedgerInfoType itype = 1; - optional TMLedgerType ltype = 2; - optional bytes ledgerHash = 3; // Can also be the transaction set hash if liTS_CANDIDATE - optional uint32 ledgerSeq = 4; - repeated bytes nodeIDs = 5; - optional uint64 requestCookie = 6; - optional TMQueryType queryType = 7; - optional uint32 queryDepth = 8; // How deep to go, number of extra levels +message TMGetLedger { + required TMLedgerInfoType itype = 1; + optional TMLedgerType ltype = 2; + optional bytes ledgerHash = 3; // Can also be the transaction set hash if liTS_CANDIDATE + optional uint32 ledgerSeq = 4; + repeated 
bytes nodeIDs = 5; + optional uint64 requestCookie = 6; + optional TMQueryType queryType = 7; + optional uint32 queryDepth = 8; // How deep to go, number of extra levels } -enum TMReplyError -{ - reNO_LEDGER = 1; // We don't have the ledger you are asking about - reNO_NODE = 2; // We don't have any of the nodes you are asking for - reBAD_REQUEST = 3; // The request is wrong, e.g. wrong format +enum TMReplyError { + reNO_LEDGER = 1; // We don't have the ledger you are asking about + reNO_NODE = 2; // We don't have any of the nodes you are asking for + reBAD_REQUEST = 3; // The request is wrong, e.g. wrong format } -message TMLedgerData -{ - required bytes ledgerHash = 1; - required uint32 ledgerSeq = 2; - required TMLedgerInfoType type = 3; - repeated TMLedgerNode nodes = 4; - optional uint32 requestCookie = 5; - optional TMReplyError error = 6; +message TMLedgerData { + required bytes ledgerHash = 1; + required uint32 ledgerSeq = 2; + required TMLedgerInfoType type = 3; + repeated TMLedgerNode nodes = 4; + optional uint32 requestCookie = 5; + optional TMReplyError error = 6; } -message TMPing -{ - enum pingType { - ptPING = 0; // we want a reply - ptPONG = 1; // this is a reply - } - required pingType type = 1; - optional uint32 seq = 2; // detect stale replies, ensure other side is reading - optional uint64 pingTime = 3; // know when we think we sent the ping - optional uint64 netTime = 4; +message TMPing { + enum pingType { + ptPING = 0; // we want a reply + ptPONG = 1; // this is a reply + } + required pingType type = 1; + optional uint32 seq = 2; // detect stale replies, ensure other side is reading + optional uint64 pingTime = 3; // know when we think we sent the ping + optional uint64 netTime = 4; } -message TMSquelch -{ - required bool squelch = 1; // squelch if true, otherwise unsquelch - required bytes validatorPubKey = 2; // validator's public key - optional uint32 squelchDuration = 3; // squelch duration in seconds +message TMSquelch { + required bool squelch = 1; // squelch if true, otherwise unsquelch + required bytes validatorPubKey = 2; // validator's public key + optional uint32 squelchDuration = 3; // squelch duration in seconds } -enum TMLedgerMapType -{ - lmTRANASCTION = 1; // transaction map - lmACCOUNT_STATE = 2; // account state map +enum TMLedgerMapType { + lmTRANASCTION = 1; // transaction map + lmACCOUNT_STATE = 2; // account state map } -message TMProofPathRequest -{ - required bytes key = 1; - required bytes ledgerHash = 2; - required TMLedgerMapType type = 3; +message TMProofPathRequest { + required bytes key = 1; + required bytes ledgerHash = 2; + required TMLedgerMapType type = 3; } -message TMProofPathResponse -{ - required bytes key = 1; - required bytes ledgerHash = 2; - required TMLedgerMapType type = 3; - optional bytes ledgerHeader = 4; - repeated bytes path = 5; - optional TMReplyError error = 6; +message TMProofPathResponse { + required bytes key = 1; + required bytes ledgerHash = 2; + required TMLedgerMapType type = 3; + optional bytes ledgerHeader = 4; + repeated bytes path = 5; + optional TMReplyError error = 6; } -message TMReplayDeltaRequest -{ - required bytes ledgerHash = 1; +message TMReplayDeltaRequest { + required bytes ledgerHash = 1; } -message TMReplayDeltaResponse -{ - required bytes ledgerHash = 1; - optional bytes ledgerHeader = 2; - repeated bytes transaction = 3; - optional TMReplyError error = 4; +message TMReplayDeltaResponse { + required bytes ledgerHash = 1; + optional bytes ledgerHeader = 2; + repeated bytes transaction = 3; + 
optional TMReplyError error = 4; } -message TMHaveTransactions -{ - repeated bytes hashes = 1; +message TMHaveTransactions { + repeated bytes hashes = 1; } - From 58dd07bbdf04772a9f8cda1a006e941189627452 Mon Sep 17 00:00:00 2001 From: Bart Date: Thu, 21 Aug 2025 16:32:04 -0400 Subject: [PATCH 131/244] fix: Skip notify-clio when running in a fork, reorder config fields (#5712) This change will skip running the notify-clio job when a PR is created from a fork, and reorders the strategy matrix configuration fields so GitHub will more clearly show which configuration is running. --- .github/scripts/strategy-matrix/generate.py | 21 ++++++++++++--------- .github/workflows/build-test.yml | 2 +- .github/workflows/notify-clio.yml | 1 + 3 files changed, 14 insertions(+), 10 deletions(-) diff --git a/.github/scripts/strategy-matrix/generate.py b/.github/scripts/strategy-matrix/generate.py index 42927b5ccd..0acdca8d4f 100644 --- a/.github/scripts/strategy-matrix/generate.py +++ b/.github/scripts/strategy-matrix/generate.py @@ -48,18 +48,18 @@ def generate_strategy_matrix(all: bool, architecture: list[dict], os: list[dict] skip = True if os['distro_version'] == 'bookworm': if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-13' and build_type == 'Release' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/arm64': - cmake_args = f'{cmake_args} -DUNIT_TEST_REFERENCE_FEE=500' + cmake_args = f'-DUNIT_TEST_REFERENCE_FEE=500 {cmake_args}' skip = False if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-15' and build_type == 'Debug' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/amd64': - cmake_args = f'{cmake_args} -Dcoverage=ON -Dcoverage_format=xml -DCODE_COVERAGE_VERBOSE=ON -DCMAKE_C_FLAGS=-O0 -DCMAKE_CXX_FLAGS=-O0' + cmake_args = f'-Dcoverage=ON -Dcoverage_format=xml -DCODE_COVERAGE_VERBOSE=ON -DCMAKE_C_FLAGS=-O0 -DCMAKE_CXX_FLAGS=-O0 {cmake_args}' cmake_target = 'coverage' build_only = True skip = False if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-16' and build_type == 'Debug' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/arm64': - cmake_args = f'{cmake_args} -Dvoidstar=ON' + cmake_args = f'-Dvoidstar=ON {cmake_args}' skip = False if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-17' and build_type == 'Release' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/amd64': - cmake_args = f'{cmake_args} -DUNIT_TEST_REFERENCE_FEE=1000' + cmake_args = f'-DUNIT_TEST_REFERENCE_FEE=1000 {cmake_args}' skip = False if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-20' and build_type == 'Debug' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/amd64': skip = False @@ -137,14 +137,17 @@ def generate_strategy_matrix(all: bool, architecture: list[dict], os: list[dict] if '-Dunity=ON' in cmake_args: config_name += '-unity' + # Add the configuration to the list, with the most unique fields first, + # so that they are easier to identify in the GitHub Actions UI, as long + # names get truncated. 
configurations.append({ - 'architecture': architecture, - 'os': os, - 'build_type': build_type, - 'build_only': 'true' if build_only else 'false', + 'config_name': config_name, 'cmake_args': cmake_args, 'cmake_target': cmake_target, - 'config_name': config_name, + 'build_only': 'true' if build_only else 'false', + 'build_type': build_type, + 'os': os, + 'architecture': architecture, }) return {'include': configurations} diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml index 2fa557f671..e06ff9abab 100644 --- a/.github/workflows/build-test.yml +++ b/.github/workflows/build-test.yml @@ -138,7 +138,7 @@ jobs: if: ${{ inputs.os == 'macos' }} run: | echo 'Installing build tools.' - brew install cmake conan ninja coreutils + brew install --quiet cmake conan ninja coreutils - name: Check configuration (Linux and MacOS) if: ${{ inputs.os == 'linux' || inputs.os == 'macos' }} run: | diff --git a/.github/workflows/notify-clio.yml b/.github/workflows/notify-clio.yml index 6ccf527ea6..b92dea65e2 100644 --- a/.github/workflows/notify-clio.yml +++ b/.github/workflows/notify-clio.yml @@ -36,6 +36,7 @@ defaults: jobs: upload: + if: ${{ github.event.pull_request.head.repo.full_name == github.repository }} runs-on: ubuntu-latest container: ghcr.io/xrplf/ci/ubuntu-noble:gcc-13 steps: From 896b8c3b54a22b0497cb0d1ce95e1095f9a227ce Mon Sep 17 00:00:00 2001 From: Bart Date: Fri, 22 Aug 2025 10:02:56 -0400 Subject: [PATCH 132/244] chore: Fix file formatting (#5718) --- .github/actions/build-deps/action.yml | 18 ++--- .github/actions/build-test/action.yml | 14 ++-- .github/scripts/levelization/README.md | 2 +- .github/workflows/build-test.yml | 20 ++--- .github/workflows/check-missing-commits.yml | 78 +++++++++---------- .github/workflows/notify-clio.yml | 10 +-- .github/workflows/on-pr.yml | 40 +++++----- .github/workflows/on-trigger.yml | 44 +++++------ .github/workflows/publish-docs.yml | 16 ++-- bin/git/setup-upstreams.sh | 3 +- bin/git/squash-branches.sh | 3 +- cfg/rippled-example.cfg | 8 +- cmake/RippledCore.cmake | 2 +- .../negativeUNLSqDiagram.puml | 20 ++--- .../ledger_replay_classes.puml | 36 ++++----- .../ledger_replay_sequence.puml | 6 +- include/xrpl/protocol/Batch.h | 2 +- include/xrpl/protocol/TxFlags.h | 4 +- .../xrpl/protocol/detail/ledger_entries.macro | 1 - src/libxrpl/protocol/Permissions.cpp | 2 +- src/test/app/Delegate_test.cpp | 2 +- src/test/app/NFTokenAuth_test.cpp | 2 +- src/test/csf/collectors.h | 2 +- src/test/jtx/delegate.h | 2 +- src/test/jtx/impl/delegate.cpp | 2 +- src/xrpld/app/tx/detail/DelegateSet.cpp | 2 +- src/xrpld/app/tx/detail/DelegateSet.h | 2 +- 27 files changed, 170 insertions(+), 173 deletions(-) diff --git a/.github/actions/build-deps/action.yml b/.github/actions/build-deps/action.yml index 272af0f97d..ba4f4e9e2f 100644 --- a/.github/actions/build-deps/action.yml +++ b/.github/actions/build-deps/action.yml @@ -7,33 +7,33 @@ name: Build Conan dependencies # https://docs.github.com/en/actions/reference/workflows-and-actions/metadata-syntax#inputs. inputs: build_dir: - description: 'The directory where to build.' + description: "The directory where to build." required: true build_type: description: 'The build type to use ("Debug", "Release").' required: true conan_remote_name: - description: 'The name of the Conan remote to use.' + description: "The name of the Conan remote to use." required: true conan_remote_url: - description: 'The URL of the Conan endpoint to use.' + description: "The URL of the Conan endpoint to use." 
required: true conan_remote_username: - description: 'The username for logging into the Conan remote. If not provided, the dependencies will not be uploaded.' + description: "The username for logging into the Conan remote. If not provided, the dependencies will not be uploaded." required: false - default: '' + default: "" conan_remote_password: - description: 'The password for logging into the Conan remote. If not provided, the dependencies will not be uploaded.' + description: "The password for logging into the Conan remote. If not provided, the dependencies will not be uploaded." required: false - default: '' + default: "" force_build: description: 'Force building of all dependencies ("true", "false").' required: false - default: 'false' + default: "false" force_upload: description: 'Force uploading of all dependencies ("true", "false").' required: false - default: 'false' + default: "false" runs: using: composite diff --git a/.github/actions/build-test/action.yml b/.github/actions/build-test/action.yml index 44292e7318..d68f302698 100644 --- a/.github/actions/build-test/action.yml +++ b/.github/actions/build-test/action.yml @@ -6,26 +6,26 @@ name: Build and Test # https://docs.github.com/en/actions/reference/workflows-and-actions/metadata-syntax#inputs. inputs: build_dir: - description: 'The directory where to build.' + description: "The directory where to build." required: true build_only: description: 'Whether to only build or to build and test the code ("true", "false").' required: false - default: 'false' + default: "false" build_type: description: 'The build type to use ("Debug", "Release").' required: true cmake_args: - description: 'Additional arguments to pass to CMake.' + description: "Additional arguments to pass to CMake." required: false - default: '' + default: "" cmake_target: - description: 'The CMake target to build.' + description: "The CMake target to build." required: true codecov_token: - description: 'The Codecov token to use for uploading coverage reports.' + description: "The Codecov token to use for uploading coverage reports." required: false - default: '' + default: "" os: description: 'The operating system to use for the build ("linux", "macos", "windows").' required: true diff --git a/.github/scripts/levelization/README.md b/.github/scripts/levelization/README.md index ec41a021cc..31c6d34b6b 100644 --- a/.github/scripts/levelization/README.md +++ b/.github/scripts/levelization/README.md @@ -111,4 +111,4 @@ get those details locally. 1. Run `levelization.sh` 2. Grep the modules in `paths.txt`. - For example, if a cycle is found `A ~= B`, simply `grep -w - A .github/scripts/levelization/results/paths.txt | grep -w B` +A .github/scripts/levelization/results/paths.txt | grep -w B` diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml index e06ff9abab..36145479e1 100644 --- a/.github/workflows/build-test.yml +++ b/.github/workflows/build-test.yml @@ -9,25 +9,25 @@ on: workflow_call: inputs: build_dir: - description: 'The directory where to build.' + description: "The directory where to build." required: false type: string - default: '.build' + default: ".build" conan_remote_name: - description: 'The name of the Conan remote to use.' + description: "The name of the Conan remote to use." required: true type: string conan_remote_url: - description: 'The URL of the Conan endpoint to use.' + description: "The URL of the Conan endpoint to use." 
required: true type: string dependencies_force_build: - description: 'Force building of all dependencies.' + description: "Force building of all dependencies." required: false type: boolean default: false dependencies_force_upload: - description: 'Force uploading of all dependencies.' + description: "Force uploading of all dependencies." required: false type: boolean default: false @@ -40,16 +40,16 @@ on: description: 'The strategy matrix to use for generating the configurations ("minimal", "all").' required: false type: string - default: 'minimal' + default: "minimal" secrets: codecov_token: - description: 'The Codecov token to use for uploading coverage reports.' + description: "The Codecov token to use for uploading coverage reports." required: false conan_remote_username: - description: 'The username for logging into the Conan remote. If not provided, the dependencies will not be uploaded.' + description: "The username for logging into the Conan remote. If not provided, the dependencies will not be uploaded." required: false conan_remote_password: - description: 'The password for logging into the Conan remote. If not provided, the dependencies will not be uploaded.' + description: "The password for logging into the Conan remote. If not provided, the dependencies will not be uploaded." required: false concurrency: diff --git a/.github/workflows/check-missing-commits.yml b/.github/workflows/check-missing-commits.yml index da0e296e70..07d29174d8 100644 --- a/.github/workflows/check-missing-commits.yml +++ b/.github/workflows/check-missing-commits.yml @@ -18,45 +18,45 @@ jobs: check: runs-on: ubuntu-latest steps: - - name: Checkout repository - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 - with: - fetch-depth: 0 - - name: Check for missing commits - env: - MESSAGE: | + - name: Checkout repository + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 + with: + fetch-depth: 0 + - name: Check for missing commits + env: + MESSAGE: | - If you are reading this, then the commits indicated above are missing - from the "develop" and/or "release" branch. Do a reverse-merge as soon - as possible. See CONTRIBUTING.md for instructions. - run: | - set -o pipefail - # Branches are ordered by how "canonical" they are. Every commit in one - # branch should be in all the branches behind it. - order=(master release develop) - branches=() - for branch in "${order[@]}"; do - # Check that the branches exist so that this job will work on forked - # repos, which don't necessarily have master and release branches. - echo "Checking if ${branch} exists." - if git ls-remote --exit-code --heads origin \ - refs/heads/${branch} > /dev/null; then - branches+=(origin/${branch}) - fi - done + If you are reading this, then the commits indicated above are missing + from the "develop" and/or "release" branch. Do a reverse-merge as soon + as possible. See CONTRIBUTING.md for instructions. + run: | + set -o pipefail + # Branches are ordered by how "canonical" they are. Every commit in one + # branch should be in all the branches behind it. + order=(master release develop) + branches=() + for branch in "${order[@]}"; do + # Check that the branches exist so that this job will work on forked + # repos, which don't necessarily have master and release branches. + echo "Checking if ${branch} exists." 
+ if git ls-remote --exit-code --heads origin \ + refs/heads/${branch} > /dev/null; then + branches+=(origin/${branch}) + fi + done - prior=() - for branch in "${branches[@]}"; do - if [[ ${#prior[@]} -ne 0 ]]; then - echo "Checking ${prior[@]} for commits missing from ${branch}." - git log --oneline --no-merges "${prior[@]}" \ - ^$branch | tee -a "missing-commits.txt" - echo + prior=() + for branch in "${branches[@]}"; do + if [[ ${#prior[@]} -ne 0 ]]; then + echo "Checking ${prior[@]} for commits missing from ${branch}." + git log --oneline --no-merges "${prior[@]}" \ + ^$branch | tee -a "missing-commits.txt" + echo + fi + prior+=("${branch}") + done + + if [[ $(cat missing-commits.txt | wc -l) -ne 0 ]]; then + echo "${MESSAGE}" + exit 1 fi - prior+=("${branch}") - done - - if [[ $(cat missing-commits.txt | wc -l) -ne 0 ]]; then - echo "${MESSAGE}" - exit 1 - fi diff --git a/.github/workflows/notify-clio.yml b/.github/workflows/notify-clio.yml index b92dea65e2..f7e10de7af 100644 --- a/.github/workflows/notify-clio.yml +++ b/.github/workflows/notify-clio.yml @@ -8,22 +8,22 @@ on: workflow_call: inputs: conan_remote_name: - description: 'The name of the Conan remote to use.' + description: "The name of the Conan remote to use." required: true type: string conan_remote_url: - description: 'The URL of the Conan endpoint to use.' + description: "The URL of the Conan endpoint to use." required: true type: string secrets: clio_notify_token: - description: 'The GitHub token to notify Clio about new versions.' + description: "The GitHub token to notify Clio about new versions." required: true conan_remote_username: - description: 'The username for logging into the Conan remote.' + description: "The username for logging into the Conan remote." required: true conan_remote_password: - description: 'The password for logging into the Conan remote.' + description: "The password for logging into the Conan remote." required: true concurrency: diff --git a/.github/workflows/on-pr.yml b/.github/workflows/on-pr.yml index 9d7bbbf89c..f3811c4ea4 100644 --- a/.github/workflows/on-pr.yml +++ b/.github/workflows/on-pr.yml @@ -7,28 +7,28 @@ name: PR on: pull_request: paths: - - '.github/actions/build-deps/**' - - '.github/actions/build-test/**' - - '.github/scripts/levelization/**' - - '.github/scripts/strategy-matrix/**' - - '.github/workflows/build-test.yml' - - '.github/workflows/check-format.yml' - - '.github/workflows/check-levelization.yml' - - '.github/workflows/notify-clio.yml' - - '.github/workflows/on-pr.yml' + - ".github/actions/build-deps/**" + - ".github/actions/build-test/**" + - ".github/scripts/levelization/**" + - ".github/scripts/strategy-matrix/**" + - ".github/workflows/build-test.yml" + - ".github/workflows/check-format.yml" + - ".github/workflows/check-levelization.yml" + - ".github/workflows/notify-clio.yml" + - ".github/workflows/on-pr.yml" # Keep the list of paths below in sync with those in the `on-trigger.yml` # file. 
- - 'cmake/**' - - 'conan/**' - - 'external/**' - - 'include/**' - - 'src/**' - - 'tests/**' - - '.clang-format' - - '.codecov.yml' - - '.pre-commit-config.yaml' - - 'CMakeLists.txt' - - 'conanfile.py' + - "cmake/**" + - "conan/**" + - "external/**" + - "include/**" + - "src/**" + - "tests/**" + - ".clang-format" + - ".codecov.yml" + - ".pre-commit-config.yaml" + - "CMakeLists.txt" + - "conanfile.py" types: - opened - synchronize diff --git a/.github/workflows/on-trigger.yml b/.github/workflows/on-trigger.yml index ed9a794985..55e93b9866 100644 --- a/.github/workflows/on-trigger.yml +++ b/.github/workflows/on-trigger.yml @@ -13,31 +13,31 @@ on: - release - master paths: - - '.github/actions/build-deps/**' - - '.github/actions/build-test/**' - - '.github/scripts/strategy-matrix/**' - - '.github/workflows/build-test.yml' - - '.github/workflows/check-missing-commits.yml' - - '.github/workflows/on-trigger.yml' - - '.github/workflows/publish-docs.yml' + - ".github/actions/build-deps/**" + - ".github/actions/build-test/**" + - ".github/scripts/strategy-matrix/**" + - ".github/workflows/build-test.yml" + - ".github/workflows/check-missing-commits.yml" + - ".github/workflows/on-trigger.yml" + - ".github/workflows/publish-docs.yml" # Keep the list of paths below in sync with those in `on-pr.yml`. - - 'cmake/**' - - 'conan/**' - - 'external/**' - - 'include/**' - - 'src/**' - - 'tests/**' - - '.clang-format' - - '.codecov.yml' - - '.pre-commit-config.yaml' - - 'CMakeLists.txt' - - 'conanfile.py' + - "cmake/**" + - "conan/**" + - "external/**" + - "include/**" + - "src/**" + - "tests/**" + - ".clang-format" + - ".codecov.yml" + - ".pre-commit-config.yaml" + - "CMakeLists.txt" + - "conanfile.py" # Run at 06:32 UTC on every day of the week from Monday through Friday. This # will force all dependencies to be rebuilt, which is useful to verify that # all dependencies can be built successfully. Only the dependencies that # are actually missing from the remote will be uploaded. schedule: - - cron: '32 6 * * 1-5' + - cron: "32 6 * * 1-5" # Run when manually triggered via the GitHub UI or API. If `force_upload` is # true, then the dependencies that were missing (`force_rebuild` is false) or # rebuilt (`force_rebuild` is true) will be uploaded, overwriting existing @@ -45,12 +45,12 @@ on: workflow_dispatch: inputs: dependencies_force_build: - description: 'Force building of all dependencies.' + description: "Force building of all dependencies." required: false type: boolean default: false dependencies_force_upload: - description: 'Force uploading of all dependencies.' + description: "Force uploading of all dependencies." 
required: false type: boolean default: false @@ -109,7 +109,7 @@ jobs: dependencies_force_build: ${{ needs.generate-outputs.outputs.dependencies_force_build == 'true' }} dependencies_force_upload: ${{ needs.generate-outputs.outputs.dependencies_force_upload == 'true' }} os: ${{ matrix.os }} - strategy_matrix: 'all' + strategy_matrix: "all" secrets: conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }} conan_remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }} diff --git a/.github/workflows/publish-docs.yml b/.github/workflows/publish-docs.yml index 509644e6b5..2fcdd581d1 100644 --- a/.github/workflows/publish-docs.yml +++ b/.github/workflows/publish-docs.yml @@ -5,13 +5,13 @@ name: Build and publish documentation on: push: paths: - - '.github/workflows/publish-docs.yml' - - '*.md' - - '**/*.md' - - 'docs/**' - - 'include/**' - - 'src/libxrpl/**' - - 'src/xrpld/**' + - ".github/workflows/publish-docs.yml" + - "*.md" + - "**/*.md" + - "docs/**" + - "include/**" + - "src/libxrpl/**" + - "src/xrpld/**" concurrency: group: ${{ github.workflow }}-${{ github.ref }} @@ -22,7 +22,7 @@ defaults: shell: bash env: - BUILD_DIR: .build + BUILD_DIR: .build jobs: publish: diff --git a/bin/git/setup-upstreams.sh b/bin/git/setup-upstreams.sh index cdf3f37f37..61d8171569 100755 --- a/bin/git/setup-upstreams.sh +++ b/bin/git/setup-upstreams.sh @@ -5,7 +5,7 @@ then name=$( basename $0 ) cat <<- USAGE Usage: $name - + Where is the Github username of the upstream repo. e.g. XRPLF USAGE exit 0 @@ -83,4 +83,3 @@ fi _run git fetch --jobs=$(nproc) upstreams exit 0 - diff --git a/bin/git/squash-branches.sh b/bin/git/squash-branches.sh index 66f1a2d715..4dcbf5aaa1 100755 --- a/bin/git/squash-branches.sh +++ b/bin/git/squash-branches.sh @@ -5,7 +5,7 @@ then name=$( basename $0 ) cat <<- USAGE Usage: $name workbranch base/branch user/branch [user/branch [...]] - + * workbranch will be created locally from base/branch * base/branch and user/branch may be specified as user:branch to allow easy copying from Github PRs @@ -66,4 +66,3 @@ git push $push HEAD:$b git fetch $repo ------------------------------------------------------------------- PUSH - diff --git a/cfg/rippled-example.cfg b/cfg/rippled-example.cfg index 8fb7d00875..8bffc150c1 100644 --- a/cfg/rippled-example.cfg +++ b/cfg/rippled-example.cfg @@ -396,8 +396,8 @@ # true - enables compression # false - disables compression [default]. # -# The rippled server can save bandwidth by compressing its peer-to-peer communications, -# at a cost of greater CPU usage. If you enable link compression, +# The rippled server can save bandwidth by compressing its peer-to-peer communications, +# at a cost of greater CPU usage. If you enable link compression, # the server automatically compresses communications with peer servers # that also have link compression enabled. # https://xrpl.org/enable-link-compression.html @@ -1011,7 +1011,7 @@ # that rippled is still in sync with the network, # and that the validated ledger is less than # 'age_threshold_seconds' old. If not, then continue -# sleeping for this number of seconds and +# sleeping for this number of seconds and # checking until healthy. # Default is 5. # @@ -1113,7 +1113,7 @@ # page_size Valid values: integer (MUST be power of 2 between 512 and 65536) # The default is 4096 bytes. This setting determines # the size of a page in the transaction.db file. -# See https://www.sqlite.org/pragma.html#pragma_page_size +# See https://www.sqlite.org/pragma.html#pragma_page_size # for more details about the available options. 
# # journal_size_limit Valid values: integer diff --git a/cmake/RippledCore.cmake b/cmake/RippledCore.cmake index 83b27e6c4f..7d3561675a 100644 --- a/cmake/RippledCore.cmake +++ b/cmake/RippledCore.cmake @@ -101,7 +101,7 @@ target_link_libraries(xrpl.libxrpl.resource PUBLIC xrpl.libxrpl.protocol) # Level 06 add_module(xrpl net) -target_link_libraries(xrpl.libxrpl.net PUBLIC +target_link_libraries(xrpl.libxrpl.net PUBLIC xrpl.libxrpl.basics xrpl.libxrpl.json xrpl.libxrpl.protocol diff --git a/docs/0001-negative-unl/negativeUNLSqDiagram.puml b/docs/0001-negative-unl/negativeUNLSqDiagram.puml index 8cb491af6a..9f37d43903 100644 --- a/docs/0001-negative-unl/negativeUNLSqDiagram.puml +++ b/docs/0001-negative-unl/negativeUNLSqDiagram.puml @@ -5,8 +5,8 @@ skinparam roundcorner 20 skinparam maxmessagesize 160 actor "Rippled Start" as RS -participant "Timer" as T -participant "NetworkOPs" as NOP +participant "Timer" as T +participant "NetworkOPs" as NOP participant "ValidatorList" as VL #lightgreen participant "Consensus" as GC participant "ConsensusAdaptor" as CA #lightgreen @@ -20,7 +20,7 @@ VL -> NOP NOP -> VL: update trusted validators activate VL VL -> VL: re-calculate quorum -hnote over VL#lightgreen: ignore negative listed validators\nwhen calculate quorum +hnote over VL#lightgreen: ignore negative listed validators\nwhen calculate quorum VL -> NOP deactivate VL NOP -> GC: start round @@ -36,14 +36,14 @@ activate GC end alt phase == OPEN - alt should close ledger + alt should close ledger GC -> GC: phase = ESTABLISH GC -> CA: onClose activate CA - alt sqn%256==0 + alt sqn%256==0 CA -[#green]> RM: getValidations - CA -[#green]> CA: create UNLModify Tx - hnote over CA#lightgreen: use validatations of the last 256 ledgers\nto figure out UNLModify Tx candidates.\nIf any, create UNLModify Tx, and add to TxSet. + CA -[#green]> CA: create UNLModify Tx + hnote over CA#lightgreen: use validatations of the last 256 ledgers\nto figure out UNLModify Tx candidates.\nIf any, create UNLModify Tx, and add to TxSet. end CA -> GC GC -> CA: propose @@ -61,14 +61,14 @@ else phase == ESTABLISH CA -> CA : build LCL hnote over CA #lightgreen: copy negative UNL from parent ledger alt sqn%256==0 - CA -[#green]> CA: Adjust negative UNL + CA -[#green]> CA: Adjust negative UNL CA -[#green]> CA: apply UNLModify Tx end CA -> CA : validate and send validation message activate NOP CA -> NOP : end consensus and\nbegin next consensus round deactivate NOP - deactivate CA + deactivate CA hnote over RM: receive validations end else phase == ACCEPTED @@ -76,4 +76,4 @@ else phase == ACCEPTED end deactivate GC -@enduml \ No newline at end of file +@enduml diff --git a/docs/0010-ledger-replay/ledger_replay_classes.puml b/docs/0010-ledger-replay/ledger_replay_classes.puml index 4c90ef2511..f98cfbe231 100644 --- a/docs/0010-ledger-replay/ledger_replay_classes.puml +++ b/docs/0010-ledger-replay/ledger_replay_classes.puml @@ -4,7 +4,7 @@ class TimeoutCounter { #app_ : Application& } -TimeoutCounter o-- "1" Application +TimeoutCounter o-- "1" Application ': app_ Stoppable <.. Application @@ -14,13 +14,13 @@ class Application { -m_inboundLedgers : uptr } -Application *-- "1" LedgerReplayer +Application *-- "1" LedgerReplayer ': m_ledgerReplayer -Application *-- "1" InboundLedgers +Application *-- "1" InboundLedgers ': m_inboundLedgers Stoppable <.. InboundLedgers -Application "1" --o InboundLedgers +Application "1" --o InboundLedgers ': app_ class InboundLedgers { @@ -28,9 +28,9 @@ class InboundLedgers { } Stoppable <.. 
LedgerReplayer -InboundLedgers "1" --o LedgerReplayer +InboundLedgers "1" --o LedgerReplayer ': inboundLedgers_ -Application "1" --o LedgerReplayer +Application "1" --o LedgerReplayer ': app_ class LedgerReplayer { @@ -42,17 +42,17 @@ class LedgerReplayer { -skipLists_ : hash_map> } -LedgerReplayer *-- LedgerReplayTask +LedgerReplayer *-- LedgerReplayTask ': tasks_ -LedgerReplayer o-- LedgerDeltaAcquire +LedgerReplayer o-- LedgerDeltaAcquire ': deltas_ -LedgerReplayer o-- SkipListAcquire +LedgerReplayer o-- SkipListAcquire ': skipLists_ TimeoutCounter <.. LedgerReplayTask -InboundLedgers "1" --o LedgerReplayTask +InboundLedgers "1" --o LedgerReplayTask ': inboundLedgers_ -LedgerReplayer "1" --o LedgerReplayTask +LedgerReplayer "1" --o LedgerReplayTask ': replayer_ class LedgerReplayTask { @@ -63,15 +63,15 @@ class LedgerReplayTask { +addDelta(sptr) } -LedgerReplayTask *-- "1" SkipListAcquire +LedgerReplayTask *-- "1" SkipListAcquire ': skipListAcquirer_ -LedgerReplayTask *-- LedgerDeltaAcquire +LedgerReplayTask *-- LedgerDeltaAcquire ': deltas_ TimeoutCounter <.. SkipListAcquire -InboundLedgers "1" --o SkipListAcquire +InboundLedgers "1" --o SkipListAcquire ': inboundLedgers_ -LedgerReplayer "1" --o SkipListAcquire +LedgerReplayer "1" --o SkipListAcquire ': replayer_ LedgerReplayTask --o SkipListAcquire : implicit via callback @@ -83,9 +83,9 @@ class SkipListAcquire { } TimeoutCounter <.. LedgerDeltaAcquire -InboundLedgers "1" --o LedgerDeltaAcquire +InboundLedgers "1" --o LedgerDeltaAcquire ': inboundLedgers_ -LedgerReplayer "1" --o LedgerDeltaAcquire +LedgerReplayer "1" --o LedgerDeltaAcquire ': replayer_ LedgerReplayTask --o LedgerDeltaAcquire : implicit via callback @@ -95,4 +95,4 @@ class LedgerDeltaAcquire { -replayer_ : LedgerReplayer& -dataReadyCallbacks_ : vector } -@enduml \ No newline at end of file +@enduml diff --git a/docs/0010-ledger-replay/ledger_replay_sequence.puml b/docs/0010-ledger-replay/ledger_replay_sequence.puml index 481819b5e8..603b09157b 100644 --- a/docs/0010-ledger-replay/ledger_replay_sequence.puml +++ b/docs/0010-ledger-replay/ledger_replay_sequence.puml @@ -38,7 +38,7 @@ deactivate lr loop lr -> lda : make_shared(ledgerId, ledgerSeq) return delta - lr -> lrt : addDelta(delta) + lr -> lrt : addDelta(delta) lrt -> lda : addDataCallback(callback) return return @@ -62,7 +62,7 @@ deactivate peer lr -> lda : processData(ledgerHeader, txns) lda -> lda : notify() note over lda: call the callbacks added by\naddDataCallback(callback). - lda -> lrt : callback(ledgerId) + lda -> lrt : callback(ledgerId) lrt -> lrt : deltaReady(ledgerId) lrt -> lrt : tryAdvance() loop as long as child can be built @@ -82,4 +82,4 @@ deactivate peer deactivate peer -@enduml \ No newline at end of file +@enduml diff --git a/include/xrpl/protocol/Batch.h b/include/xrpl/protocol/Batch.h index 1388bbd2f1..1307ea0978 100644 --- a/include/xrpl/protocol/Batch.h +++ b/include/xrpl/protocol/Batch.h @@ -34,4 +34,4 @@ serializeBatch( msg.addBitString(txid); } -} // namespace ripple \ No newline at end of file +} // namespace ripple diff --git a/include/xrpl/protocol/TxFlags.h b/include/xrpl/protocol/TxFlags.h index 2831933afb..a37474b780 100644 --- a/include/xrpl/protocol/TxFlags.h +++ b/include/xrpl/protocol/TxFlags.h @@ -141,7 +141,7 @@ constexpr std::uint32_t const tfTransferable = 0x00000008; constexpr std::uint32_t const tfMutable = 0x00000010; // MPTokenIssuanceCreate flags: -// NOTE - there is intentionally no flag here for lsfMPTLocked, which this transaction cannot mutate. 
+// NOTE - there is intentionally no flag here for lsfMPTLocked, which this transaction cannot mutate. constexpr std::uint32_t const tfMPTCanLock = lsfMPTCanLock; constexpr std::uint32_t const tfMPTRequireAuth = lsfMPTRequireAuth; constexpr std::uint32_t const tfMPTCanEscrow = lsfMPTCanEscrow; @@ -243,7 +243,7 @@ constexpr std::uint32_t tfUntilFailure = 0x00040000; constexpr std::uint32_t tfIndependent = 0x00080000; /** * @note If nested Batch transactions are supported in the future, the tfInnerBatchTxn flag - * will need to be removed from this mask to allow Batch transaction to be inside + * will need to be removed from this mask to allow Batch transaction to be inside * the sfRawTransactions array. */ constexpr std::uint32_t const tfBatchMask = diff --git a/include/xrpl/protocol/detail/ledger_entries.macro b/include/xrpl/protocol/detail/ledger_entries.macro index 11306ee0f5..967fb37b94 100644 --- a/include/xrpl/protocol/detail/ledger_entries.macro +++ b/include/xrpl/protocol/detail/ledger_entries.macro @@ -505,4 +505,3 @@ LEDGER_ENTRY(ltVAULT, 0x0084, Vault, vault, ({ #undef EXPAND #undef LEDGER_ENTRY_DUPLICATE - diff --git a/src/libxrpl/protocol/Permissions.cpp b/src/libxrpl/protocol/Permissions.cpp index dbe5325a4e..ca8cb26f36 100644 --- a/src/libxrpl/protocol/Permissions.cpp +++ b/src/libxrpl/protocol/Permissions.cpp @@ -145,4 +145,4 @@ Permission::permissionToTxType(uint32_t const& value) const return static_cast(value - 1); } -} // namespace ripple \ No newline at end of file +} // namespace ripple diff --git a/src/test/app/Delegate_test.cpp b/src/test/app/Delegate_test.cpp index 179532140d..44cb6a54b6 100644 --- a/src/test/app/Delegate_test.cpp +++ b/src/test/app/Delegate_test.cpp @@ -1499,4 +1499,4 @@ class Delegate_test : public beast::unit_test::suite }; BEAST_DEFINE_TESTSUITE(Delegate, app, ripple); } // namespace test -} // namespace ripple \ No newline at end of file +} // namespace ripple diff --git a/src/test/app/NFTokenAuth_test.cpp b/src/test/app/NFTokenAuth_test.cpp index f5eedfce77..f0d7cc3700 100644 --- a/src/test/app/NFTokenAuth_test.cpp +++ b/src/test/app/NFTokenAuth_test.cpp @@ -621,4 +621,4 @@ public: BEAST_DEFINE_TESTSUITE_PRIO(NFTokenAuth, app, ripple, 2); -} // namespace ripple \ No newline at end of file +} // namespace ripple diff --git a/src/test/csf/collectors.h b/src/test/csf/collectors.h index 7b91863cbd..0494178ae9 100644 --- a/src/test/csf/collectors.h +++ b/src/test/csf/collectors.h @@ -720,4 +720,4 @@ struct JumpCollector } // namespace test } // namespace ripple -#endif \ No newline at end of file +#endif diff --git a/src/test/jtx/delegate.h b/src/test/jtx/delegate.h index 9e8850fbe2..ea368557b8 100644 --- a/src/test/jtx/delegate.h +++ b/src/test/jtx/delegate.h @@ -59,4 +59,4 @@ public: } // namespace delegate } // namespace jtx } // namespace test -} // namespace ripple \ No newline at end of file +} // namespace ripple diff --git a/src/test/jtx/impl/delegate.cpp b/src/test/jtx/impl/delegate.cpp index 3ceedff190..8ef2fac13d 100644 --- a/src/test/jtx/impl/delegate.cpp +++ b/src/test/jtx/impl/delegate.cpp @@ -64,4 +64,4 @@ entry(jtx::Env& env, jtx::Account const& account, jtx::Account const& authorize) } // namespace delegate } // namespace jtx } // namespace test -} // namespace ripple \ No newline at end of file +} // namespace ripple diff --git a/src/xrpld/app/tx/detail/DelegateSet.cpp b/src/xrpld/app/tx/detail/DelegateSet.cpp index 34e1c3afd3..708cdf0dc2 100644 --- a/src/xrpld/app/tx/detail/DelegateSet.cpp +++ 
b/src/xrpld/app/tx/detail/DelegateSet.cpp @@ -159,4 +159,4 @@ DelegateSet::deleteDelegate( return tesSUCCESS; } -} // namespace ripple \ No newline at end of file +} // namespace ripple diff --git a/src/xrpld/app/tx/detail/DelegateSet.h b/src/xrpld/app/tx/detail/DelegateSet.h index 6b01d63281..c72b1e3c58 100644 --- a/src/xrpld/app/tx/detail/DelegateSet.h +++ b/src/xrpld/app/tx/detail/DelegateSet.h @@ -53,4 +53,4 @@ public: } // namespace ripple -#endif \ No newline at end of file +#endif From 2e255812ae8fcd8b0da6acf0281c5f286c2827ed Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Fri, 22 Aug 2025 15:58:36 +0100 Subject: [PATCH 133/244] chore: Workaround for CI build errors on arm64 (#5717) CI builds with `clang-20` on `linux/arm64` are failing due to boost 1.86. This is hopefully fixed in version 1.88. --- .github/scripts/strategy-matrix/generate.py | 4 ++++ .gitignore | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/scripts/strategy-matrix/generate.py b/.github/scripts/strategy-matrix/generate.py index 0acdca8d4f..652cb8871f 100644 --- a/.github/scripts/strategy-matrix/generate.py +++ b/.github/scripts/strategy-matrix/generate.py @@ -123,6 +123,10 @@ def generate_strategy_matrix(all: bool, architecture: list[dict], os: list[dict] if os['distro_name'] == 'rhel' and architecture['platform'] == 'linux/arm64': continue + # We skip all clang-20 on arm64 due to boost 1.86 build error + if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-20' and architecture['platform'] == 'linux/arm64': + continue + # Generate a unique name for the configuration, e.g. macos-arm64-debug # or debian-bookworm-gcc-12-amd64-release-unity. config_name = os['distro_name'] diff --git a/.gitignore b/.gitignore index ab54adba74..5476f21a41 100644 --- a/.gitignore +++ b/.gitignore @@ -110,4 +110,4 @@ bld.rippled/ .vscode # Suggested in-tree build directory -/.build/ +/.build*/ From 095dc4d9cc89f96aaf82940e822a753bff4b96af Mon Sep 17 00:00:00 2001 From: Mayukha Vadari Date: Fri, 22 Aug 2025 12:15:03 -0400 Subject: [PATCH 134/244] fix(test): handle null metadata for unvalidated tx in Env::meta (#5715) This change handles errors better when calling `env.meta`. It prints some debug help and throws an error if `env.meta` is going to return a `nullptr`. --- src/test/jtx/impl/Env.cpp | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/test/jtx/impl/Env.cpp b/src/test/jtx/impl/Env.cpp index 46558a188a..d6956b30c7 100644 --- a/src/test/jtx/impl/Env.cpp +++ b/src/test/jtx/impl/Env.cpp @@ -499,7 +499,16 @@ Env::meta() close(); } auto const item = closed()->txRead(txid_); - return item.second; + auto const result = item.second; + if (result == nullptr) + { + test.log << "Env::meta: no metadata for txid: " << txid_ << std::endl; + test.log << "This is probably because the transaction failed with a " + "non-tec error." + << std::endl; + Throw("Env::meta: no metadata for txid"); + } + return result; } std::shared_ptr From c14ce956adeabe476ad73c18d73103f347c9c613 Mon Sep 17 00:00:00 2001 From: Bart Date: Fri, 22 Aug 2025 13:37:11 -0400 Subject: [PATCH 135/244] chore: Update clang-format and prettier with pre-commit (#5709) The change updates how clang-format is called in CI and locally, and adds prettier to the pre-commit hook. Proto files are now also formatted, while external files are excluded. 
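For reference, the brace placement enforced by the relocated `BraceWrapping` block can be illustrated with a small, hypothetical C++ snippet. It is not taken from this repository; it only illustrates the brace-related options visible in `.clang-format` below:

```cpp
namespace example {  // AfterNamespace: false -- brace stays on the same line

enum Fruit {  // AfterEnum: false -- brace stays on this line
    apple,
    pear
};

class Basket  // AfterClass: true -- brace moves to its own line
{
public:
    void
    add(int count)  // AfterFunction: true
    {
        if (count > 0)  // AfterControlStatement: true
        {
            size_ += count;
        }
        else  // BeforeElse: true -- 'else' starts after the closing brace
        {
            size_ = 0;
        }
    }

private:
    int size_ = 0;
};

}  // namespace example
```

Moving these options into the leading section makes them the shared defaults for the per-language sections; the option values themselves are unchanged.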
--- .clang-format | 36 +++++------ .git-blame-ignore-revs | 2 + .github/scripts/strategy-matrix/linux.json | 24 ++------ .github/scripts/strategy-matrix/macos.json | 12 +--- .github/scripts/strategy-matrix/windows.json | 14 +---- .github/workflows/check-format.yml | 63 ++++--------------- .github/workflows/on-pr.yml | 4 +- .pre-commit-config.yaml | 64 +++++++++++++++++++- .prettierignore | 1 - 9 files changed, 103 insertions(+), 117 deletions(-) diff --git a/.clang-format b/.clang-format index 9c3820a6bf..bd446022df 100644 --- a/.clang-format +++ b/.clang-format @@ -1,4 +1,20 @@ --- +BreakBeforeBraces: Custom +BraceWrapping: + AfterClass: true + AfterControlStatement: true + AfterEnum: false + AfterFunction: true + AfterNamespace: false + AfterObjCDeclaration: true + AfterStruct: true + AfterUnion: true + BeforeCatch: true + BeforeElse: true + IndentBraces: false +KeepEmptyLinesAtTheStartOfBlocks: false +MaxEmptyLinesToKeep: 1 +--- Language: Cpp AccessModifierOffset: -4 AlignAfterOpenBracket: AlwaysBreak @@ -18,20 +34,7 @@ AlwaysBreakBeforeMultilineStrings: true AlwaysBreakTemplateDeclarations: true BinPackArguments: false BinPackParameters: false -BraceWrapping: - AfterClass: true - AfterControlStatement: true - AfterEnum: false - AfterFunction: true - AfterNamespace: false - AfterObjCDeclaration: true - AfterStruct: true - AfterUnion: true - BeforeCatch: true - BeforeElse: true - IndentBraces: false BreakBeforeBinaryOperators: false -BreakBeforeBraces: Custom BreakBeforeTernaryOperators: true BreakConstructorInitializersBeforeComma: true ColumnLimit: 80 @@ -66,8 +69,6 @@ IndentFunctionDeclarationAfterType: false IndentRequiresClause: true IndentWidth: 4 IndentWrappedFunctionNames: false -KeepEmptyLinesAtTheStartOfBlocks: false -MaxEmptyLinesToKeep: 1 NamespaceIndentation: None ObjCSpaceAfterProperty: false ObjCSpaceBeforeProtocolList: false @@ -96,11 +97,6 @@ TabWidth: 8 UseTab: Never QualifierAlignment: Right --- -Language: JavaScript ---- -Language: Json -IndentWidth: 2 ---- Language: Proto BasedOnStyle: Google ColumnLimit: 0 diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index a9805e705c..cf50d48f95 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -12,3 +12,5 @@ fe9a5365b8a52d4acc42eb27369247e6f238a4f9 9a93577314e6a8d4b4a8368cc9d2b15a5d8303e8 552377c76f55b403a1c876df873a23d780fcc81c 97f0747e103f13e26e45b731731059b32f7679ac +b13370ac0d207217354f1fc1c29aef87769fb8a1 +896b8c3b54a22b0497cb0d1ce95e1095f9a227ce diff --git a/.github/scripts/strategy-matrix/linux.json b/.github/scripts/strategy-matrix/linux.json index d8f176273d..44eaebd074 100644 --- a/.github/scripts/strategy-matrix/linux.json +++ b/.github/scripts/strategy-matrix/linux.json @@ -2,21 +2,11 @@ "architecture": [ { "platform": "linux/amd64", - "runner": [ - "self-hosted", - "Linux", - "X64", - "heavy" - ] + "runner": ["self-hosted", "Linux", "X64", "heavy"] }, { "platform": "linux/arm64", - "runner": [ - "self-hosted", - "Linux", - "ARM64", - "heavy-arm64" - ] + "runner": ["self-hosted", "Linux", "ARM64", "heavy-arm64"] } ], "os": [ @@ -159,12 +149,6 @@ "compiler_version": "19" } ], - "build_type": [ - "Debug", - "Release" - ], - "cmake_args": [ - "-Dunity=OFF", - "-Dunity=ON" - ] + "build_type": ["Debug", "Release"], + "cmake_args": ["-Dunity=OFF", "-Dunity=ON"] } diff --git a/.github/scripts/strategy-matrix/macos.json b/.github/scripts/strategy-matrix/macos.json index a6ffdf14b7..de37639ddd 100644 --- a/.github/scripts/strategy-matrix/macos.json +++ 
b/.github/scripts/strategy-matrix/macos.json @@ -2,12 +2,7 @@ "architecture": [ { "platform": "macos/arm64", - "runner": [ - "self-hosted", - "macOS", - "ARM64", - "mac-runner-m1" - ] + "runner": ["self-hosted", "macOS", "ARM64", "mac-runner-m1"] } ], "os": [ @@ -18,10 +13,7 @@ "compiler_version": "" } ], - "build_type": [ - "Debug", - "Release" - ], + "build_type": ["Debug", "Release"], "cmake_args": [ "-Dunity=OFF -DCMAKE_POLICY_VERSION_MINIMUM=3.5", "-Dunity=ON -DCMAKE_POLICY_VERSION_MINIMUM=3.5" diff --git a/.github/scripts/strategy-matrix/windows.json b/.github/scripts/strategy-matrix/windows.json index aaa8c94411..5e6e536750 100644 --- a/.github/scripts/strategy-matrix/windows.json +++ b/.github/scripts/strategy-matrix/windows.json @@ -2,9 +2,7 @@ "architecture": [ { "platform": "windows/amd64", - "runner": [ - "windows-latest" - ] + "runner": ["windows-latest"] } ], "os": [ @@ -15,12 +13,6 @@ "compiler_version": "" } ], - "build_type": [ - "Debug", - "Release" - ], - "cmake_args": [ - "-Dunity=OFF", - "-Dunity=ON" - ] + "build_type": ["Debug", "Release"], + "cmake_args": ["-Dunity=OFF", "-Dunity=ON"] } diff --git a/.github/workflows/check-format.yml b/.github/workflows/check-format.yml index 5e3da10028..359e3e634b 100644 --- a/.github/workflows/check-format.yml +++ b/.github/workflows/check-format.yml @@ -13,9 +13,9 @@ defaults: shell: bash jobs: - clang-format: + pre-commit: runs-on: ubuntu-latest - container: ghcr.io/xrplf/ci/tools-rippled-clang-format + container: ghcr.io/xrplf/ci/tools-rippled-pre-commit steps: # The $GITHUB_WORKSPACE and ${{ github.workspace }} might not point to the # same directory for jobs running in containers. The actions/checkout step @@ -38,48 +38,11 @@ jobs: echo 'Checking environment variables.' env | sort + echo 'Checking pre-commit version.' + pre-commit --version + echo 'Checking clang-format version.' clang-format --version - - name: Format code - run: find include src tests -type f \( -name '*.cpp' -o -name '*.hpp' -o -name '*.h' -o -name '*.ipp' \) -exec clang-format -i {} + - - name: Check for differences - env: - MESSAGE: | - One or more files did not conform to the formatting specified in - .clang-format. Maybe you did not run 'git-clang-format' or - 'clang-format' before committing, or your version of clang-format - has an incompatibility with the one used here (see the "Check - configuration" step above). - - Run 'git-clang-format --extensions cpp,h,hpp,ipp develop' in your - repo, and then commit and push the changes. - run: | - DIFF=$(git status --porcelain) - if [ -n "${DIFF}" ]; then - # Print the files that changed to give the contributor a hint about - # what to expect when running git-clang-format on their own machine. - git status - echo "${MESSAGE}" - exit 1 - fi - - prettier: - runs-on: ubuntu-latest - container: ghcr.io/xrplf/ci/tools-rippled-prettier - steps: - - name: Configure git safe.directory - run: | - git config --global --add safe.directory $GITHUB_WORKSPACE - git config --global --add safe.directory ${{ github.workspace }} - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - - name: Check configuration - run: | - echo 'Checking path.' - echo ${PATH} | tr ':' '\n' - - echo 'Checking environment variables.' - env | sort echo 'Checking NPM version.' npm --version @@ -90,22 +53,22 @@ jobs: echo 'Checking prettier version.' prettier --version - name: Format code - run: prettier --check . 
+ run: pre-commit run --show-diff-on-failure --color=always --all-files - name: Check for differences env: MESSAGE: | - One or more files did not conform to the formatting rules specified - by Prettier. Maybe you did not run 'prettier' before committing, or - your version of prettier has an incompatibility with the one used - here (see the "Check configuration" step above). + One or more files did not conform to the formatting. Maybe you did + not run 'pre-commit' before committing, or your version of + 'clang-format' or 'prettier' has an incompatibility with the ones + used here (see the "Check configuration" step above). - Run 'prettier --check .' in your repo, and then commit and push the - changes. + Run 'pre-commit run --all-files' in your repo, and then commit and + push the changes. run: | DIFF=$(git status --porcelain) if [ -n "${DIFF}" ]; then # Print the files that changed to give the contributor a hint about - # what to expect when running prettier on their own machine. + # what to expect when running pre-commit on their own machine. git status echo "${MESSAGE}" exit 1 diff --git a/.github/workflows/on-pr.yml b/.github/workflows/on-pr.yml index f3811c4ea4..d1623874ac 100644 --- a/.github/workflows/on-pr.yml +++ b/.github/workflows/on-pr.yml @@ -69,7 +69,7 @@ jobs: runs-on: ubuntu-latest steps: - name: No-op - run: echo '' + run: true check-format: needs: should-run @@ -86,7 +86,7 @@ jobs: runs-on: ubuntu-latest steps: - name: No-op - run: echo '' + run: true outputs: conan_remote_name: ${{ env.CONAN_REMOTE_NAME }} conan_remote_url: ${{ env.CONAN_REMOTE_URL }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7daecdb5ec..3bd60b76d0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,64 @@ -# .pre-commit-config.yaml +# To run pre-commit hooks, first install pre-commit: +# - `pip install pre-commit==${PRE_COMMIT_VERSION}` +# - `pip install pre-commit-hooks==${PRE_COMMIT_HOOKS_VERSION}` +# +# Depending on your system, you can use `brew install` or `apt install` as well +# for installing the pre-commit package, but `pip` is needed to install the +# hooks; you can also use `pipx` if you prefer. +# Next, install the required formatters: +# - `pip install clang-format==${CLANG_VERSION}` +# - `npm install prettier@${PRETTIER_VERSION}` +# +# See https://github.com/XRPLF/ci/blob/main/.github/workflows/tools-rippled.yml +# for the versions used in the CI pipeline. You will need to have the exact same +# versions of the tools installed on your system to produce the same results as +# the pipeline. 
+# +# Then, run the following command to install the git hook scripts: +# - `pre-commit install` +# You can run all configured hooks against all files with: +# - `pre-commit run --all-files` +# To manually run a specific hook, use: +# - `pre-commit run --all-files` +# To run the hooks against only the files changed in the current commit, use: +# - `pre-commit run` repos: - - repo: https://github.com/pre-commit/mirrors-clang-format - rev: v18.1.8 + - repo: local hooks: - id: clang-format + name: clang-format + language: system + entry: clang-format -i + files: '\.(cpp|hpp|h|ipp|proto)$' + - id: trailing-whitespace + name: trailing-whitespace + entry: trailing-whitespace-fixer + language: system + types: [text] + - id: end-of-file + name: end-of-file + entry: end-of-file-fixer + language: system + types: [text] + - id: mixed-line-ending + name: mixed-line-ending + entry: mixed-line-ending + language: system + types: [text] + - id: check-merge-conflict + name: check-merge-conflict + entry: check-merge-conflict --assume-in-merge + language: system + types: [text] + - repo: local + hooks: + - id: prettier + name: prettier + language: system + entry: prettier --ignore-unknown --write + +exclude: | + (?x)^( + external/.*| + .github/scripts/levelization/results/.*\.txt + )$ diff --git a/.prettierignore b/.prettierignore index 477120ade1..5446323fad 100644 --- a/.prettierignore +++ b/.prettierignore @@ -1,2 +1 @@ external -.* From c57cd8b23ead8a092ff28a7be67c23d610e29c46 Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Fri, 22 Aug 2025 17:30:08 -0400 Subject: [PATCH 136/244] Revert "perf: Move mutex to the partition level (#5486)" This reverts commit 94decc753b515e7499808ca0d5b9e24d172c691e. --- include/xrpl/basics/SHAMapHash.h | 1 + include/xrpl/basics/TaggedCache.h | 26 ++- include/xrpl/basics/TaggedCache.ipp | 197 +++++++++--------- .../xrpl/basics/partitioned_unordered_map.h | 12 -- include/xrpl/protocol/Protocol.h | 1 + src/test/basics/TaggedCache_test.cpp | 24 +-- src/xrpld/app/ledger/LedgerHistory.cpp | 15 +- src/xrpld/rpc/handlers/GetCounts.cpp | 2 +- 8 files changed, 143 insertions(+), 135 deletions(-) diff --git a/include/xrpl/basics/SHAMapHash.h b/include/xrpl/basics/SHAMapHash.h index 1ec326409c..2d2dcdc3ef 100644 --- a/include/xrpl/basics/SHAMapHash.h +++ b/include/xrpl/basics/SHAMapHash.h @@ -21,6 +21,7 @@ #define RIPPLE_BASICS_SHAMAP_HASH_H_INCLUDED #include +#include #include diff --git a/include/xrpl/basics/TaggedCache.h b/include/xrpl/basics/TaggedCache.h index 7eace6fe72..99c91fe393 100644 --- a/include/xrpl/basics/TaggedCache.h +++ b/include/xrpl/basics/TaggedCache.h @@ -90,6 +90,9 @@ public: int getCacheSize() const; + int + getTrackSize() const; + float getHitRate(); @@ -167,6 +170,9 @@ public: bool retrieve(key_type const& key, T& data); + mutex_type& + peekMutex(); + std::vector getKeys() const; @@ -187,14 +193,11 @@ public: private: SharedPointerType - initialFetch(key_type const& key); + initialFetch(key_type const& key, std::lock_guard const& l); void collect_metrics(); - Mutex& - lockPartition(key_type const& key) const; - private: struct Stats { @@ -297,8 +300,8 @@ private: [[maybe_unused]] clock_type::time_point const& now, typename KeyValueCacheType::map_type& partition, SweptPointersVector& stuffToSweep, - std::atomic& allRemoval, - Mutex& partitionLock); + std::atomic& allRemovals, + std::lock_guard const&); [[nodiscard]] std::thread sweepHelper( @@ -307,12 +310,14 @@ private: typename KeyOnlyCacheType::map_type& partition, SweptPointersVector&, std::atomic& 
allRemovals, - Mutex& partitionLock); + std::lock_guard const&); beast::Journal m_journal; clock_type& m_clock; Stats m_stats; + mutex_type mutable m_mutex; + // Used for logging std::string m_name; @@ -323,11 +328,10 @@ private: clock_type::duration const m_target_age; // Number of items cached - std::atomic m_cache_count; + int m_cache_count; cache_type m_cache; // Hold strong reference to recent objects - std::atomic m_hits; - std::atomic m_misses; - mutable std::vector partitionLocks_; + std::uint64_t m_hits; + std::uint64_t m_misses; }; } // namespace ripple diff --git a/include/xrpl/basics/TaggedCache.ipp b/include/xrpl/basics/TaggedCache.ipp index c909ec6ad1..16a3f7587a 100644 --- a/include/xrpl/basics/TaggedCache.ipp +++ b/include/xrpl/basics/TaggedCache.ipp @@ -22,7 +22,6 @@ #include #include -#include namespace ripple { @@ -61,7 +60,6 @@ inline TaggedCache< , m_hits(0) , m_misses(0) { - partitionLocks_ = std::vector(m_cache.partitions()); } template < @@ -107,13 +105,8 @@ TaggedCache< KeyEqual, Mutex>::size() const { - std::size_t totalSize = 0; - for (size_t i = 0; i < partitionLocks_.size(); ++i) - { - std::lock_guard lock(partitionLocks_[i]); - totalSize += m_cache.map()[i].size(); - } - return totalSize; + std::lock_guard lock(m_mutex); + return m_cache.size(); } template < @@ -136,7 +129,32 @@ TaggedCache< KeyEqual, Mutex>::getCacheSize() const { - return m_cache_count.load(std::memory_order_relaxed); + std::lock_guard lock(m_mutex); + return m_cache_count; +} + +template < + class Key, + class T, + bool IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, + class Hash, + class KeyEqual, + class Mutex> +inline int +TaggedCache< + Key, + T, + IsKeyCache, + SharedWeakUnionPointer, + SharedPointerType, + Hash, + KeyEqual, + Mutex>::getTrackSize() const +{ + std::lock_guard lock(m_mutex); + return m_cache.size(); } template < @@ -159,10 +177,9 @@ TaggedCache< KeyEqual, Mutex>::getHitRate() { - auto const hits = m_hits.load(std::memory_order_relaxed); - auto const misses = m_misses.load(std::memory_order_relaxed); - float const total = float(hits + misses); - return hits * (100.0f / std::max(1.0f, total)); + std::lock_guard lock(m_mutex); + auto const total = static_cast(m_hits + m_misses); + return m_hits * (100.0f / std::max(1.0f, total)); } template < @@ -185,12 +202,9 @@ TaggedCache< KeyEqual, Mutex>::clear() { - for (auto& mutex : partitionLocks_) - mutex.lock(); + std::lock_guard lock(m_mutex); m_cache.clear(); - for (auto& mutex : partitionLocks_) - mutex.unlock(); - m_cache_count.store(0, std::memory_order_relaxed); + m_cache_count = 0; } template < @@ -213,9 +227,11 @@ TaggedCache< KeyEqual, Mutex>::reset() { - clear(); - m_hits.store(0, std::memory_order_relaxed); - m_misses.store(0, std::memory_order_relaxed); + std::lock_guard lock(m_mutex); + m_cache.clear(); + m_cache_count = 0; + m_hits = 0; + m_misses = 0; } template < @@ -239,7 +255,7 @@ TaggedCache< KeyEqual, Mutex>::touch_if_exists(KeyComparable const& key) { - std::lock_guard lock(lockPartition(key)); + std::lock_guard lock(m_mutex); auto const iter(m_cache.find(key)); if (iter == m_cache.end()) { @@ -281,6 +297,8 @@ TaggedCache< auto const start = std::chrono::steady_clock::now(); { + std::lock_guard lock(m_mutex); + if (m_target_size == 0 || (static_cast(m_cache.size()) <= m_target_size)) { @@ -312,13 +330,12 @@ TaggedCache< m_cache.map()[p], allStuffToSweep[p], allRemovals, - partitionLocks_[p])); + lock)); } for (std::thread& worker : workers) worker.join(); - int removals = 
allRemovals.load(std::memory_order_relaxed); - m_cache_count.fetch_sub(removals, std::memory_order_relaxed); + m_cache_count -= allRemovals; } // At this point allStuffToSweep will go out of scope outside the lock // and decrement the reference count on each strong pointer. @@ -352,8 +369,7 @@ TaggedCache< { // Remove from cache, if !valid, remove from map too. Returns true if // removed from cache - - std::lock_guard lock(lockPartition(key)); + std::lock_guard lock(m_mutex); auto cit = m_cache.find(key); @@ -366,7 +382,7 @@ TaggedCache< if (entry.isCached()) { - m_cache_count.fetch_sub(1, std::memory_order_relaxed); + --m_cache_count; entry.ptr.convertToWeak(); ret = true; } @@ -404,16 +420,17 @@ TaggedCache< { // Return canonical value, store if needed, refresh in cache // Return values: true=we had the data already + std::lock_guard lock(m_mutex); - std::lock_guard lock(lockPartition(key)); auto cit = m_cache.find(key); + if (cit == m_cache.end()) { m_cache.emplace( std::piecewise_construct, std::forward_as_tuple(key), std::forward_as_tuple(m_clock.now(), data)); - m_cache_count.fetch_add(1, std::memory_order_relaxed); + ++m_cache_count; return false; } @@ -462,12 +479,12 @@ TaggedCache< data = cachedData; } - m_cache_count.fetch_add(1, std::memory_order_relaxed); + ++m_cache_count; return true; } entry.ptr = data; - m_cache_count.fetch_add(1, std::memory_order_relaxed); + ++m_cache_count; return false; } @@ -543,11 +560,10 @@ TaggedCache< KeyEqual, Mutex>::fetch(key_type const& key) { - std::lock_guard lock(lockPartition(key)); - - auto ret = initialFetch(key); + std::lock_guard l(m_mutex); + auto ret = initialFetch(key, l); if (!ret) - m_misses.fetch_add(1, std::memory_order_relaxed); + ++m_misses; return ret; } @@ -611,8 +627,8 @@ TaggedCache< Mutex>::insert(key_type const& key) -> std::enable_if_t { + std::lock_guard lock(m_mutex); clock_type::time_point const now(m_clock.now()); - std::lock_guard lock(lockPartition(key)); auto [it, inserted] = m_cache.emplace( std::piecewise_construct, std::forward_as_tuple(key), @@ -652,6 +668,29 @@ TaggedCache< return true; } +template < + class Key, + class T, + bool IsKeyCache, + class SharedWeakUnionPointer, + class SharedPointerType, + class Hash, + class KeyEqual, + class Mutex> +inline auto +TaggedCache< + Key, + T, + IsKeyCache, + SharedWeakUnionPointer, + SharedPointerType, + Hash, + KeyEqual, + Mutex>::peekMutex() -> mutex_type& +{ + return m_mutex; +} + template < class Key, class T, @@ -675,13 +714,10 @@ TaggedCache< std::vector v; { + std::lock_guard lock(m_mutex); v.reserve(m_cache.size()); - for (std::size_t i = 0; i < partitionLocks_.size(); ++i) - { - std::lock_guard lock(partitionLocks_[i]); - for (auto const& entry : m_cache.map()[i]) - v.push_back(entry.first); - } + for (auto const& _ : m_cache) + v.push_back(_.first); } return v; @@ -707,12 +743,11 @@ TaggedCache< KeyEqual, Mutex>::rate() const { - auto const hits = m_hits.load(std::memory_order_relaxed); - auto const misses = m_misses.load(std::memory_order_relaxed); - auto const tot = hits + misses; + std::lock_guard lock(m_mutex); + auto const tot = m_hits + m_misses; if (tot == 0) - return 0.0; - return double(hits) / tot; + return 0; + return double(m_hits) / tot; } template < @@ -736,16 +771,18 @@ TaggedCache< KeyEqual, Mutex>::fetch(key_type const& digest, Handler const& h) { - std::lock_guard lock(lockPartition(digest)); - - if (auto ret = initialFetch(digest)) - return ret; + { + std::lock_guard l(m_mutex); + if (auto ret = initialFetch(digest, l)) + return ret; + } 
auto sle = h(); if (!sle) return {}; - m_misses.fetch_add(1, std::memory_order_relaxed); + std::lock_guard l(m_mutex); + ++m_misses; auto const [it, inserted] = m_cache.emplace(digest, Entry(m_clock.now(), std::move(sle))); if (!inserted) @@ -772,10 +809,9 @@ TaggedCache< SharedPointerType, Hash, KeyEqual, - Mutex>::initialFetch(key_type const& key) + Mutex>:: + initialFetch(key_type const& key, std::lock_guard const& l) { - std::lock_guard lock(lockPartition(key)); - auto cit = m_cache.find(key); if (cit == m_cache.end()) return {}; @@ -783,7 +819,7 @@ TaggedCache< Entry& entry = cit->second; if (entry.isCached()) { - m_hits.fetch_add(1, std::memory_order_relaxed); + ++m_hits; entry.touch(m_clock.now()); return entry.ptr.getStrong(); } @@ -791,13 +827,12 @@ TaggedCache< if (entry.isCached()) { // independent of cache size, so not counted as a hit - m_cache_count.fetch_add(1, std::memory_order_relaxed); + ++m_cache_count; entry.touch(m_clock.now()); return entry.ptr.getStrong(); } m_cache.erase(cit); - return {}; } @@ -826,11 +861,10 @@ TaggedCache< { beast::insight::Gauge::value_type hit_rate(0); { - auto const hits = m_hits.load(std::memory_order_relaxed); - auto const misses = m_misses.load(std::memory_order_relaxed); - auto const total = hits + misses; + std::lock_guard lock(m_mutex); + auto const total(m_hits + m_misses); if (total != 0) - hit_rate = (hits * 100) / total; + hit_rate = (m_hits * 100) / total; } m_stats.hit_rate.set(hit_rate); } @@ -861,16 +895,12 @@ TaggedCache< typename KeyValueCacheType::map_type& partition, SweptPointersVector& stuffToSweep, std::atomic& allRemovals, - Mutex& partitionLock) + std::lock_guard const&) { return std::thread([&, this]() { - beast::setCurrentThreadName("sweep-KVCache"); - int cacheRemovals = 0; int mapRemovals = 0; - std::lock_guard lock(partitionLock); - // Keep references to all the stuff we sweep // so that we can destroy them outside the lock. stuffToSweep.reserve(partition.size()); @@ -954,16 +984,12 @@ TaggedCache< typename KeyOnlyCacheType::map_type& partition, SweptPointersVector&, std::atomic& allRemovals, - Mutex& partitionLock) + std::lock_guard const&) { return std::thread([&, this]() { - beast::setCurrentThreadName("sweep-KCache"); - int cacheRemovals = 0; int mapRemovals = 0; - std::lock_guard lock(partitionLock); - // Keep references to all the stuff we sweep // so that we can destroy them outside the lock. 
{ @@ -998,29 +1024,6 @@ TaggedCache< }); } -template < - class Key, - class T, - bool IsKeyCache, - class SharedWeakUnionPointer, - class SharedPointerType, - class Hash, - class KeyEqual, - class Mutex> -inline Mutex& -TaggedCache< - Key, - T, - IsKeyCache, - SharedWeakUnionPointer, - SharedPointerType, - Hash, - KeyEqual, - Mutex>::lockPartition(key_type const& key) const -{ - return partitionLocks_[m_cache.partition_index(key)]; -} - } // namespace ripple #endif diff --git a/include/xrpl/basics/partitioned_unordered_map.h b/include/xrpl/basics/partitioned_unordered_map.h index ecaf16a47e..4e503ad0fa 100644 --- a/include/xrpl/basics/partitioned_unordered_map.h +++ b/include/xrpl/basics/partitioned_unordered_map.h @@ -277,12 +277,6 @@ public: return map_; } - partition_map_type const& - map() const - { - return map_; - } - iterator begin() { @@ -327,12 +321,6 @@ public: return cend(); } - std::size_t - partition_index(key_type const& key) const - { - return partitioner(key); - } - private: template void diff --git a/include/xrpl/protocol/Protocol.h b/include/xrpl/protocol/Protocol.h index bd39233cca..898fd06fbd 100644 --- a/include/xrpl/protocol/Protocol.h +++ b/include/xrpl/protocol/Protocol.h @@ -22,6 +22,7 @@ #include #include +#include #include diff --git a/src/test/basics/TaggedCache_test.cpp b/src/test/basics/TaggedCache_test.cpp index ec450e46dd..3d3dba698d 100644 --- a/src/test/basics/TaggedCache_test.cpp +++ b/src/test/basics/TaggedCache_test.cpp @@ -58,10 +58,10 @@ public: // Insert an item, retrieve it, and age it so it gets purged. { BEAST_EXPECT(c.getCacheSize() == 0); - BEAST_EXPECT(c.size() == 0); + BEAST_EXPECT(c.getTrackSize() == 0); BEAST_EXPECT(!c.insert(1, "one")); BEAST_EXPECT(c.getCacheSize() == 1); - BEAST_EXPECT(c.size() == 1); + BEAST_EXPECT(c.getTrackSize() == 1); { std::string s; @@ -72,7 +72,7 @@ public: ++clock; c.sweep(); BEAST_EXPECT(c.getCacheSize() == 0); - BEAST_EXPECT(c.size() == 0); + BEAST_EXPECT(c.getTrackSize() == 0); } // Insert an item, maintain a strong pointer, age it, and @@ -80,7 +80,7 @@ public: { BEAST_EXPECT(!c.insert(2, "two")); BEAST_EXPECT(c.getCacheSize() == 1); - BEAST_EXPECT(c.size() == 1); + BEAST_EXPECT(c.getTrackSize() == 1); { auto p = c.fetch(2); @@ -88,14 +88,14 @@ public: ++clock; c.sweep(); BEAST_EXPECT(c.getCacheSize() == 0); - BEAST_EXPECT(c.size() == 1); + BEAST_EXPECT(c.getTrackSize() == 1); } // Make sure its gone now that our reference is gone ++clock; c.sweep(); BEAST_EXPECT(c.getCacheSize() == 0); - BEAST_EXPECT(c.size() == 0); + BEAST_EXPECT(c.getTrackSize() == 0); } // Insert the same key/value pair and make sure we get the same result @@ -111,7 +111,7 @@ public: ++clock; c.sweep(); BEAST_EXPECT(c.getCacheSize() == 0); - BEAST_EXPECT(c.size() == 0); + BEAST_EXPECT(c.getTrackSize() == 0); } // Put an object in but keep a strong pointer to it, advance the clock a @@ -121,24 +121,24 @@ public: // Put an object in BEAST_EXPECT(!c.insert(4, "four")); BEAST_EXPECT(c.getCacheSize() == 1); - BEAST_EXPECT(c.size() == 1); + BEAST_EXPECT(c.getTrackSize() == 1); { // Keep a strong pointer to it auto const p1 = c.fetch(4); BEAST_EXPECT(p1 != nullptr); BEAST_EXPECT(c.getCacheSize() == 1); - BEAST_EXPECT(c.size() == 1); + BEAST_EXPECT(c.getTrackSize() == 1); // Advance the clock a lot ++clock; c.sweep(); BEAST_EXPECT(c.getCacheSize() == 0); - BEAST_EXPECT(c.size() == 1); + BEAST_EXPECT(c.getTrackSize() == 1); // Canonicalize a new object with the same key auto p2 = std::make_shared("four"); 
BEAST_EXPECT(c.canonicalize_replace_client(4, p2)); BEAST_EXPECT(c.getCacheSize() == 1); - BEAST_EXPECT(c.size() == 1); + BEAST_EXPECT(c.getTrackSize() == 1); // Make sure we get the original object BEAST_EXPECT(p1.get() == p2.get()); } @@ -146,7 +146,7 @@ public: ++clock; c.sweep(); BEAST_EXPECT(c.getCacheSize() == 0); - BEAST_EXPECT(c.size() == 0); + BEAST_EXPECT(c.getTrackSize() == 0); } } }; diff --git a/src/xrpld/app/ledger/LedgerHistory.cpp b/src/xrpld/app/ledger/LedgerHistory.cpp index dcbd722120..ccec209bd4 100644 --- a/src/xrpld/app/ledger/LedgerHistory.cpp +++ b/src/xrpld/app/ledger/LedgerHistory.cpp @@ -63,6 +63,8 @@ LedgerHistory::insert( ledger->stateMap().getHash().isNonZero(), "ripple::LedgerHistory::insert : nonzero hash"); + std::unique_lock sl(m_ledgers_by_hash.peekMutex()); + bool const alreadyHad = m_ledgers_by_hash.canonicalize_replace_cache( ledger->info().hash, ledger); if (validated) @@ -74,6 +76,7 @@ LedgerHistory::insert( LedgerHash LedgerHistory::getLedgerHash(LedgerIndex index) { + std::unique_lock sl(m_ledgers_by_hash.peekMutex()); if (auto it = mLedgersByIndex.find(index); it != mLedgersByIndex.end()) return it->second; return {}; @@ -83,11 +86,13 @@ std::shared_ptr LedgerHistory::getLedgerBySeq(LedgerIndex index) { { + std::unique_lock sl(m_ledgers_by_hash.peekMutex()); auto it = mLedgersByIndex.find(index); if (it != mLedgersByIndex.end()) { uint256 hash = it->second; + sl.unlock(); return getLedgerByHash(hash); } } @@ -103,6 +108,7 @@ LedgerHistory::getLedgerBySeq(LedgerIndex index) { // Add this ledger to the local tracking by index + std::unique_lock sl(m_ledgers_by_hash.peekMutex()); XRPL_ASSERT( ret->isImmutable(), @@ -452,6 +458,8 @@ LedgerHistory::builtLedger( XRPL_ASSERT( !hash.isZero(), "ripple::LedgerHistory::builtLedger : nonzero hash"); + std::unique_lock sl(m_consensus_validated.peekMutex()); + auto entry = std::make_shared(); m_consensus_validated.canonicalize_replace_client(index, entry); @@ -492,6 +500,8 @@ LedgerHistory::validatedLedger( !hash.isZero(), "ripple::LedgerHistory::validatedLedger : nonzero hash"); + std::unique_lock sl(m_consensus_validated.peekMutex()); + auto entry = std::make_shared(); m_consensus_validated.canonicalize_replace_client(index, entry); @@ -525,9 +535,10 @@ LedgerHistory::validatedLedger( bool LedgerHistory::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash) { - auto ledger = m_ledgers_by_hash.fetch(ledgerHash); + std::unique_lock sl(m_ledgers_by_hash.peekMutex()); auto it = mLedgersByIndex.find(ledgerIndex); - if (ledger && (it != mLedgersByIndex.end()) && (it->second != ledgerHash)) + + if ((it != mLedgersByIndex.end()) && (it->second != ledgerHash)) { it->second = ledgerHash; return false; diff --git a/src/xrpld/rpc/handlers/GetCounts.cpp b/src/xrpld/rpc/handlers/GetCounts.cpp index 2987da46d5..3c1d8cccdd 100644 --- a/src/xrpld/rpc/handlers/GetCounts.cpp +++ b/src/xrpld/rpc/handlers/GetCounts.cpp @@ -114,7 +114,7 @@ getCountsJson(Application& app, int minObjectCount) ret[jss::treenode_cache_size] = app.getNodeFamily().getTreeNodeCache()->getCacheSize(); ret[jss::treenode_track_size] = - static_cast(app.getNodeFamily().getTreeNodeCache()->size()); + app.getNodeFamily().getTreeNodeCache()->getTrackSize(); std::string uptime; auto s = UptimeClock::now(); From c5fe97064678ff8cdf2762acc69b814142db0757 Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Fri, 22 Aug 2025 17:32:31 -0400 Subject: [PATCH 137/244] Set version to 2.6.0-rc3 --- src/libxrpl/protocol/BuildInfo.cpp | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/src/libxrpl/protocol/BuildInfo.cpp b/src/libxrpl/protocol/BuildInfo.cpp index 0d7ea1a7ca..4b55f82f49 100644 --- a/src/libxrpl/protocol/BuildInfo.cpp +++ b/src/libxrpl/protocol/BuildInfo.cpp @@ -36,7 +36,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "2.6.0-rc2" +char const* const versionString = "2.6.0-rc3" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From c61096239c5d672b40e1da08c778cbb9e9d9189c Mon Sep 17 00:00:00 2001 From: Bart Date: Fri, 22 Aug 2025 19:31:01 -0400 Subject: [PATCH 138/244] chore: Remove codecov token check to support tokenless uploads on forks (#5722) --- .github/actions/build-test/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/build-test/action.yml b/.github/actions/build-test/action.yml index d68f302698..ee945dcf38 100644 --- a/.github/actions/build-test/action.yml +++ b/.github/actions/build-test/action.yml @@ -83,7 +83,7 @@ runs: ./rippled --unittest --unittest-jobs $(nproc) ctest -j $(nproc) --output-on-failure - name: Upload coverage report - if: ${{ inputs.cmake_target == 'coverage' && inputs.codecov_token }} + if: ${{ inputs.cmake_target == 'coverage' }} uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3 with: disable_search: true From 77fef8732bff67229440114cbc10d08bb776a4db Mon Sep 17 00:00:00 2001 From: Bart Date: Mon, 25 Aug 2025 13:32:07 -0400 Subject: [PATCH 139/244] fix: Simplify PR pipeline trigger rules (#5727) This change removes `labeled` and `unlabeled` as pipeline trigger actions, and instead adds `reopened` and `ready_for_review`. The logic whether to run the pipeline jobs is then simplified, although to get a draft PR with the `DraftCIRun` label to run it can be necessary to close and reopen a PR. --- .github/workflows/on-pr.yml | 24 +++++------------------- 1 file changed, 5 insertions(+), 19 deletions(-) diff --git a/.github/workflows/on-pr.yml b/.github/workflows/on-pr.yml index d1623874ac..e626222865 100644 --- a/.github/workflows/on-pr.yml +++ b/.github/workflows/on-pr.yml @@ -31,9 +31,9 @@ on: - "conanfile.py" types: - opened + - reopened - synchronize - - labeled - - unlabeled + - ready_for_review concurrency: group: ${{ github.workflow }}-${{ github.ref }} @@ -48,24 +48,10 @@ env: CONAN_REMOTE_URL: https://conan.ripplex.io jobs: - # This job determines whether the workflow should run. It runs when: - # * Opened as a non-draft PR. - # * A commit is added to a non-draft PR or the PR has the 'DraftRunCI' label. - # * A draft PR has the 'DraftRunCI' label added. - # * A non-draft PR has the 'DraftRunCI' label removed. - # These checks are in part to ensure the workflow won't run needlessly while - # also allowing it to be triggered without having to add a no-op commit. A new - # workflow execution can be triggered by adding and then removing the label on - # a non-draft PR, or conversely by removing it and then adding it back on a - # draft PR; this can be useful in certain cases. + # This job determines whether the workflow should run. It runs when the PR is + # not a draft or has the 'DraftRunCI' label. 
should-run: - if: >- - ${{ - (github.event.action == 'opened' && !github.event.pull_request.draft) || - (github.event.action == 'synchronize' && (!github.event.pull_request.draft || contains(github.event.pull_request.labels.*.name, 'DraftRunCI'))) || - (github.event.action == 'labeled' && github.event.pull_request.draft && github.event.label.name == 'DraftRunCI') || - (github.event.action == 'unlabeled' && !github.event.pull_request.draft && github.event.label.name == 'DraftRunCI') - }} + if: ${{ !github.event.pull_request.draft || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }} runs-on: ubuntu-latest steps: - name: No-op From 285120684c3bb8fb1f700529b2346926e34d99c6 Mon Sep 17 00:00:00 2001 From: Bart Date: Tue, 26 Aug 2025 16:00:00 -0400 Subject: [PATCH 140/244] refactor: Replace 'on: pull_request: paths' by 'changed-files' action (#5728) This PR moves the list of files from the `paths:` section in the `on: pull_request` into a separate job. --- .github/scripts/strategy-matrix/generate.py | 12 ++-- .github/workflows/on-pr.yml | 77 ++++++++++++++------- .github/workflows/on-trigger.yml | 16 +++-- 3 files changed, 68 insertions(+), 37 deletions(-) diff --git a/.github/scripts/strategy-matrix/generate.py b/.github/scripts/strategy-matrix/generate.py index 652cb8871f..9743d5a4e3 100644 --- a/.github/scripts/strategy-matrix/generate.py +++ b/.github/scripts/strategy-matrix/generate.py @@ -38,7 +38,7 @@ def generate_strategy_matrix(all: bool, architecture: list[dict], os: list[dict] # - Bookworm using GCC 13: Release and Unity on linux/arm64, set # the reference fee to 500. # - Bookworm using GCC 15: Debug and no Unity on linux/amd64, enable - # code coverage. + # code coverage (which will be done below). # - Bookworm using Clang 16: Debug and no Unity on linux/arm64, # enable voidstar. # - Bookworm using Clang 17: Release and no Unity on linux/amd64, @@ -51,9 +51,6 @@ def generate_strategy_matrix(all: bool, architecture: list[dict], os: list[dict] cmake_args = f'-DUNIT_TEST_REFERENCE_FEE=500 {cmake_args}' skip = False if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-15' and build_type == 'Debug' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/amd64': - cmake_args = f'-Dcoverage=ON -Dcoverage_format=xml -DCODE_COVERAGE_VERBOSE=ON -DCMAKE_C_FLAGS=-O0 -DCMAKE_CXX_FLAGS=-O0 {cmake_args}' - cmake_target = 'coverage' - build_only = True skip = False if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-16' and build_type == 'Debug' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/arm64': cmake_args = f'-Dvoidstar=ON {cmake_args}' @@ -127,6 +124,13 @@ def generate_strategy_matrix(all: bool, architecture: list[dict], os: list[dict] if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-20' and architecture['platform'] == 'linux/arm64': continue + # Enable code coverage for Debian Bookworm using GCC 15 in Debug and no + # Unity on linux/amd64 + if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-15' and build_type == 'Debug' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/amd64': + cmake_args = f'-Dcoverage=ON -Dcoverage_format=xml -DCODE_COVERAGE_VERBOSE=ON -DCMAKE_C_FLAGS=-O0 -DCMAKE_CXX_FLAGS=-O0 {cmake_args}' + cmake_target = 'coverage' + build_only = True + # Generate a unique name for the configuration, e.g. macos-arm64-debug # or debian-bookworm-gcc-12-amd64-release-unity. 
config_name = os['distro_name'] diff --git a/.github/workflows/on-pr.yml b/.github/workflows/on-pr.yml index e626222865..02048efa64 100644 --- a/.github/workflows/on-pr.yml +++ b/.github/workflows/on-pr.yml @@ -6,29 +6,6 @@ name: PR on: pull_request: - paths: - - ".github/actions/build-deps/**" - - ".github/actions/build-test/**" - - ".github/scripts/levelization/**" - - ".github/scripts/strategy-matrix/**" - - ".github/workflows/build-test.yml" - - ".github/workflows/check-format.yml" - - ".github/workflows/check-levelization.yml" - - ".github/workflows/notify-clio.yml" - - ".github/workflows/on-pr.yml" - # Keep the list of paths below in sync with those in the `on-trigger.yml` - # file. - - "cmake/**" - - "conan/**" - - "external/**" - - "include/**" - - "src/**" - - "tests/**" - - ".clang-format" - - ".codecov.yml" - - ".pre-commit-config.yaml" - - "CMakeLists.txt" - - "conanfile.py" types: - opened - reopened @@ -57,18 +34,66 @@ jobs: - name: No-op run: true - check-format: + # This job checks whether any files have changed that should cause the next + # jobs to run. We do it this way rather than using `paths` in the `on:` + # section, because all required checks must pass, even for changes that do not + # modify anything that affects those checks. We would therefore like to make + # the checks required only if the job runs, but GitHub does not support that + # directly. By always executing the workflow on new commits and by using the + # changed-files action below, we ensure that Github considers any skipped jobs + # to have passed, and in turn the required checks as well. + any-changed: needs: should-run + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 + - name: Determine changed files + id: changes + uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5 + with: + files: | + # These paths are unique to `on-pr.yml`. + .github/scripts/levelization/** + .github/workflows/check-format.yml + .github/workflows/check-levelization.yml + .github/workflows/notify-clio.yml + .github/workflows/on-pr.yml + .clang-format + .pre-commit-config.yaml + + # Keep the paths below in sync with those in `on-trigger.yml`. + .github/actions/build-deps/** + .github/actions/build-test/** + .github/scripts/strategy-matrix/** + .github/workflows/build-test.yml + .codecov.yml + cmake/** + conan/** + external/** + include/** + src/** + tests/** + CMakeLists.txt + conanfile.py + outputs: + changed: ${{ steps.changes.outputs.any_changed }} + + check-format: + needs: any-changed + if: needs.any-changed.outputs.changed == 'true' uses: ./.github/workflows/check-format.yml check-levelization: - needs: should-run + needs: any-changed + if: needs.any-changed.outputs.changed == 'true' uses: ./.github/workflows/check-levelization.yml # This job works around the limitation that GitHub Actions does not support # using environment variables as inputs for reusable workflows. generate-outputs: - needs: should-run + needs: any-changed + if: needs.any-changed.outputs.changed == 'true' runs-on: ubuntu-latest steps: - name: No-op diff --git a/.github/workflows/on-trigger.yml b/.github/workflows/on-trigger.yml index 55e93b9866..b4c940ae4e 100644 --- a/.github/workflows/on-trigger.yml +++ b/.github/workflows/on-trigger.yml @@ -13,31 +13,33 @@ on: - release - master paths: + # These paths are unique to `on-trigger.yml`. 
+ - ".github/workflows/check-missing-commits.yml" + - ".github/workflows/on-trigger.yml" + - ".github/workflows/publish-docs.yml" + + # Keep the paths below in sync with those in `on-pr.yml`. - ".github/actions/build-deps/**" - ".github/actions/build-test/**" - ".github/scripts/strategy-matrix/**" - ".github/workflows/build-test.yml" - - ".github/workflows/check-missing-commits.yml" - - ".github/workflows/on-trigger.yml" - - ".github/workflows/publish-docs.yml" - # Keep the list of paths below in sync with those in `on-pr.yml`. + - ".codecov.yml" - "cmake/**" - "conan/**" - "external/**" - "include/**" - "src/**" - "tests/**" - - ".clang-format" - - ".codecov.yml" - - ".pre-commit-config.yaml" - "CMakeLists.txt" - "conanfile.py" + # Run at 06:32 UTC on every day of the week from Monday through Friday. This # will force all dependencies to be rebuilt, which is useful to verify that # all dependencies can be built successfully. Only the dependencies that # are actually missing from the remote will be uploaded. schedule: - cron: "32 6 * * 1-5" + # Run when manually triggered via the GitHub UI or API. If `force_upload` is # true, then the dependencies that were missing (`force_rebuild` is false) or # rebuilt (`force_rebuild` is true) will be uploaded, overwriting existing From 92431a42387c6e5c9944b3514b0d0c1b30b29ced Mon Sep 17 00:00:00 2001 From: Bart Date: Tue, 26 Aug 2025 17:12:37 -0400 Subject: [PATCH 141/244] chore: Add support for merge_group event (#5734) This change adds support for the merge_group CI event, which will allow us to enable merge queues. --- .github/workflows/on-pr.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/on-pr.yml b/.github/workflows/on-pr.yml index 02048efa64..a5f1d60c42 100644 --- a/.github/workflows/on-pr.yml +++ b/.github/workflows/on-pr.yml @@ -5,6 +5,9 @@ name: PR on: + merge_group: + types: + - checks_requested pull_request: types: - opened From 808c86663c79159812389b325ac52db3323c5f28 Mon Sep 17 00:00:00 2001 From: Bart Date: Tue, 26 Aug 2025 19:07:23 -0400 Subject: [PATCH 142/244] fix: Add codecov token to trigger workflow (#5736) This change adds the Codecov token to the on-trigger workflow. --- .github/workflows/on-trigger.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/on-trigger.yml b/.github/workflows/on-trigger.yml index b4c940ae4e..7732b814ad 100644 --- a/.github/workflows/on-trigger.yml +++ b/.github/workflows/on-trigger.yml @@ -113,5 +113,6 @@ jobs: os: ${{ matrix.os }} strategy_matrix: "all" secrets: + codecov_token: ${{ secrets.CODECOV_TOKEN }} conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }} conan_remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }} From 1506e65558818ea73a2233d2a55c1d1441d5fb10 Mon Sep 17 00:00:00 2001 From: Alex Kremer Date: Wed, 27 Aug 2025 10:34:50 +0100 Subject: [PATCH 143/244] refactor: Update to Boost 1.88 (#5570) This updates Boost to 1.88, which is needed because Clio wants to move to 1.88 as that fixes several ASAN false positives around coroutine usage. In order for Clio to move to newer boost, libXRPL needs to move too. Hence the changes in this PR. A lot has changed between 1.83 and 1.88 so there are lots of changes in the diff, especially in regards to Boost.Asio and coroutines in particular. 
--- cmake/RippledSettings.cmake | 2 +- cmake/deps/Boost.cmake | 1 + conanfile.py | 5 +- include/xrpl/basics/ResolverAsio.h | 4 +- include/xrpl/beast/asio/io_latency_probe.h | 39 ++++--- include/xrpl/beast/test/yield_to.h | 26 +++-- include/xrpl/json/json_reader.h | 2 +- include/xrpl/net/AutoSocket.h | 4 +- include/xrpl/net/HTTPClient.h | 8 +- include/xrpl/net/HTTPClientSSLContext.h | 8 +- include/xrpl/server/Server.h | 6 +- include/xrpl/server/Session.h | 6 +- include/xrpl/server/detail/BaseHTTPPeer.h | 20 ++-- include/xrpl/server/detail/BasePeer.h | 4 +- include/xrpl/server/detail/BaseWSPeer.h | 27 +++-- include/xrpl/server/detail/Door.h | 17 +-- include/xrpl/server/detail/PlainHTTPPeer.h | 4 +- include/xrpl/server/detail/SSLHTTPPeer.h | 6 +- include/xrpl/server/detail/ServerImpl.h | 28 +++-- include/xrpl/server/detail/Spawn.h | 108 ++++++++++++++++++ include/xrpl/server/detail/io_list.h | 2 +- src/libxrpl/basics/ResolverAsio.cpp | 87 +++++++++----- src/libxrpl/beast/insight/StatsDCollector.cpp | 105 ++++++++++------- src/libxrpl/beast/net/IPAddressV4.cpp | 8 +- src/libxrpl/beast/net/IPAddressV6.cpp | 6 +- src/libxrpl/beast/net/IPEndpoint.cpp | 4 +- src/libxrpl/net/HTTPClient.cpp | 68 ++++++----- src/libxrpl/server/Port.cpp | 3 +- src/test/app/DNS_test.cpp | 9 +- src/test/app/LedgerReplay_test.cpp | 2 +- src/test/app/ValidatorSite_test.cpp | 2 +- src/test/beast/IPEndpoint_test.cpp | 35 +++--- .../beast/beast_io_latency_probe_test.cpp | 23 ++-- src/test/jtx/TrustedPublisherServer.h | 4 +- src/test/jtx/impl/JSONRPCClient.cpp | 2 +- src/test/jtx/impl/WSClient.cpp | 55 ++++++--- src/test/overlay/compression_test.cpp | 2 +- src/test/overlay/reduce_relay_test.cpp | 2 +- src/test/overlay/short_read_test.cpp | 28 +++-- src/test/overlay/tx_reduce_relay_test.cpp | 8 +- src/test/rpc/ValidatorRPC_test.cpp | 4 +- src/test/server/ServerStatus_test.cpp | 23 ++-- src/test/server/Server_test.cpp | 29 ++--- .../app/ledger/detail/TimeoutCounter.cpp | 2 +- src/xrpld/app/ledger/detail/TimeoutCounter.h | 2 +- src/xrpld/app/main/Application.cpp | 60 +++++----- src/xrpld/app/main/Application.h | 4 +- src/xrpld/app/main/BasicApp.cpp | 6 +- src/xrpld/app/main/BasicApp.h | 16 +-- src/xrpld/app/main/Main.cpp | 12 +- src/xrpld/app/misc/NetworkOPs.cpp | 38 +++--- src/xrpld/app/misc/NetworkOPs.h | 2 +- src/xrpld/app/misc/detail/ValidatorSite.cpp | 8 +- src/xrpld/app/misc/detail/WorkBase.h | 69 ++++++----- src/xrpld/app/misc/detail/WorkFile.h | 23 ++-- src/xrpld/app/misc/detail/WorkPlain.h | 4 +- src/xrpld/app/misc/detail/WorkSSL.cpp | 10 +- src/xrpld/app/misc/detail/WorkSSL.h | 2 +- src/xrpld/overlay/detail/ConnectAttempt.cpp | 105 +++++++++++------ src/xrpld/overlay/detail/ConnectAttempt.h | 4 +- src/xrpld/overlay/detail/Handshake.cpp | 4 +- src/xrpld/overlay/detail/OverlayImpl.cpp | 29 ++--- src/xrpld/overlay/detail/OverlayImpl.h | 11 +- src/xrpld/overlay/detail/PeerImp.cpp | 36 ++++-- src/xrpld/overlay/detail/PeerImp.h | 2 +- src/xrpld/overlay/detail/PeerSet.cpp | 2 +- src/xrpld/overlay/detail/ZeroCopyStream.h | 4 +- src/xrpld/overlay/make_Overlay.h | 4 +- src/xrpld/peerfinder/detail/Checker.h | 20 ++-- .../peerfinder/detail/PeerfinderManager.cpp | 21 ++-- src/xrpld/peerfinder/make_Manager.h | 4 +- src/xrpld/rpc/RPCCall.h | 4 +- src/xrpld/rpc/RPCSub.h | 6 +- src/xrpld/rpc/ServerHandler.h | 6 +- src/xrpld/rpc/detail/RPCCall.cpp | 6 +- src/xrpld/rpc/detail/RPCSub.cpp | 12 +- src/xrpld/rpc/detail/ServerHandler.cpp | 11 +- src/xrpld/rpc/handlers/Subscribe.cpp | 2 +- 78 files changed, 871 insertions(+), 516 deletions(-) 
create mode 100644 include/xrpl/server/detail/Spawn.h diff --git a/cmake/RippledSettings.cmake b/cmake/RippledSettings.cmake index 9dc8609f58..9f59d9e9eb 100644 --- a/cmake/RippledSettings.cmake +++ b/cmake/RippledSettings.cmake @@ -118,7 +118,7 @@ option(beast_no_unit_test_inline "Prevents unit test definitions from being inserted into global table" OFF) option(single_io_service_thread - "Restricts the number of threads calling io_service::run to one. \ + "Restricts the number of threads calling io_context::run to one. \ This can be useful when debugging." OFF) option(boost_show_deprecated diff --git a/cmake/deps/Boost.cmake b/cmake/deps/Boost.cmake index 031202f4d2..bde40c0ce5 100644 --- a/cmake/deps/Boost.cmake +++ b/cmake/deps/Boost.cmake @@ -30,6 +30,7 @@ target_link_libraries(ripple_boost Boost::date_time Boost::filesystem Boost::json + Boost::process Boost::program_options Boost::regex Boost::system diff --git a/conanfile.py b/conanfile.py index da99836157..01f61c5d4e 100644 --- a/conanfile.py +++ b/conanfile.py @@ -100,11 +100,13 @@ class Xrpl(ConanFile): def configure(self): if self.settings.compiler == 'apple-clang': self.options['boost'].visibility = 'global' + if self.settings.compiler in ['clang', 'gcc']: + self.options['boost'].without_cobalt = True def requirements(self): # Conan 2 requires transitive headers to be specified transitive_headers_opt = {'transitive_headers': True} if conan_version.split('.')[0] == '2' else {} - self.requires('boost/1.86.0', force=True, **transitive_headers_opt) + self.requires('boost/1.88.0', force=True, **transitive_headers_opt) self.requires('date/3.0.4', **transitive_headers_opt) self.requires('lz4/1.10.0', force=True) self.requires('protobuf/3.21.12', force=True) @@ -175,6 +177,7 @@ class Xrpl(ConanFile): 'boost::filesystem', 'boost::json', 'boost::program_options', + 'boost::process', 'boost::regex', 'boost::system', 'boost::thread', diff --git a/include/xrpl/basics/ResolverAsio.h b/include/xrpl/basics/ResolverAsio.h index 49700d2b24..94688de650 100644 --- a/include/xrpl/basics/ResolverAsio.h +++ b/include/xrpl/basics/ResolverAsio.h @@ -23,7 +23,7 @@ #include #include -#include +#include namespace ripple { @@ -33,7 +33,7 @@ public: explicit ResolverAsio() = default; static std::unique_ptr - New(boost::asio::io_service&, beast::Journal); + New(boost::asio::io_context&, beast::Journal); }; } // namespace ripple diff --git a/include/xrpl/beast/asio/io_latency_probe.h b/include/xrpl/beast/asio/io_latency_probe.h index 966b4686ae..37f75cf649 100644 --- a/include/xrpl/beast/asio/io_latency_probe.h +++ b/include/xrpl/beast/asio/io_latency_probe.h @@ -23,7 +23,8 @@ #include #include -#include +#include +#include #include #include @@ -32,7 +33,7 @@ namespace beast { -/** Measures handler latency on an io_service queue. */ +/** Measures handler latency on an io_context queue. */ template class io_latency_probe { @@ -44,12 +45,12 @@ private: std::condition_variable_any m_cond; std::size_t m_count; duration const m_period; - boost::asio::io_service& m_ios; + boost::asio::io_context& m_ios; boost::asio::basic_waitable_timer m_timer; bool m_cancel; public: - io_latency_probe(duration const& period, boost::asio::io_service& ios) + io_latency_probe(duration const& period, boost::asio::io_context& ios) : m_count(1) , m_period(period) , m_ios(ios) @@ -64,16 +65,16 @@ public: cancel(lock, true); } - /** Return the io_service associated with the latency probe. */ + /** Return the io_context associated with the latency probe. 
*/ /** @{ */ - boost::asio::io_service& - get_io_service() + boost::asio::io_context& + get_io_context() { return m_ios; } - boost::asio::io_service const& - get_io_service() const + boost::asio::io_context const& + get_io_context() const { return m_ios; } @@ -109,8 +110,10 @@ public: std::lock_guard lock(m_mutex); if (m_cancel) throw std::logic_error("io_latency_probe is canceled"); - m_ios.post(sample_op( - std::forward(handler), Clock::now(), false, this)); + boost::asio::post( + m_ios, + sample_op( + std::forward(handler), Clock::now(), false, this)); } /** Initiate continuous i/o latency sampling. @@ -124,8 +127,10 @@ public: std::lock_guard lock(m_mutex); if (m_cancel) throw std::logic_error("io_latency_probe is canceled"); - m_ios.post(sample_op( - std::forward(handler), Clock::now(), true, this)); + boost::asio::post( + m_ios, + sample_op( + std::forward(handler), Clock::now(), true, this)); } private: @@ -236,12 +241,13 @@ private: // The latency is too high to maintain the desired // period so don't bother with a timer. // - m_probe->m_ios.post( + boost::asio::post( + m_probe->m_ios, sample_op(m_handler, now, m_repeat, m_probe)); } else { - m_probe->m_timer.expires_from_now(when - now); + m_probe->m_timer.expires_after(when - now); m_probe->m_timer.async_wait( sample_op(m_handler, now, m_repeat, m_probe)); } @@ -254,7 +260,8 @@ private: if (!m_probe) return; typename Clock::time_point const now(Clock::now()); - m_probe->m_ios.post( + boost::asio::post( + m_probe->m_ios, sample_op(m_handler, now, m_repeat, m_probe)); } }; diff --git a/include/xrpl/beast/test/yield_to.h b/include/xrpl/beast/test/yield_to.h index 27a3a2db20..a222e8627e 100644 --- a/include/xrpl/beast/test/yield_to.h +++ b/include/xrpl/beast/test/yield_to.h @@ -8,9 +8,11 @@ #ifndef BEAST_TEST_YIELD_TO_HPP #define BEAST_TEST_YIELD_TO_HPP -#include +#include +#include #include #include +#include #include #include @@ -29,10 +31,12 @@ namespace test { class enable_yield_to { protected: - boost::asio::io_service ios_; + boost::asio::io_context ios_; private: - boost::optional work_; + boost::optional> + work_; std::vector threads_; std::mutex m_; std::condition_variable cv_; @@ -42,7 +46,8 @@ public: /// The type of yield context passed to functions. using yield_context = boost::asio::yield_context; - explicit enable_yield_to(std::size_t concurrency = 1) : work_(ios_) + explicit enable_yield_to(std::size_t concurrency = 1) + : work_(boost::asio::make_work_guard(ios_)) { threads_.reserve(concurrency); while (concurrency--) @@ -56,9 +61,9 @@ public: t.join(); } - /// Return the `io_service` associated with the object - boost::asio::io_service& - get_io_service() + /// Return the `io_context` associated with the object + boost::asio::io_context& + get_io_context() { return ios_; } @@ -111,13 +116,18 @@ enable_yield_to::spawn(F0&& f, FN&&... 
fn) { boost::asio::spawn( ios_, + boost::allocator_arg, + boost::context::fixedsize_stack(2 * 1024 * 1024), [&](yield_context yield) { f(yield); std::lock_guard lock{m_}; if (--running_ == 0) cv_.notify_all(); }, - boost::coroutines::attributes(2 * 1024 * 1024)); + [](std::exception_ptr e) { + if (e) + std::rethrow_exception(e); + }); spawn(fn...); } diff --git a/include/xrpl/json/json_reader.h b/include/xrpl/json/json_reader.h index 81866819a5..8eceee1f1c 100644 --- a/include/xrpl/json/json_reader.h +++ b/include/xrpl/json/json_reader.h @@ -217,7 +217,7 @@ Reader::parse(Value& root, BufferSequence const& bs) std::string s; s.reserve(buffer_size(bs)); for (auto const& b : bs) - s.append(buffer_cast(b), buffer_size(b)); + s.append(static_cast(b.data()), buffer_size(b)); return parse(s, root); } diff --git a/include/xrpl/net/AutoSocket.h b/include/xrpl/net/AutoSocket.h index d06787340b..5f82854039 100644 --- a/include/xrpl/net/AutoSocket.h +++ b/include/xrpl/net/AutoSocket.h @@ -47,7 +47,7 @@ public: public: AutoSocket( - boost::asio::io_service& s, + boost::asio::io_context& s, boost::asio::ssl::context& c, bool secureOnly, bool plainOnly) @@ -58,7 +58,7 @@ public: mSocket = std::make_unique(s, c); } - AutoSocket(boost::asio::io_service& s, boost::asio::ssl::context& c) + AutoSocket(boost::asio::io_context& s, boost::asio::ssl::context& c) : AutoSocket(s, c, false, false) { } diff --git a/include/xrpl/net/HTTPClient.h b/include/xrpl/net/HTTPClient.h index ef295e8e5a..b5043cd024 100644 --- a/include/xrpl/net/HTTPClient.h +++ b/include/xrpl/net/HTTPClient.h @@ -23,7 +23,7 @@ #include #include -#include +#include #include #include @@ -51,7 +51,7 @@ public: static void get(bool bSSL, - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, std::deque deqSites, unsigned short const port, std::string const& strPath, @@ -65,7 +65,7 @@ public: static void get(bool bSSL, - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, std::string strSite, unsigned short const port, std::string const& strPath, @@ -80,7 +80,7 @@ public: static void request( bool bSSL, - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, std::string strSite, unsigned short const port, std::function< diff --git a/include/xrpl/net/HTTPClientSSLContext.h b/include/xrpl/net/HTTPClientSSLContext.h index 2f7d6c005e..f5dd1e54c6 100644 --- a/include/xrpl/net/HTTPClientSSLContext.h +++ b/include/xrpl/net/HTTPClientSSLContext.h @@ -153,7 +153,7 @@ public: { strm.set_verify_callback( std::bind( - &rfc2818_verify, + &rfc6125_verify, host, std::placeholders::_1, std::placeholders::_2, @@ -167,7 +167,7 @@ public: /** * @brief callback invoked for name verification - just passes through - * to the asio rfc2818 implementation. + * to the asio `host_name_verification` (rfc6125) implementation. 
* * @param domain hostname expected * @param preverified passed by implementation @@ -175,13 +175,13 @@ public: * @param j journal for logging */ static bool - rfc2818_verify( + rfc6125_verify( std::string const& domain, bool preverified, boost::asio::ssl::verify_context& ctx, beast::Journal j) { - if (boost::asio::ssl::rfc2818_verification(domain)(preverified, ctx)) + if (boost::asio::ssl::host_name_verification(domain)(preverified, ctx)) return true; JLOG(j.warn()) << "Outbound SSL connection to " << domain diff --git a/include/xrpl/server/Server.h b/include/xrpl/server/Server.h index 232d1c381b..a8f9c7f8af 100644 --- a/include/xrpl/server/Server.h +++ b/include/xrpl/server/Server.h @@ -25,7 +25,7 @@ #include #include -#include +#include namespace ripple { @@ -34,10 +34,10 @@ template std::unique_ptr make_Server( Handler& handler, - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, beast::Journal journal) { - return std::make_unique>(handler, io_service, journal); + return std::make_unique>(handler, io_context, journal); } } // namespace ripple diff --git a/include/xrpl/server/Session.h b/include/xrpl/server/Session.h index 196f8c78c2..586172a5da 100644 --- a/include/xrpl/server/Session.h +++ b/include/xrpl/server/Session.h @@ -88,9 +88,7 @@ public: ++iter) { typename BufferSequence::value_type const& buffer(*iter); - write( - boost::asio::buffer_cast(buffer), - boost::asio::buffer_size(buffer)); + write(buffer.data(), boost::asio::buffer_size(buffer)); } } @@ -104,7 +102,7 @@ public: /** Detach the session. This holds the session open so that the response can be sent - asynchronously. Calls to io_service::run made by the server + asynchronously. Calls to io_context::run made by the server will not return until all detached sessions are closed. 
*/ virtual std::shared_ptr diff --git a/include/xrpl/server/detail/BaseHTTPPeer.h b/include/xrpl/server/detail/BaseHTTPPeer.h index b065a97cf0..b7f471bdee 100644 --- a/include/xrpl/server/detail/BaseHTTPPeer.h +++ b/include/xrpl/server/detail/BaseHTTPPeer.h @@ -24,11 +24,13 @@ #include #include #include +#include #include #include #include #include +#include #include #include #include @@ -215,8 +217,8 @@ BaseHTTPPeer::BaseHTTPPeer( ConstBufferSequence const& buffers) : port_(port) , handler_(handler) - , work_(executor) - , strand_(executor) + , work_(boost::asio::make_work_guard(executor)) + , strand_(boost::asio::make_strand(executor)) , remote_address_(remote_address) , journal_(journal) { @@ -356,7 +358,7 @@ BaseHTTPPeer::on_write( return; if (graceful_) return do_close(); - boost::asio::spawn( + util::spawn( strand_, std::bind( &BaseHTTPPeer::do_read, @@ -375,7 +377,7 @@ BaseHTTPPeer::do_writer( { auto const p = impl().shared_from_this(); resume = std::function([this, p, writer, keep_alive]() { - boost::asio::spawn( + util::spawn( strand_, std::bind( &BaseHTTPPeer::do_writer, @@ -406,7 +408,7 @@ BaseHTTPPeer::do_writer( if (!keep_alive) return do_close(); - boost::asio::spawn( + util::spawn( strand_, std::bind( &BaseHTTPPeer::do_read, @@ -448,14 +450,14 @@ BaseHTTPPeer::write( std::shared_ptr const& writer, bool keep_alive) { - boost::asio::spawn(bind_executor( + util::spawn( strand_, std::bind( &BaseHTTPPeer::do_writer, impl().shared_from_this(), writer, keep_alive, - std::placeholders::_1))); + std::placeholders::_1)); } // DEPRECATED @@ -490,12 +492,12 @@ BaseHTTPPeer::complete() } // keep-alive - boost::asio::spawn(bind_executor( + util::spawn( strand_, std::bind( &BaseHTTPPeer::do_read, impl().shared_from_this(), - std::placeholders::_1))); + std::placeholders::_1)); } // DEPRECATED diff --git a/include/xrpl/server/detail/BasePeer.h b/include/xrpl/server/detail/BasePeer.h index 35975efafb..30de63e6ff 100644 --- a/include/xrpl/server/detail/BasePeer.h +++ b/include/xrpl/server/detail/BasePeer.h @@ -91,8 +91,8 @@ BasePeer::BasePeer( return "##" + std::to_string(++id) + " "; }()) , j_(sink_) - , work_(executor) - , strand_(executor) + , work_(boost::asio::make_work_guard(executor)) + , strand_(boost::asio::make_strand(executor)) { } diff --git a/include/xrpl/server/detail/BaseWSPeer.h b/include/xrpl/server/detail/BaseWSPeer.h index 027b0cbf7c..391c5c337e 100644 --- a/include/xrpl/server/detail/BaseWSPeer.h +++ b/include/xrpl/server/detail/BaseWSPeer.h @@ -29,6 +29,7 @@ #include #include +#include #include #include #include @@ -420,11 +421,17 @@ BaseWSPeer::start_timer() // Max seconds without completing a message static constexpr std::chrono::seconds timeout{30}; static constexpr std::chrono::seconds timeoutLocal{3}; - error_code ec; - timer_.expires_from_now( - remote_endpoint().address().is_loopback() ? timeoutLocal : timeout, ec); - if (ec) - return fail(ec, "start_timer"); + + try + { + timer_.expires_after( + remote_endpoint().address().is_loopback() ? 
timeoutLocal : timeout); + } + catch (boost::system::system_error const& e) + { + return fail(e.code(), "start_timer"); + } + timer_.async_wait(bind_executor( strand_, std::bind( @@ -438,8 +445,14 @@ template void BaseWSPeer::cancel_timer() { - error_code ec; - timer_.cancel(ec); + try + { + timer_.cancel(); + } + catch (boost::system::system_error const&) + { + // ignored + } } template diff --git a/include/xrpl/server/detail/Door.h b/include/xrpl/server/detail/Door.h index 88e19db8cd..7906af2a52 100644 --- a/include/xrpl/server/detail/Door.h +++ b/include/xrpl/server/detail/Door.h @@ -69,7 +69,7 @@ private: stream_type stream_; socket_type& socket_; endpoint_type remote_address_; - boost::asio::io_context::strand strand_; + boost::asio::strand strand_; beast::Journal const j_; public: @@ -95,7 +95,7 @@ private: Handler& handler_; boost::asio::io_context& ioc_; acceptor_type acceptor_; - boost::asio::io_context::strand strand_; + boost::asio::strand strand_; bool ssl_; bool plain_; @@ -155,7 +155,7 @@ Door::Detector::Detector( , stream_(std::move(stream)) , socket_(stream_.socket()) , remote_address_(remote_address) - , strand_(ioc_) + , strand_(boost::asio::make_strand(ioc_)) , j_(j) { } @@ -164,7 +164,7 @@ template void Door::Detector::run() { - boost::asio::spawn( + util::spawn( strand_, std::bind( &Detector::do_detect, @@ -269,7 +269,7 @@ Door::reOpen() Throw(); } - acceptor_.listen(boost::asio::socket_base::max_connections, ec); + acceptor_.listen(boost::asio::socket_base::max_listen_connections, ec); if (ec) { JLOG(j_.error()) << "Listen on port '" << port_.name @@ -291,7 +291,7 @@ Door::Door( , handler_(handler) , ioc_(io_context) , acceptor_(io_context) - , strand_(io_context) + , strand_(boost::asio::make_strand(io_context)) , ssl_( port_.protocol.count("https") > 0 || port_.protocol.count("wss") > 0 || port_.protocol.count("wss2") > 0 || @@ -307,7 +307,7 @@ template void Door::run() { - boost::asio::spawn( + util::spawn( strand_, std::bind( &Door::do_accept, @@ -320,7 +320,8 @@ void Door::close() { if (!strand_.running_in_this_thread()) - return strand_.post( + return boost::asio::post( + strand_, std::bind(&Door::close, this->shared_from_this())); error_code ec; acceptor_.close(ec); diff --git a/include/xrpl/server/detail/PlainHTTPPeer.h b/include/xrpl/server/detail/PlainHTTPPeer.h index ee31c78cad..f6f8e5b010 100644 --- a/include/xrpl/server/detail/PlainHTTPPeer.h +++ b/include/xrpl/server/detail/PlainHTTPPeer.h @@ -105,7 +105,7 @@ PlainHTTPPeer::run() { if (!this->handler_.onAccept(this->session(), this->remote_address_)) { - boost::asio::spawn( + util::spawn( this->strand_, std::bind(&PlainHTTPPeer::do_close, this->shared_from_this())); return; @@ -114,7 +114,7 @@ PlainHTTPPeer::run() if (!socket_.is_open()) return; - boost::asio::spawn( + util::spawn( this->strand_, std::bind( &PlainHTTPPeer::do_read, diff --git a/include/xrpl/server/detail/SSLHTTPPeer.h b/include/xrpl/server/detail/SSLHTTPPeer.h index fac4b866d3..8564263114 100644 --- a/include/xrpl/server/detail/SSLHTTPPeer.h +++ b/include/xrpl/server/detail/SSLHTTPPeer.h @@ -115,14 +115,14 @@ SSLHTTPPeer::run() { if (!this->handler_.onAccept(this->session(), this->remote_address_)) { - boost::asio::spawn( + util::spawn( this->strand_, std::bind(&SSLHTTPPeer::do_close, this->shared_from_this())); return; } if (!socket_.is_open()) return; - boost::asio::spawn( + util::spawn( this->strand_, std::bind( &SSLHTTPPeer::do_handshake, @@ -164,7 +164,7 @@ SSLHTTPPeer::do_handshake(yield_context do_yield) 
this->port().protocol.count("https") > 0; if (http) { - boost::asio::spawn( + util::spawn( this->strand_, std::bind( &SSLHTTPPeer::do_read, diff --git a/include/xrpl/server/detail/ServerImpl.h b/include/xrpl/server/detail/ServerImpl.h index fd0b082b46..4090aa0a6b 100644 --- a/include/xrpl/server/detail/ServerImpl.h +++ b/include/xrpl/server/detail/ServerImpl.h @@ -26,6 +26,8 @@ #include #include +#include +#include #include #include @@ -85,9 +87,11 @@ private: Handler& handler_; beast::Journal const j_; - boost::asio::io_service& io_service_; - boost::asio::io_service::strand strand_; - std::optional work_; + boost::asio::io_context& io_context_; + boost::asio::strand strand_; + std::optional> + work_; std::mutex m_; std::vector ports_; @@ -100,7 +104,7 @@ private: public: ServerImpl( Handler& handler, - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, beast::Journal journal); ~ServerImpl(); @@ -123,10 +127,10 @@ public: return ios_; } - boost::asio::io_service& - get_io_service() + boost::asio::io_context& + get_io_context() { - return io_service_; + return io_context_; } bool @@ -140,13 +144,13 @@ private: template ServerImpl::ServerImpl( Handler& handler, - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, beast::Journal journal) : handler_(handler) , j_(journal) - , io_service_(io_service) - , strand_(io_service_) - , work_(io_service_) + , io_context_(io_context) + , strand_(boost::asio::make_strand(io_context_)) + , work_(std::in_place, boost::asio::make_work_guard(io_context_)) { } @@ -173,7 +177,7 @@ ServerImpl::ports(std::vector const& ports) ports_.push_back(port); auto& internalPort = ports_.back(); if (auto sp = ios_.emplace>( - handler_, io_service_, internalPort, j_)) + handler_, io_context_, internalPort, j_)) { list_.push_back(sp); diff --git a/include/xrpl/server/detail/Spawn.h b/include/xrpl/server/detail/Spawn.h new file mode 100644 index 0000000000..56f173dec3 --- /dev/null +++ b/include/xrpl/server/detail/Spawn.h @@ -0,0 +1,108 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright(c) 2025 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#ifndef RIPPLE_SERVER_SPAWN_H_INCLUDED +#define RIPPLE_SERVER_SPAWN_H_INCLUDED + +#include + +#include +#include + +#include +#include + +namespace ripple::util { +namespace impl { + +template +concept IsStrand = std::same_as< + std::decay_t, + boost::asio::strand::inner_executor_type>>; + +/** + * @brief A completion handler that restores `boost::asio::spawn`'s behaviour + * from Boost 1.83 + * + * This is intended to be passed as the third argument to `boost::asio::spawn` + * so that exceptions are not ignored but propagated to `io_context.run()` call + * site. + * + * @param ePtr The exception that was caught on the coroutine + */ +inline constexpr auto kPROPAGATE_EXCEPTIONS = [](std::exception_ptr ePtr) { + if (ePtr) + { + try + { + std::rethrow_exception(ePtr); + } + catch (std::exception const& e) + { + JLOG(debugLog().warn()) << "Spawn exception: " << e.what(); + throw; + } + catch (...) + { + JLOG(debugLog().warn()) << "Spawn exception: Unknown"; + throw; + } + } +}; + +} // namespace impl + +/** + * @brief Spawns a coroutine using `boost::asio::spawn` + * + * @note This uses kPROPAGATE_EXCEPTIONS to force asio to propagate exceptions + * through `io_context` + * @note Since implicit strand was removed from boost::asio::spawn this helper + * function adds the strand back + * + * @tparam Ctx The type of the context/strand + * @tparam F The type of the function to execute + * @param ctx The execution context + * @param func The function to execute. Must return `void` + */ +template + requires std::is_invocable_r_v +void +spawn(Ctx&& ctx, F&& func) +{ + if constexpr (impl::IsStrand) + { + boost::asio::spawn( + std::forward(ctx), + std::forward(func), + impl::kPROPAGATE_EXCEPTIONS); + } + else + { + boost::asio::spawn( + boost::asio::make_strand( + boost::asio::get_associated_executor(std::forward(ctx))), + std::forward(func), + impl::kPROPAGATE_EXCEPTIONS); + } +} + +} // namespace ripple::util + +#endif diff --git a/include/xrpl/server/detail/io_list.h b/include/xrpl/server/detail/io_list.h index fba8b28f87..6292794864 100644 --- a/include/xrpl/server/detail/io_list.h +++ b/include/xrpl/server/detail/io_list.h @@ -166,7 +166,7 @@ public: May be called concurrently. Preconditions: - No call to io_service::run on any io_service + No call to io_context::run on any io_context used by work objects associated with this io_list exists in the caller's call stack. 
*/ diff --git a/src/libxrpl/basics/ResolverAsio.cpp b/src/libxrpl/basics/ResolverAsio.cpp index fde27189e7..1b52465a80 100644 --- a/src/libxrpl/basics/ResolverAsio.cpp +++ b/src/libxrpl/basics/ResolverAsio.cpp @@ -25,8 +25,9 @@ #include #include +#include #include -#include +#include #include #include @@ -124,8 +125,8 @@ public: beast::Journal m_journal; - boost::asio::io_service& m_io_service; - boost::asio::io_service::strand m_strand; + boost::asio::io_context& m_io_context; + boost::asio::strand m_strand; boost::asio::ip::tcp::resolver m_resolver; std::condition_variable m_cv; @@ -155,12 +156,12 @@ public: std::deque m_work; ResolverAsioImpl( - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, beast::Journal journal) : m_journal(journal) - , m_io_service(io_service) - , m_strand(io_service) - , m_resolver(io_service) + , m_io_context(io_context) + , m_strand(boost::asio::make_strand(io_context)) + , m_resolver(io_context) , m_asyncHandlersCompleted(true) , m_stop_called(false) , m_stopped(true) @@ -216,8 +217,14 @@ public: { if (m_stop_called.exchange(true) == false) { - m_io_service.dispatch(m_strand.wrap(std::bind( - &ResolverAsioImpl::do_stop, this, CompletionCounter(this)))); + boost::asio::dispatch( + m_io_context, + boost::asio::bind_executor( + m_strand, + std::bind( + &ResolverAsioImpl::do_stop, + this, + CompletionCounter(this)))); JLOG(m_journal.debug()) << "Queued a stop request"; } @@ -248,12 +255,16 @@ public: // TODO NIKB use rvalue references to construct and move // reducing cost. - m_io_service.dispatch(m_strand.wrap(std::bind( - &ResolverAsioImpl::do_resolve, - this, - names, - handler, - CompletionCounter(this)))); + boost::asio::dispatch( + m_io_context, + boost::asio::bind_executor( + m_strand, + std::bind( + &ResolverAsioImpl::do_resolve, + this, + names, + handler, + CompletionCounter(this)))); } //------------------------------------------------------------------------- @@ -279,19 +290,20 @@ public: std::string name, boost::system::error_code const& ec, HandlerType handler, - boost::asio::ip::tcp::resolver::iterator iter, + boost::asio::ip::tcp::resolver::results_type results, CompletionCounter) { if (ec == boost::asio::error::operation_aborted) return; std::vector addresses; + auto iter = results.begin(); // If we get an error message back, we don't return any // results that we may have gotten. 
if (!ec) { - while (iter != boost::asio::ip::tcp::resolver::iterator()) + while (iter != results.end()) { addresses.push_back( beast::IPAddressConversion::from_asio(*iter)); @@ -301,8 +313,14 @@ public: handler(name, addresses); - m_io_service.post(m_strand.wrap(std::bind( - &ResolverAsioImpl::do_work, this, CompletionCounter(this)))); + boost::asio::post( + m_io_context, + boost::asio::bind_executor( + m_strand, + std::bind( + &ResolverAsioImpl::do_work, + this, + CompletionCounter(this)))); } HostAndPort @@ -383,16 +401,21 @@ public: { JLOG(m_journal.error()) << "Unable to parse '" << name << "'"; - m_io_service.post(m_strand.wrap(std::bind( - &ResolverAsioImpl::do_work, this, CompletionCounter(this)))); + boost::asio::post( + m_io_context, + boost::asio::bind_executor( + m_strand, + std::bind( + &ResolverAsioImpl::do_work, + this, + CompletionCounter(this)))); return; } - boost::asio::ip::tcp::resolver::query query(host, port); - m_resolver.async_resolve( - query, + host, + port, std::bind( &ResolverAsioImpl::do_finish, this, @@ -423,10 +446,14 @@ public: if (m_work.size() > 0) { - m_io_service.post(m_strand.wrap(std::bind( - &ResolverAsioImpl::do_work, - this, - CompletionCounter(this)))); + boost::asio::post( + m_io_context, + boost::asio::bind_executor( + m_strand, + std::bind( + &ResolverAsioImpl::do_work, + this, + CompletionCounter(this)))); } } } @@ -435,9 +462,9 @@ public: //----------------------------------------------------------------------------- std::unique_ptr -ResolverAsio::New(boost::asio::io_service& io_service, beast::Journal journal) +ResolverAsio::New(boost::asio::io_context& io_context, beast::Journal journal) { - return std::make_unique(io_service, journal); + return std::make_unique(io_context, journal); } //----------------------------------------------------------------------------- diff --git a/src/libxrpl/beast/insight/StatsDCollector.cpp b/src/libxrpl/beast/insight/StatsDCollector.cpp index b0e00c3cfd..7a3929e0d5 100644 --- a/src/libxrpl/beast/insight/StatsDCollector.cpp +++ b/src/libxrpl/beast/insight/StatsDCollector.cpp @@ -30,9 +30,11 @@ #include #include +#include #include #include -#include +#include +#include #include #include #include @@ -238,9 +240,11 @@ private: Journal m_journal; IP::Endpoint m_address; std::string m_prefix; - boost::asio::io_service m_io_service; - std::optional m_work; - boost::asio::io_service::strand m_strand; + boost::asio::io_context m_io_context; + std::optional> + m_work; + boost::asio::strand m_strand; boost::asio::basic_waitable_timer m_timer; boost::asio::ip::udp::socket m_socket; std::deque m_data; @@ -264,18 +268,24 @@ public: : m_journal(journal) , m_address(address) , m_prefix(prefix) - , m_work(std::ref(m_io_service)) - , m_strand(m_io_service) - , m_timer(m_io_service) - , m_socket(m_io_service) + , m_work(boost::asio::make_work_guard(m_io_context)) + , m_strand(boost::asio::make_strand(m_io_context)) + , m_timer(m_io_context) + , m_socket(m_io_context) , m_thread(&StatsDCollectorImp::run, this) { } ~StatsDCollectorImp() override { - boost::system::error_code ec; - m_timer.cancel(ec); + try + { + m_timer.cancel(); + } + catch (boost::system::system_error const&) + { + // ignored + } m_work.reset(); m_thread.join(); @@ -334,10 +344,10 @@ public: //-------------------------------------------------------------------------- - boost::asio::io_service& - get_io_service() + boost::asio::io_context& + get_io_context() { - return m_io_service; + return m_io_context; } std::string const& @@ -355,8 +365,14 @@ public: void 
post_buffer(std::string&& buffer) { - m_io_service.dispatch(m_strand.wrap(std::bind( - &StatsDCollectorImp::do_post_buffer, this, std::move(buffer)))); + boost::asio::dispatch( + m_io_context, + boost::asio::bind_executor( + m_strand, + std::bind( + &StatsDCollectorImp::do_post_buffer, + this, + std::move(buffer)))); } // The keepAlive parameter makes sure the buffers sent to @@ -386,8 +402,7 @@ public: for (auto const& buffer : buffers) { std::string const s( - boost::asio::buffer_cast(buffer), - boost::asio::buffer_size(buffer)); + buffer.data(), boost::asio::buffer_size(buffer)); std::cerr << s; } std::cerr << '\n'; @@ -456,7 +471,7 @@ public: set_timer() { using namespace std::chrono_literals; - m_timer.expires_from_now(1s); + m_timer.expires_after(1s); m_timer.async_wait(std::bind( &StatsDCollectorImp::on_timer, this, std::placeholders::_1)); } @@ -498,13 +513,13 @@ public: set_timer(); - m_io_service.run(); + m_io_context.run(); m_socket.shutdown(boost::asio::ip::udp::socket::shutdown_send, ec); m_socket.close(); - m_io_service.poll(); + m_io_context.poll(); } }; @@ -547,10 +562,12 @@ StatsDCounterImpl::~StatsDCounterImpl() void StatsDCounterImpl::increment(CounterImpl::value_type amount) { - m_impl->get_io_service().dispatch(std::bind( - &StatsDCounterImpl::do_increment, - std::static_pointer_cast(shared_from_this()), - amount)); + boost::asio::dispatch( + m_impl->get_io_context(), + std::bind( + &StatsDCounterImpl::do_increment, + std::static_pointer_cast(shared_from_this()), + amount)); } void @@ -592,10 +609,12 @@ StatsDEventImpl::StatsDEventImpl( void StatsDEventImpl::notify(EventImpl::value_type const& value) { - m_impl->get_io_service().dispatch(std::bind( - &StatsDEventImpl::do_notify, - std::static_pointer_cast(shared_from_this()), - value)); + boost::asio::dispatch( + m_impl->get_io_context(), + std::bind( + &StatsDEventImpl::do_notify, + std::static_pointer_cast(shared_from_this()), + value)); } void @@ -625,19 +644,23 @@ StatsDGaugeImpl::~StatsDGaugeImpl() void StatsDGaugeImpl::set(GaugeImpl::value_type value) { - m_impl->get_io_service().dispatch(std::bind( - &StatsDGaugeImpl::do_set, - std::static_pointer_cast(shared_from_this()), - value)); + boost::asio::dispatch( + m_impl->get_io_context(), + std::bind( + &StatsDGaugeImpl::do_set, + std::static_pointer_cast(shared_from_this()), + value)); } void StatsDGaugeImpl::increment(GaugeImpl::difference_type amount) { - m_impl->get_io_service().dispatch(std::bind( - &StatsDGaugeImpl::do_increment, - std::static_pointer_cast(shared_from_this()), - amount)); + boost::asio::dispatch( + m_impl->get_io_context(), + std::bind( + &StatsDGaugeImpl::do_increment, + std::static_pointer_cast(shared_from_this()), + amount)); } void @@ -713,10 +736,12 @@ StatsDMeterImpl::~StatsDMeterImpl() void StatsDMeterImpl::increment(MeterImpl::value_type amount) { - m_impl->get_io_service().dispatch(std::bind( - &StatsDMeterImpl::do_increment, - std::static_pointer_cast(shared_from_this()), - amount)); + boost::asio::dispatch( + m_impl->get_io_context(), + std::bind( + &StatsDMeterImpl::do_increment, + std::static_pointer_cast(shared_from_this()), + amount)); } void diff --git a/src/libxrpl/beast/net/IPAddressV4.cpp b/src/libxrpl/beast/net/IPAddressV4.cpp index 29455024f6..22162c2bbe 100644 --- a/src/libxrpl/beast/net/IPAddressV4.cpp +++ b/src/libxrpl/beast/net/IPAddressV4.cpp @@ -25,11 +25,11 @@ namespace IP { bool is_private(AddressV4 const& addr) { - return ((addr.to_ulong() & 0xff000000) == + return ((addr.to_uint() & 0xff000000) == 0x0a000000) || 
// Prefix /8, 10. #.#.# - ((addr.to_ulong() & 0xfff00000) == + ((addr.to_uint() & 0xfff00000) == 0xac100000) || // Prefix /12 172. 16.#.# - 172.31.#.# - ((addr.to_ulong() & 0xffff0000) == + ((addr.to_uint() & 0xffff0000) == 0xc0a80000) || // Prefix /16 192.168.#.# addr.is_loopback(); } @@ -44,7 +44,7 @@ char get_class(AddressV4 const& addr) { static char const* table = "AAAABBCD"; - return table[(addr.to_ulong() & 0xE0000000) >> 29]; + return table[(addr.to_uint() & 0xE0000000) >> 29]; } } // namespace IP diff --git a/src/libxrpl/beast/net/IPAddressV6.cpp b/src/libxrpl/beast/net/IPAddressV6.cpp index f90a6d066b..d1b86ba9bd 100644 --- a/src/libxrpl/beast/net/IPAddressV6.cpp +++ b/src/libxrpl/beast/net/IPAddressV6.cpp @@ -20,6 +20,8 @@ #include #include +#include + namespace beast { namespace IP { @@ -28,7 +30,9 @@ is_private(AddressV6 const& addr) { return ( (addr.to_bytes()[0] & 0xfd) || // TODO fc00::/8 too ? - (addr.is_v4_mapped() && is_private(addr.to_v4()))); + (addr.is_v4_mapped() && + is_private(boost::asio::ip::make_address_v4( + boost::asio::ip::v4_mapped, addr)))); } bool diff --git a/src/libxrpl/beast/net/IPEndpoint.cpp b/src/libxrpl/beast/net/IPEndpoint.cpp index ffe664498c..f1ffc23e82 100644 --- a/src/libxrpl/beast/net/IPEndpoint.cpp +++ b/src/libxrpl/beast/net/IPEndpoint.cpp @@ -21,6 +21,8 @@ #include #include +#include +#include #include #include @@ -167,7 +169,7 @@ operator>>(std::istream& is, Endpoint& endpoint) } boost::system::error_code ec; - auto addr = Address::from_string(addrStr, ec); + auto addr = boost::asio::ip::make_address(addrStr, ec); if (ec) { is.setstate(std::ios_base::failbit); diff --git a/src/libxrpl/net/HTTPClient.cpp b/src/libxrpl/net/HTTPClient.cpp index f7d540750a..964be32dd8 100644 --- a/src/libxrpl/net/HTTPClient.cpp +++ b/src/libxrpl/net/HTTPClient.cpp @@ -24,6 +24,7 @@ #include #include +#include #include #include #include @@ -55,16 +56,16 @@ class HTTPClientImp : public std::enable_shared_from_this, { public: HTTPClientImp( - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, unsigned short const port, std::size_t maxResponseSize, beast::Journal& j) - : mSocket(io_service, httpClientSSLContext->context()) - , mResolver(io_service) + : mSocket(io_context, httpClientSSLContext->context()) + , mResolver(io_context) , mHeader(maxClientHeaderBytes) , mPort(port) , maxResponseSize_(maxResponseSize) - , mDeadline(io_service) + , mDeadline(io_context) , j_(j) { } @@ -146,18 +147,21 @@ public: { JLOG(j_.trace()) << "Fetch: " << mDeqSites[0]; - auto query = std::make_shared( + auto query = std::make_shared( mDeqSites[0], std::to_string(mPort), boost::asio::ip::resolver_query_base::numeric_service); mQuery = query; - mDeadline.expires_from_now(mTimeout, mShutdown); - - JLOG(j_.trace()) << "expires_from_now: " << mShutdown.message(); - - if (!mShutdown) + try { + mDeadline.expires_after(mTimeout); + } + catch (boost::system::system_error const& e) + { + mShutdown = e.code(); + + JLOG(j_.trace()) << "expires_after: " << mShutdown.message(); mDeadline.async_wait(std::bind( &HTTPClientImp::handleDeadline, shared_from_this(), @@ -169,7 +173,9 @@ public: JLOG(j_.trace()) << "Resolving: " << mDeqSites[0]; mResolver.async_resolve( - *mQuery, + mQuery->host, + mQuery->port, + mQuery->flags, std::bind( &HTTPClientImp::handleResolve, shared_from_this(), @@ -233,7 +239,7 @@ public: void handleResolve( boost::system::error_code const& ecResult, - boost::asio::ip::tcp::resolver::iterator itrEndpoint) + boost::asio::ip::tcp::resolver::results_type 
result) { if (!mShutdown) { @@ -255,7 +261,7 @@ public: boost::asio::async_connect( mSocket.lowest_layer(), - itrEndpoint, + result, std::bind( &HTTPClientImp::handleConnect, shared_from_this(), @@ -475,13 +481,15 @@ public: std::string const& strData = "") { boost::system::error_code ecCancel; - - (void)mDeadline.cancel(ecCancel); - - if (ecCancel) + try { - JLOG(j_.trace()) << "invokeComplete: Deadline cancel error: " - << ecCancel.message(); + mDeadline.cancel(); + } + catch (boost::system::system_error const& e) + { + JLOG(j_.trace()) + << "invokeComplete: Deadline cancel error: " << e.what(); + ecCancel = e.code(); } JLOG(j_.debug()) << "invokeComplete: Deadline popping: " @@ -515,7 +523,15 @@ private: bool mSSL; AutoSocket mSocket; boost::asio::ip::tcp::resolver mResolver; - std::shared_ptr mQuery; + + struct Query + { + std::string host; + std::string port; + boost::asio::ip::resolver_query_base::flags flags; + }; + std::shared_ptr mQuery; + boost::asio::streambuf mRequest; boost::asio::streambuf mHeader; boost::asio::streambuf mResponse; @@ -546,7 +562,7 @@ private: void HTTPClient::get( bool bSSL, - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, std::deque deqSites, unsigned short const port, std::string const& strPath, @@ -559,14 +575,14 @@ HTTPClient::get( beast::Journal& j) { auto client = - std::make_shared(io_service, port, responseMax, j); + std::make_shared(io_context, port, responseMax, j); client->get(bSSL, deqSites, strPath, timeout, complete); } void HTTPClient::get( bool bSSL, - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, std::string strSite, unsigned short const port, std::string const& strPath, @@ -581,14 +597,14 @@ HTTPClient::get( std::deque deqSites(1, strSite); auto client = - std::make_shared(io_service, port, responseMax, j); + std::make_shared(io_context, port, responseMax, j); client->get(bSSL, deqSites, strPath, timeout, complete); } void HTTPClient::request( bool bSSL, - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, std::string strSite, unsigned short const port, std::function @@ -604,7 +620,7 @@ HTTPClient::request( std::deque deqSites(1, strSite); auto client = - std::make_shared(io_service, port, responseMax, j); + std::make_shared(io_context, port, responseMax, j); client->request(bSSL, deqSites, setRequest, timeout, complete); } diff --git a/src/libxrpl/server/Port.cpp b/src/libxrpl/server/Port.cpp index 95709fc298..be86a77a9f 100644 --- a/src/libxrpl/server/Port.cpp +++ b/src/libxrpl/server/Port.cpp @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -219,7 +220,7 @@ parse_Port(ParsedPort& port, Section const& section, std::ostream& log) { try { - port.ip = boost::asio::ip::address::from_string(*optResult); + port.ip = boost::asio::ip::make_address(*optResult); } catch (std::exception const&) { diff --git a/src/test/app/DNS_test.cpp b/src/test/app/DNS_test.cpp index 28a143e93d..c4e476de9f 100644 --- a/src/test/app/DNS_test.cpp +++ b/src/test/app/DNS_test.cpp @@ -63,7 +63,7 @@ public: pUrl_.domain, pUrl_.path, port_, - env_.app().getIOService(), + env_.app().getIOContext(), env_.journal, env_.app().config(), lastEndpoint, @@ -80,10 +80,11 @@ public: isMultipleEndpoints() { using boost::asio::ip::tcp; - tcp::resolver resolver(env_.app().getIOService()); + tcp::resolver resolver(env_.app().getIOContext()); std::string port = pUrl_.port ? 
std::to_string(*pUrl_.port) : "443"; - tcp::resolver::iterator it = resolver.resolve(pUrl_.domain, port); - tcp::resolver::iterator end; + auto results = resolver.resolve(pUrl_.domain, port); + auto it = results.begin(); + auto end = results.end(); int n = 0; for (; it != end; ++it) ++n; diff --git a/src/test/app/LedgerReplay_test.cpp b/src/test/app/LedgerReplay_test.cpp index 76ab5b3218..88d944d789 100644 --- a/src/test/app/LedgerReplay_test.cpp +++ b/src/test/app/LedgerReplay_test.cpp @@ -1107,7 +1107,7 @@ struct LedgerReplayer_test : public beast::unit_test::suite return false; beast::IP::Address addr = - boost::asio::ip::address::from_string("172.1.1.100"); + boost::asio::ip::make_address("172.1.1.100"); jtx::Env serverEnv(*this); serverEnv.app().config().LEDGER_REPLAY = server; auto http_resp = ripple::makeResponse( diff --git a/src/test/app/ValidatorSite_test.cpp b/src/test/app/ValidatorSite_test.cpp index 7a7511e6f0..579cd79a5a 100644 --- a/src/test/app/ValidatorSite_test.cpp +++ b/src/test/app/ValidatorSite_test.cpp @@ -205,7 +205,7 @@ private: NetClock::time_point const expires2 = effective2 + cfg.expiresFromNow; item.server = make_TrustedPublisherServer( - env.app().getIOService(), + env.app().getIOContext(), item.list, expires, {{effective2, expires2}}, diff --git a/src/test/beast/IPEndpoint_test.cpp b/src/test/beast/IPEndpoint_test.cpp index aed6d715d4..a99dccf5a0 100644 --- a/src/test/beast/IPEndpoint_test.cpp +++ b/src/test/beast/IPEndpoint_test.cpp @@ -45,13 +45,13 @@ public: std::string const& normal = "") { boost::system::error_code ec; - Address const result{Address::from_string(s, ec)}; + Address const result{boost::asio::ip::make_address(s, ec)}; if (!BEAST_EXPECTS(!ec, ec.message())) return; if (!BEAST_EXPECTS(result.is_v4(), s + " not v4")) return; if (!BEAST_EXPECTS( - result.to_v4().to_ulong() == value, s + " value mismatch")) + result.to_v4().to_uint() == value, s + " value mismatch")) return; BEAST_EXPECTS( result.to_string() == (normal.empty() ? 
s : normal), @@ -62,7 +62,7 @@ public: failParseAddr(std::string const& s) { boost::system::error_code ec; - auto a = Address::from_string(s, ec); + auto a = boost::asio::ip::make_address(s, ec); BEAST_EXPECTS(ec, s + " parses as " + a.to_string()); } @@ -71,24 +71,24 @@ public: { testcase("AddressV4"); - BEAST_EXPECT(AddressV4{}.to_ulong() == 0); + BEAST_EXPECT(AddressV4{}.to_uint() == 0); BEAST_EXPECT(is_unspecified(AddressV4{})); - BEAST_EXPECT(AddressV4{0x01020304}.to_ulong() == 0x01020304); + BEAST_EXPECT(AddressV4{0x01020304}.to_uint() == 0x01020304); { AddressV4::bytes_type d = {{1, 2, 3, 4}}; - BEAST_EXPECT(AddressV4{d}.to_ulong() == 0x01020304); + BEAST_EXPECT(AddressV4{d}.to_uint() == 0x01020304); unexpected(is_unspecified(AddressV4{d})); } AddressV4 const v1{1}; - BEAST_EXPECT(AddressV4{v1}.to_ulong() == 1); + BEAST_EXPECT(AddressV4{v1}.to_uint() == 1); { AddressV4 v; v = v1; - BEAST_EXPECT(v.to_ulong() == v1.to_ulong()); + BEAST_EXPECT(v.to_uint() == v1.to_uint()); } { @@ -99,7 +99,7 @@ public: d[2] = 3; d[3] = 4; v = AddressV4{d}; - BEAST_EXPECT(v.to_ulong() == 0x01020304); + BEAST_EXPECT(v.to_uint() == 0x01020304); } BEAST_EXPECT(AddressV4(0x01020304).to_string() == "1.2.3.4"); @@ -161,7 +161,7 @@ public: testcase("Address"); boost::system::error_code ec; - Address result{Address::from_string("1.2.3.4", ec)}; + Address result{boost::asio::ip::make_address("1.2.3.4", ec)}; AddressV4::bytes_type d = {{1, 2, 3, 4}}; BEAST_EXPECT(!ec); BEAST_EXPECT(result.is_v4() && result.to_v4() == AddressV4{d}); @@ -263,7 +263,10 @@ public: BEAST_EXPECT(is_loopback(ep)); BEAST_EXPECT(to_string(ep) == "127.0.0.1:80"); // same address as v4 mapped in ipv6 - ep = Endpoint(AddressV6::v4_mapped(AddressV4{d}), 80); + ep = Endpoint( + boost::asio::ip::make_address_v6( + boost::asio::ip::v4_mapped, AddressV4{d}), + 80); BEAST_EXPECT(!is_unspecified(ep)); BEAST_EXPECT(!is_public(ep)); BEAST_EXPECT(is_private(ep)); @@ -281,8 +284,11 @@ public: BEAST_EXPECT(!is_loopback(ep)); BEAST_EXPECT(to_string(ep) == "10.0.0.1"); // same address as v4 mapped in ipv6 - ep = Endpoint(AddressV6::v4_mapped(AddressV4{d})); - BEAST_EXPECT(get_class(ep.to_v6().to_v4()) == 'A'); + ep = Endpoint(boost::asio::ip::make_address_v6( + boost::asio::ip::v4_mapped, AddressV4{d})); + BEAST_EXPECT( + get_class(boost::asio::ip::make_address_v4( + boost::asio::ip::v4_mapped, ep.to_v6())) == 'A'); BEAST_EXPECT(!is_unspecified(ep)); BEAST_EXPECT(!is_public(ep)); BEAST_EXPECT(is_private(ep)); @@ -299,7 +305,8 @@ public: BEAST_EXPECT(!is_loopback(ep)); BEAST_EXPECT(to_string(ep) == "166.78.151.147"); // same address as v4 mapped in ipv6 - ep = Endpoint(AddressV6::v4_mapped(AddressV4{d})); + ep = Endpoint(boost::asio::ip::make_address_v6( + boost::asio::ip::v4_mapped, AddressV4{d})); BEAST_EXPECT(!is_unspecified(ep)); BEAST_EXPECT(is_public(ep)); BEAST_EXPECT(!is_private(ep)); diff --git a/src/test/beast/beast_io_latency_probe_test.cpp b/src/test/beast/beast_io_latency_probe_test.cpp index c72336bf27..841272d05a 100644 --- a/src/test/beast/beast_io_latency_probe_test.cpp +++ b/src/test/beast/beast_io_latency_probe_test.cpp @@ -23,7 +23,8 @@ #include #include -#include +#include +#include #include #include @@ -60,8 +61,10 @@ class io_latency_probe_test : public beast::unit_test::suite, measure_asio_timers(duration interval = 100ms, size_t num_samples = 50) { using namespace std::chrono; - boost::asio::io_service ios; - std::optional work{ios}; + boost::asio::io_context ios; + std::optional> + work{boost::asio::make_work_guard(ios)}; 
std::thread worker{[&] { ios.run(); }}; boost::asio::basic_waitable_timer timer{ios}; elapsed_times_.reserve(num_samples); @@ -135,7 +138,7 @@ class io_latency_probe_test : public beast::unit_test::suite, test_sampler( std::chrono::milliseconds interval, - boost::asio::io_service& ios) + boost::asio::io_context& ios) : probe_(interval, ios) { } @@ -164,9 +167,9 @@ class io_latency_probe_test : public beast::unit_test::suite, { testcase << "sample one"; boost::system::error_code ec; - test_sampler io_probe{100ms, get_io_service()}; + test_sampler io_probe{100ms, get_io_context()}; io_probe.start_one(); - MyTimer timer{get_io_service(), 1s}; + MyTimer timer{get_io_context(), 1s}; timer.async_wait(yield[ec]); if (!BEAST_EXPECTS(!ec, ec.message())) return; @@ -198,9 +201,9 @@ class io_latency_probe_test : public beast::unit_test::suite, duration_cast(probe_duration).count()) / static_cast(tt.getMean()); #endif - test_sampler io_probe{interval, get_io_service()}; + test_sampler io_probe{interval, get_io_context()}; io_probe.start(); - MyTimer timer{get_io_service(), probe_duration}; + MyTimer timer{get_io_context(), probe_duration}; timer.async_wait(yield[ec]); if (!BEAST_EXPECTS(!ec, ec.message())) return; @@ -212,7 +215,7 @@ class io_latency_probe_test : public beast::unit_test::suite, io_probe.probe_.cancel_async(); // wait again in order to flush the remaining // probes from the work queue - timer.expires_from_now(1s); + timer.expires_after(1s); timer.async_wait(yield[ec]); } @@ -220,7 +223,7 @@ class io_latency_probe_test : public beast::unit_test::suite, testCanceled(boost::asio::yield_context& yield) { testcase << "canceled"; - test_sampler io_probe{100ms, get_io_service()}; + test_sampler io_probe{100ms, get_io_context()}; io_probe.probe_.cancel_async(); except([&io_probe]() { io_probe.start_one(); }); except([&io_probe]() { io_probe.start(); }); diff --git a/src/test/jtx/TrustedPublisherServer.h b/src/test/jtx/TrustedPublisherServer.h index 7bc092cbe3..26e676c024 100644 --- a/src/test/jtx/TrustedPublisherServer.h +++ b/src/test/jtx/TrustedPublisherServer.h @@ -183,7 +183,7 @@ public: bool immediateStart = true, int sequence = 1) : sock_{ioc} - , ep_{beast::IP::Address::from_string( + , ep_{boost::asio::ip::make_address( ripple::test::getEnvLocalhostAddr()), // 0 means let OS pick the port based on what's available 0} @@ -284,7 +284,7 @@ public: acceptor_.set_option( boost::asio::ip::tcp::acceptor::reuse_address(true), ec); acceptor_.bind(ep_); - acceptor_.listen(boost::asio::socket_base::max_connections); + acceptor_.listen(boost::asio::socket_base::max_listen_connections); acceptor_.async_accept( sock_, [wp = std::weak_ptr{shared_from_this()}]( diff --git a/src/test/jtx/impl/JSONRPCClient.cpp b/src/test/jtx/impl/JSONRPCClient.cpp index 4db13c95fd..a4c5817788 100644 --- a/src/test/jtx/impl/JSONRPCClient.cpp +++ b/src/test/jtx/impl/JSONRPCClient.cpp @@ -78,7 +78,7 @@ class JSONRPCClient : public AbstractClient } boost::asio::ip::tcp::endpoint ep_; - boost::asio::io_service ios_; + boost::asio::io_context ios_; boost::asio::ip::tcp::socket stream_; boost::beast::multi_buffer bin_; boost::beast::multi_buffer bout_; diff --git a/src/test/jtx/impl/WSClient.cpp b/src/test/jtx/impl/WSClient.cpp index 20cca3179a..a3dc7d9733 100644 --- a/src/test/jtx/impl/WSClient.cpp +++ b/src/test/jtx/impl/WSClient.cpp @@ -25,6 +25,9 @@ #include #include +#include +#include +#include #include #include @@ -89,9 +92,11 @@ class WSClientImpl : public WSClient return s; } - boost::asio::io_service ios_; - 
std::optional work_; - boost::asio::io_service::strand strand_; + boost::asio::io_context ios_; + std::optional> + work_; + boost::asio::strand strand_; std::thread thread_; boost::asio::ip::tcp::socket stream_; boost::beast::websocket::stream ws_; @@ -114,14 +119,24 @@ class WSClientImpl : public WSClient void cleanup() { - ios_.post(strand_.wrap([this] { - if (!peerClosed_) - { - ws_.async_close({}, strand_.wrap([&](error_code ec) { - stream_.cancel(ec); - })); - } - })); + boost::asio::post( + ios_, boost::asio::bind_executor(strand_, [this] { + if (!peerClosed_) + { + ws_.async_close( + {}, + boost::asio::bind_executor(strand_, [&](error_code) { + try + { + stream_.cancel(); + } + catch (boost::system::system_error const&) + { + // ignored + } + })); + } + })); work_ = std::nullopt; thread_.join(); } @@ -132,8 +147,8 @@ public: bool v2, unsigned rpc_version, std::unordered_map const& headers = {}) - : work_(ios_) - , strand_(ios_) + : work_(std::in_place, boost::asio::make_work_guard(ios_)) + , strand_(boost::asio::make_strand(ios_)) , thread_([&] { ios_.run(); }) , stream_(ios_) , ws_(stream_) @@ -153,8 +168,12 @@ public: "/"); ws_.async_read( rb_, - strand_.wrap(std::bind( - &WSClientImpl::on_read_msg, this, std::placeholders::_1))); + boost::asio::bind_executor( + strand_, + std::bind( + &WSClientImpl::on_read_msg, + this, + std::placeholders::_1))); } catch (std::exception&) { @@ -284,8 +303,10 @@ private: } ws_.async_read( rb_, - strand_.wrap(std::bind( - &WSClientImpl::on_read_msg, this, std::placeholders::_1))); + boost::asio::bind_executor( + strand_, + std::bind( + &WSClientImpl::on_read_msg, this, std::placeholders::_1))); } // Called when the read op terminates diff --git a/src/test/overlay/compression_test.cpp b/src/test/overlay/compression_test.cpp index 01be43d58b..4bfbcae4f0 100644 --- a/src/test/overlay/compression_test.cpp +++ b/src/test/overlay/compression_test.cpp @@ -485,7 +485,7 @@ public: }; auto handshake = [&](int outboundEnable, int inboundEnable) { beast::IP::Address addr = - boost::asio::ip::address::from_string("172.1.1.100"); + boost::asio::ip::make_address("172.1.1.100"); auto env = getEnv(outboundEnable); auto request = ripple::makeRequest( diff --git a/src/test/overlay/reduce_relay_test.cpp b/src/test/overlay/reduce_relay_test.cpp index 0047454cf9..e53f53f2db 100644 --- a/src/test/overlay/reduce_relay_test.cpp +++ b/src/test/overlay/reduce_relay_test.cpp @@ -1655,7 +1655,7 @@ vp_base_squelch_max_selected_peers=2 }; auto handshake = [&](int outboundEnable, int inboundEnable) { beast::IP::Address addr = - boost::asio::ip::address::from_string("172.1.1.100"); + boost::asio::ip::make_address("172.1.1.100"); setEnv(outboundEnable); auto request = ripple::makeRequest( diff --git a/src/test/overlay/short_read_test.cpp b/src/test/overlay/short_read_test.cpp index 739d7ea954..88c6e7698b 100644 --- a/src/test/overlay/short_read_test.cpp +++ b/src/test/overlay/short_read_test.cpp @@ -23,12 +23,17 @@ #include #include -#include +#include +#include +#include +#include #include +#include +#include #include +#include #include -#include #include #include @@ -49,7 +54,7 @@ class short_read_test : public beast::unit_test::suite { private: using io_context_type = boost::asio::io_context; - using strand_type = boost::asio::io_context::strand; + using strand_type = boost::asio::strand; using timer_type = boost::asio::basic_waitable_timer; using acceptor_type = boost::asio::ip::tcp::acceptor; @@ -60,7 +65,8 @@ private: using address_type = boost::asio::ip::address; 
io_context_type io_context_; - std::optional> + boost::optional> work_; std::thread thread_; std::shared_ptr context_; @@ -72,7 +78,7 @@ private: using boost::asio::buffer; using boost::asio::buffer_copy; using boost::asio::buffer_size; - boost::asio::const_buffers_1 buf(s.data(), s.size()); + boost::asio::const_buffer buf(s.data(), s.size()); sb.commit(buffer_copy(sb.prepare(buffer_size(buf)), buf)); } @@ -185,11 +191,11 @@ private: , acceptor_( test_.io_context_, endpoint_type( - beast::IP::Address::from_string( + boost::asio::ip::make_address( test::getEnvLocalhostAddr()), 0)) , socket_(test_.io_context_) - , strand_(test_.io_context_) + , strand_(boost::asio::make_strand(test_.io_context_)) { acceptor_.listen(); server_.endpoint_ = acceptor_.local_endpoint(); @@ -265,7 +271,7 @@ private: , test_(server_.test_) , socket_(std::move(socket)) , stream_(socket_, *test_.context_) - , strand_(test_.io_context_) + , strand_(boost::asio::make_strand(test_.io_context_)) , timer_(test_.io_context_) { } @@ -287,7 +293,7 @@ private: void run() { - timer_.expires_from_now(std::chrono::seconds(3)); + timer_.expires_after(std::chrono::seconds(3)); timer_.async_wait(bind_executor( strand_, std::bind( @@ -450,7 +456,7 @@ private: , test_(client_.test_) , socket_(test_.io_context_) , stream_(socket_, *test_.context_) - , strand_(test_.io_context_) + , strand_(boost::asio::make_strand(test_.io_context_)) , timer_(test_.io_context_) , ep_(ep) { @@ -473,7 +479,7 @@ private: void run(endpoint_type const& ep) { - timer_.expires_from_now(std::chrono::seconds(3)); + timer_.expires_after(std::chrono::seconds(3)); timer_.async_wait(bind_executor( strand_, std::bind( diff --git a/src/test/overlay/tx_reduce_relay_test.cpp b/src/test/overlay/tx_reduce_relay_test.cpp index 0024f2b98e..0c67fd581c 100644 --- a/src/test/overlay/tx_reduce_relay_test.cpp +++ b/src/test/overlay/tx_reduce_relay_test.cpp @@ -174,13 +174,13 @@ private: makeFeaturesRequestHeader(false, false, true, false)) : (void)nDisabled--; auto stream_ptr = std::make_unique( - socket_type(std::forward( - env.app().getIOService())), + socket_type(std::forward( + env.app().getIOContext())), *context_); beast::IP::Endpoint local( - beast::IP::Address::from_string("172.1.1." + std::to_string(lid_))); + boost::asio::ip::make_address("172.1.1." + std::to_string(lid_))); beast::IP::Endpoint remote( - beast::IP::Address::from_string("172.1.1." + std::to_string(rid_))); + boost::asio::ip::make_address("172.1.1." + std::to_string(rid_))); PublicKey key(std::get<0>(randomKeyPair(KeyType::ed25519))); auto consumer = overlay.resourceManager().newInboundEndpoint(remote); auto slot = overlay.peerFinder().new_inbound_slot(local, remote); diff --git a/src/test/rpc/ValidatorRPC_test.cpp b/src/test/rpc/ValidatorRPC_test.cpp index d139a662de..bc54c8567c 100644 --- a/src/test/rpc/ValidatorRPC_test.cpp +++ b/src/test/rpc/ValidatorRPC_test.cpp @@ -187,14 +187,14 @@ public: for (auto const& val : validators) expectedKeys.insert(toStr(val.masterPublic)); - // Manage single-thread io_service for server. + // Manage single-thread io_context for server. 
BasicApp worker{1}; using namespace std::chrono_literals; NetClock::time_point const validUntil{3600s}; NetClock::time_point const validFrom2{validUntil - 60s}; NetClock::time_point const validUntil2{validFrom2 + 3600s}; auto server = make_TrustedPublisherServer( - worker.get_io_service(), + worker.get_io_context(), validators, validUntil, {{validFrom2, validUntil2}}, diff --git a/src/test/server/ServerStatus_test.cpp b/src/test/server/ServerStatus_test.cpp index b27dee6e0a..8bbad2cd99 100644 --- a/src/test/server/ServerStatus_test.cpp +++ b/src/test/server/ServerStatus_test.cpp @@ -33,6 +33,7 @@ #include #include +#include #include #include #include @@ -165,12 +166,11 @@ class ServerStatus_test : public beast::unit_test::suite, { using namespace boost::asio; using namespace boost::beast::http; - io_service& ios = get_io_service(); + io_context& ios = get_io_context(); ip::tcp::resolver r{ios}; boost::beast::multi_buffer sb; - auto it = r.async_resolve( - ip::tcp::resolver::query{host, std::to_string(port)}, yield[ec]); + auto it = r.async_resolve(host, std::to_string(port), yield[ec]); if (ec) return; @@ -476,12 +476,11 @@ class ServerStatus_test : public beast::unit_test::suite, auto req_string = boost::lexical_cast(req); req_string.erase(req_string.find_last_of("13"), std::string::npos); - io_service& ios = get_io_service(); + io_context& ios = get_io_context(); ip::tcp::resolver r{ios}; boost::beast::multi_buffer sb; - auto it = r.async_resolve( - ip::tcp::resolver::query{*ip, std::to_string(*port)}, yield[ec]); + auto it = r.async_resolve(*ip, std::to_string(*port), yield[ec]); if (!BEAST_EXPECTS(!ec, ec.message())) return; @@ -610,14 +609,13 @@ class ServerStatus_test : public beast::unit_test::suite, env.app().config()["port_rpc"].get("ip").value(); boost::system::error_code ec; - io_service& ios = get_io_service(); + io_context& ios = get_io_context(); ip::tcp::resolver r{ios}; Json::Value jr; jr[jss::method] = "server_info"; - auto it = r.async_resolve( - ip::tcp::resolver::query{ip, std::to_string(port)}, yield[ec]); + auto it = r.async_resolve(ip, std::to_string(port), yield[ec]); BEAST_EXPECT(!ec); std::vector> @@ -681,7 +679,7 @@ class ServerStatus_test : public beast::unit_test::suite, resp["Upgrade"] == "websocket"); BEAST_EXPECT( resp.find("Connection") != resp.end() && - resp["Connection"] == "Upgrade"); + boost::iequals(resp["Connection"], "upgrade")); } void @@ -728,11 +726,10 @@ class ServerStatus_test : public beast::unit_test::suite, env.app().config()["port_ws"].get("ip").value(); boost::system::error_code ec; - io_service& ios = get_io_service(); + io_context& ios = get_io_context(); ip::tcp::resolver r{ios}; - auto it = r.async_resolve( - ip::tcp::resolver::query{ip, std::to_string(port)}, yield[ec]); + auto it = r.async_resolve(ip, std::to_string(port), yield[ec]); if (!BEAST_EXPECT(!ec)) return; diff --git a/src/test/server/Server_test.cpp b/src/test/server/Server_test.cpp index fab271ff1c..874558f428 100644 --- a/src/test/server/Server_test.cpp +++ b/src/test/server/Server_test.cpp @@ -31,6 +31,7 @@ #include #include +#include #include #include #include @@ -52,14 +53,16 @@ public: class TestThread { private: - boost::asio::io_service io_service_; - std::optional work_; + boost::asio::io_context io_context_; + std::optional> + work_; std::thread thread_; public: TestThread() - : work_(std::in_place, std::ref(io_service_)) - , thread_([&]() { this->io_service_.run(); }) + : work_(std::in_place, boost::asio::make_work_guard(io_context_)) + , thread_([&]() { 
this->io_context_.run(); }) { } @@ -69,10 +72,10 @@ public: thread_.join(); } - boost::asio::io_service& - get_io_service() + boost::asio::io_context& + get_io_context() { - return io_service_; + return io_context_; } }; @@ -234,7 +237,7 @@ public: void test_request(boost::asio::ip::tcp::endpoint const& ep) { - boost::asio::io_service ios; + boost::asio::io_context ios; using socket = boost::asio::ip::tcp::socket; socket s(ios); @@ -260,7 +263,7 @@ public: void test_keepalive(boost::asio::ip::tcp::endpoint const& ep) { - boost::asio::io_service ios; + boost::asio::io_context ios; using socket = boost::asio::ip::tcp::socket; socket s(ios); @@ -300,10 +303,10 @@ public: sink.threshold(beast::severities::Severity::kAll); beast::Journal journal{sink}; TestHandler handler; - auto s = make_Server(handler, thread.get_io_service(), journal); + auto s = make_Server(handler, thread.get_io_context(), journal); std::vector serverPort(1); serverPort.back().ip = - beast::IP::Address::from_string(getEnvLocalhostAddr()), + boost::asio::ip::make_address(getEnvLocalhostAddr()), serverPort.back().port = 0; serverPort.back().protocol.insert("http"); auto eps = s->ports(serverPort); @@ -375,10 +378,10 @@ public: for (int i = 0; i < 1000; ++i) { TestThread thread; - auto s = make_Server(h, thread.get_io_service(), journal); + auto s = make_Server(h, thread.get_io_context(), journal); std::vector serverPort(1); serverPort.back().ip = - beast::IP::Address::from_string(getEnvLocalhostAddr()), + boost::asio::ip::make_address(getEnvLocalhostAddr()), serverPort.back().port = 0; serverPort.back().protocol.insert("http"); s->ports(serverPort); diff --git a/src/xrpld/app/ledger/detail/TimeoutCounter.cpp b/src/xrpld/app/ledger/detail/TimeoutCounter.cpp index e81ec6574d..774b70e4d1 100644 --- a/src/xrpld/app/ledger/detail/TimeoutCounter.cpp +++ b/src/xrpld/app/ledger/detail/TimeoutCounter.cpp @@ -39,7 +39,7 @@ TimeoutCounter::TimeoutCounter( , progress_(false) , timerInterval_(interval) , queueJobParameter_(std::move(jobParameter)) - , timer_(app_.getIOService()) + , timer_(app_.getIOContext()) { XRPL_ASSERT( (timerInterval_ > 10ms) && (timerInterval_ < 30s), diff --git a/src/xrpld/app/ledger/detail/TimeoutCounter.h b/src/xrpld/app/ledger/detail/TimeoutCounter.h index 85ce6fc3b4..8da290dd36 100644 --- a/src/xrpld/app/ledger/detail/TimeoutCounter.h +++ b/src/xrpld/app/ledger/detail/TimeoutCounter.h @@ -120,7 +120,7 @@ protected: return complete_ || failed_; } - // Used in this class for access to boost::asio::io_service and + // Used in this class for access to boost::asio::io_context and // ripple::Overlay. Used in subtypes for the kitchen sink. 
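
The Application.cpp and NetworkOPs.cpp hunks that follow wrap timer cancellation in try/catch because recent Boost.Asio releases deprecate and then remove the error_code overloads of cancel(). A standalone sketch of that pattern (illustrative only, assuming a Boost.Asio version without the non-throwing overload):

    #include <boost/asio/error.hpp>
    #include <boost/asio/io_context.hpp>
    #include <boost/asio/steady_timer.hpp>
    #include <boost/system/system_error.hpp>

    #include <chrono>
    #include <iostream>

    int main()
    {
        boost::asio::io_context ioc;
        boost::asio::steady_timer timer{ioc};

        timer.expires_after(std::chrono::seconds(10));
        timer.async_wait([](boost::system::error_code const& ec) {
            if (ec == boost::asio::error::operation_aborted)
                std::cout << "wait cancelled\n";
        });

        try
        {
            // Only the throwing overload remains; failures surface as exceptions.
            timer.cancel();
        }
        catch (boost::system::system_error const& e)
        {
            std::cout << "cancel failed: " << e.what() << '\n';
        }

        ioc.run();
    }
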
Application& app_; beast::Journal journal_; diff --git a/src/xrpld/app/main/Application.cpp b/src/xrpld/app/main/Application.cpp index c824eccfba..beaf85ce2e 100644 --- a/src/xrpld/app/main/Application.cpp +++ b/src/xrpld/app/main/Application.cpp @@ -83,7 +83,6 @@ #include #include #include -#include #include namespace ripple { @@ -108,7 +107,7 @@ private: beast::insight::Event ev, beast::Journal journal, std::chrono::milliseconds interval, - boost::asio::io_service& ios) + boost::asio::io_context& ios) : m_event(ev) , m_journal(journal) , m_probe(interval, ios) @@ -136,7 +135,7 @@ private: if (lastSample >= 500ms) { JLOG(m_journal.warn()) - << "io_service latency = " << lastSample.count(); + << "io_context latency = " << lastSample.count(); } } @@ -405,7 +404,7 @@ public: *m_jobQueue, *m_ledgerMaster, validatorKeys_, - get_io_service(), + get_io_context(), logs_->journal("NetworkOPs"), m_collectorManager->collector())) @@ -432,7 +431,7 @@ public: , serverHandler_(make_ServerHandler( *this, - get_io_service(), + get_io_context(), *m_jobQueue, *m_networkOPs, *m_resourceManager, @@ -456,22 +455,22 @@ public: , txQ_( std::make_unique(setup_TxQ(*config_), logs_->journal("TxQ"))) - , sweepTimer_(get_io_service()) + , sweepTimer_(get_io_context()) - , entropyTimer_(get_io_service()) + , entropyTimer_(get_io_context()) - , m_signals(get_io_service()) + , m_signals(get_io_context()) , checkSigs_(true) , m_resolver( - ResolverAsio::New(get_io_service(), logs_->journal("Resolver"))) + ResolverAsio::New(get_io_context(), logs_->journal("Resolver"))) , m_io_latency_sampler( m_collectorManager->collector()->make_event("ios_latency"), logs_->journal("Application"), std::chrono::milliseconds(100), - get_io_service()) + get_io_context()) , grpcServer_(std::make_unique(*this)) { initAccountIdCache(config_->getValueFor(SizedItem::accountIdCacheSize)); @@ -594,10 +593,10 @@ public: return *serverHandler_; } - boost::asio::io_service& - getIOService() override + boost::asio::io_context& + getIOContext() override { - return get_io_service(); + return get_io_context(); } std::chrono::milliseconds @@ -935,9 +934,8 @@ public: })) { using namespace std::chrono; - sweepTimer_.expires_from_now( - seconds{config_->SWEEP_INTERVAL.value_or( - config_->getValueFor(SizedItem::sweepInterval))}); + sweepTimer_.expires_after(seconds{config_->SWEEP_INTERVAL.value_or( + config_->getValueFor(SizedItem::sweepInterval))}); sweepTimer_.async_wait(std::move(*optionalCountedHandler)); } } @@ -966,7 +964,7 @@ public: })) { using namespace std::chrono_literals; - entropyTimer_.expires_from_now(5min); + entropyTimer_.expires_after(5min); entropyTimer_.async_wait(std::move(*optionalCountedHandler)); } } @@ -1398,7 +1396,7 @@ ApplicationImp::setup(boost::program_options::variables_map const& cmdline) *serverHandler_, *m_resourceManager, *m_resolver, - get_io_service(), + get_io_context(), *config_, m_collectorManager->collector()); add(*overlay_); // add to PropertyStream @@ -1571,11 +1569,11 @@ ApplicationImp::run() m_io_latency_sampler.cancel_async(); // VFALCO Enormous hack, we have to force the probe to cancel - // before we stop the io_service queue or else it never + // before we stop the io_context queue or else it never // unblocks in its destructor. 
The fix is to make all // io_objects gracefully handle exit so that we can - // naturally return from io_service::run() instead of - // forcing a call to io_service::stop() + // naturally return from io_context::run() instead of + // forcing a call to io_context::stop() m_io_latency_sampler.cancel(); m_resolver->stop_async(); @@ -1586,20 +1584,24 @@ ApplicationImp::run() m_resolver->stop(); { - boost::system::error_code ec; - sweepTimer_.cancel(ec); - if (ec) + try + { + sweepTimer_.cancel(); + } + catch (boost::system::system_error const& e) { JLOG(m_journal.error()) - << "Application: sweepTimer cancel error: " << ec.message(); + << "Application: sweepTimer cancel error: " << e.what(); } - ec.clear(); - entropyTimer_.cancel(ec); - if (ec) + try + { + entropyTimer_.cancel(); + } + catch (boost::system::system_error const& e) { JLOG(m_journal.error()) - << "Application: entropyTimer cancel error: " << ec.message(); + << "Application: entropyTimer cancel error: " << e.what(); } } diff --git a/src/xrpld/app/main/Application.h b/src/xrpld/app/main/Application.h index 36477cb75c..b3a433fee8 100644 --- a/src/xrpld/app/main/Application.h +++ b/src/xrpld/app/main/Application.h @@ -162,8 +162,8 @@ public: virtual Config& config() = 0; - virtual boost::asio::io_service& - getIOService() = 0; + virtual boost::asio::io_context& + getIOContext() = 0; virtual CollectorManager& getCollectorManager() = 0; diff --git a/src/xrpld/app/main/BasicApp.cpp b/src/xrpld/app/main/BasicApp.cpp index a4b1a74685..87f440dfc8 100644 --- a/src/xrpld/app/main/BasicApp.cpp +++ b/src/xrpld/app/main/BasicApp.cpp @@ -21,9 +21,11 @@ #include +#include + BasicApp::BasicApp(std::size_t numberOfThreads) { - work_.emplace(io_service_); + work_.emplace(boost::asio::make_work_guard(io_context_)); threads_.reserve(numberOfThreads); while (numberOfThreads--) @@ -31,7 +33,7 @@ BasicApp::BasicApp(std::size_t numberOfThreads) threads_.emplace_back([this, numberOfThreads]() { beast::setCurrentThreadName( "io svc #" + std::to_string(numberOfThreads)); - this->io_service_.run(); + this->io_context_.run(); }); } } diff --git a/src/xrpld/app/main/BasicApp.h b/src/xrpld/app/main/BasicApp.h index cd1e8c1a71..276676ca18 100644 --- a/src/xrpld/app/main/BasicApp.h +++ b/src/xrpld/app/main/BasicApp.h @@ -20,28 +20,30 @@ #ifndef RIPPLE_APP_BASICAPP_H_INCLUDED #define RIPPLE_APP_BASICAPP_H_INCLUDED -#include +#include #include #include #include -// This is so that the io_service can outlive all the children +// This is so that the io_context can outlive all the children class BasicApp { private: - std::optional work_; + std::optional> + work_; std::vector threads_; - boost::asio::io_service io_service_; + boost::asio::io_context io_context_; public: BasicApp(std::size_t numberOfThreads); ~BasicApp(); - boost::asio::io_service& - get_io_service() + boost::asio::io_context& + get_io_context() { - return io_service_; + return io_context_; } }; diff --git a/src/xrpld/app/main/Main.cpp b/src/xrpld/app/main/Main.cpp index 3fdf362dd9..2353d7acd1 100644 --- a/src/xrpld/app/main/Main.cpp +++ b/src/xrpld/app/main/Main.cpp @@ -28,12 +28,17 @@ #include #include +#include +#include +#include +#include + #ifdef ENABLE_TESTS #include #include #endif // ENABLE_TESTS -#include +#include #include #include @@ -283,7 +288,7 @@ runUnitTests( if (!child) { multi_runner_parent parent_runner; - std::vector children; + std::vector children; std::string const exe_name = argv[0]; std::vector args; @@ -296,7 +301,8 @@ runUnitTests( for (std::size_t i = 0; i < num_jobs; ++i) 
children.emplace_back( - boost::process::exe = exe_name, boost::process::args = args); + boost::process::v1::exe = exe_name, + boost::process::v1::args = args); int bad_child_exits = 0; int terminated_child_exits = 0; diff --git a/src/xrpld/app/misc/NetworkOPs.cpp b/src/xrpld/app/misc/NetworkOPs.cpp index 3220ce99fc..403090c390 100644 --- a/src/xrpld/app/misc/NetworkOPs.cpp +++ b/src/xrpld/app/misc/NetworkOPs.cpp @@ -233,7 +233,7 @@ public: JobQueue& job_queue, LedgerMaster& ledgerMaster, ValidatorKeys const& validatorKeys, - boost::asio::io_service& io_svc, + boost::asio::io_context& io_svc, beast::Journal journal, beast::insight::Collector::ptr const& collector) : app_(app) @@ -588,31 +588,35 @@ public: stop() override { { - boost::system::error_code ec; - heartbeatTimer_.cancel(ec); - if (ec) + try + { + heartbeatTimer_.cancel(); + } + catch (boost::system::system_error const& e) { JLOG(m_journal.error()) - << "NetworkOPs: heartbeatTimer cancel error: " - << ec.message(); + << "NetworkOPs: heartbeatTimer cancel error: " << e.what(); } - ec.clear(); - clusterTimer_.cancel(ec); - if (ec) + try + { + clusterTimer_.cancel(); + } + catch (boost::system::system_error const& e) { JLOG(m_journal.error()) - << "NetworkOPs: clusterTimer cancel error: " - << ec.message(); + << "NetworkOPs: clusterTimer cancel error: " << e.what(); } - ec.clear(); - accountHistoryTxTimer_.cancel(ec); - if (ec) + try + { + accountHistoryTxTimer_.cancel(); + } + catch (boost::system::system_error const& e) { JLOG(m_journal.error()) << "NetworkOPs: accountHistoryTxTimer cancel error: " - << ec.message(); + << e.what(); } } // Make sure that any waitHandlers pending in our timers are done. @@ -984,7 +988,7 @@ NetworkOPsImp::setTimer( } })) { - timer.expires_from_now(expiry_time); + timer.expires_after(expiry_time); timer.async_wait(std::move(*optionalCountedHandler)); } } @@ -4855,7 +4859,7 @@ make_NetworkOPs( JobQueue& job_queue, LedgerMaster& ledgerMaster, ValidatorKeys const& validatorKeys, - boost::asio::io_service& io_svc, + boost::asio::io_context& io_svc, beast::Journal journal, beast::insight::Collector::ptr const& collector) { diff --git a/src/xrpld/app/misc/NetworkOPs.h b/src/xrpld/app/misc/NetworkOPs.h index 639cd782b7..9587d63b3a 100644 --- a/src/xrpld/app/misc/NetworkOPs.h +++ b/src/xrpld/app/misc/NetworkOPs.h @@ -290,7 +290,7 @@ make_NetworkOPs( JobQueue& job_queue, LedgerMaster& ledgerMaster, ValidatorKeys const& validatorKeys, - boost::asio::io_service& io_svc, + boost::asio::io_context& io_svc, beast::Journal journal, beast::insight::Collector::ptr const& collector); diff --git a/src/xrpld/app/misc/detail/ValidatorSite.cpp b/src/xrpld/app/misc/detail/ValidatorSite.cpp index 42d4e9e271..e235ff3e66 100644 --- a/src/xrpld/app/misc/detail/ValidatorSite.cpp +++ b/src/xrpld/app/misc/detail/ValidatorSite.cpp @@ -91,7 +91,7 @@ ValidatorSite::ValidatorSite( std::chrono::seconds timeout) : app_{app} , j_{j ? 
*j : app_.logs().journal("ValidatorSite")} - , timer_{app_.getIOService()} + , timer_{app_.getIOContext()} , fetching_{false} , pending_{false} , stopping_{false} @@ -271,7 +271,7 @@ ValidatorSite::makeRequest( resource->pUrl.domain, resource->pUrl.path, std::to_string(*resource->pUrl.port), - app_.getIOService(), + app_.getIOContext(), j_, app_.config(), sites_[siteIdx].lastRequestEndpoint, @@ -284,7 +284,7 @@ ValidatorSite::makeRequest( resource->pUrl.domain, resource->pUrl.path, std::to_string(*resource->pUrl.port), - app_.getIOService(), + app_.getIOContext(), sites_[siteIdx].lastRequestEndpoint, sites_[siteIdx].lastRequestSuccessful, onFetch); @@ -293,7 +293,7 @@ ValidatorSite::makeRequest( { BOOST_ASSERT(resource->pUrl.scheme == "file"); sp = std::make_shared( - resource->pUrl.path, app_.getIOService(), onFetchFile); + resource->pUrl.path, app_.getIOContext(), onFetchFile); } sites_[siteIdx].lastRequestSuccessful = false; diff --git a/src/xrpld/app/misc/detail/WorkBase.h b/src/xrpld/app/misc/detail/WorkBase.h index 17f935126b..a73cd3d597 100644 --- a/src/xrpld/app/misc/detail/WorkBase.h +++ b/src/xrpld/app/misc/detail/WorkBase.h @@ -26,6 +26,7 @@ #include #include +#include #include #include #include @@ -57,8 +58,8 @@ protected: std::string path_; std::string port_; callback_type cb_; - boost::asio::io_service& ios_; - boost::asio::io_service::strand strand_; + boost::asio::io_context& ios_; + boost::asio::strand strand_; resolver_type resolver_; socket_type socket_; request_type req_; @@ -72,7 +73,7 @@ public: std::string const& host, std::string const& path, std::string const& port, - boost::asio::io_service& ios, + boost::asio::io_context& ios, endpoint_type const& lastEndpoint, bool lastStatus, callback_type cb); @@ -120,7 +121,7 @@ WorkBase::WorkBase( std::string const& host, std::string const& path, std::string const& port, - boost::asio::io_service& ios, + boost::asio::io_context& ios, endpoint_type const& lastEndpoint, bool lastStatus, callback_type cb) @@ -129,7 +130,7 @@ WorkBase::WorkBase( , port_(port) , cb_(std::move(cb)) , ios_(ios) - , strand_(ios) + , strand_(boost::asio::make_strand(ios)) , resolver_(ios) , socket_(ios) , lastEndpoint_{lastEndpoint} @@ -152,17 +153,21 @@ void WorkBase::run() { if (!strand_.running_in_this_thread()) - return ios_.post( - strand_.wrap(std::bind(&WorkBase::run, impl().shared_from_this()))); + return boost::asio::post( + ios_, + boost::asio::bind_executor( + strand_, std::bind(&WorkBase::run, impl().shared_from_this()))); resolver_.async_resolve( host_, port_, - strand_.wrap(std::bind( - &WorkBase::onResolve, - impl().shared_from_this(), - std::placeholders::_1, - std::placeholders::_2))); + boost::asio::bind_executor( + strand_, + std::bind( + &WorkBase::onResolve, + impl().shared_from_this(), + std::placeholders::_1, + std::placeholders::_2))); } template @@ -171,8 +176,12 @@ WorkBase::cancel() { if (!strand_.running_in_this_thread()) { - return ios_.post(strand_.wrap( - std::bind(&WorkBase::cancel, impl().shared_from_this()))); + return boost::asio::post( + ios_, + + boost::asio::bind_executor( + strand_, + std::bind(&WorkBase::cancel, impl().shared_from_this()))); } error_code ec; @@ -201,11 +210,13 @@ WorkBase::onResolve(error_code const& ec, results_type results) boost::asio::async_connect( socket_, results, - strand_.wrap(std::bind( - &WorkBase::onConnect, - impl().shared_from_this(), - std::placeholders::_1, - std::placeholders::_2))); + boost::asio::bind_executor( + strand_, + std::bind( + &WorkBase::onConnect, + 
impl().shared_from_this(), + std::placeholders::_1, + std::placeholders::_2))); } template @@ -233,10 +244,12 @@ WorkBase::onStart() boost::beast::http::async_write( impl().stream(), req_, - strand_.wrap(std::bind( - &WorkBase::onRequest, - impl().shared_from_this(), - std::placeholders::_1))); + boost::asio::bind_executor( + strand_, + std::bind( + &WorkBase::onRequest, + impl().shared_from_this(), + std::placeholders::_1))); } template @@ -250,10 +263,12 @@ WorkBase::onRequest(error_code const& ec) impl().stream(), readBuf_, res_, - strand_.wrap(std::bind( - &WorkBase::onResponse, - impl().shared_from_this(), - std::placeholders::_1))); + boost::asio::bind_executor( + strand_, + std::bind( + &WorkBase::onResponse, + impl().shared_from_this(), + std::placeholders::_1))); } template diff --git a/src/xrpld/app/misc/detail/WorkFile.h b/src/xrpld/app/misc/detail/WorkFile.h index 51fd6db78c..562e1c9ec1 100644 --- a/src/xrpld/app/misc/detail/WorkFile.h +++ b/src/xrpld/app/misc/detail/WorkFile.h @@ -26,6 +26,10 @@ #include #include +#include +#include +#include + namespace ripple { namespace detail { @@ -45,7 +49,7 @@ public: public: WorkFile( std::string const& path, - boost::asio::io_service& ios, + boost::asio::io_context& ios, callback_type cb); ~WorkFile(); @@ -58,17 +62,20 @@ public: private: std::string path_; callback_type cb_; - boost::asio::io_service& ios_; - boost::asio::io_service::strand strand_; + boost::asio::io_context& ios_; + boost::asio::strand strand_; }; //------------------------------------------------------------------------------ WorkFile::WorkFile( std::string const& path, - boost::asio::io_service& ios, + boost::asio::io_context& ios, callback_type cb) - : path_(path), cb_(std::move(cb)), ios_(ios), strand_(ios) + : path_(path) + , cb_(std::move(cb)) + , ios_(ios) + , strand_(boost::asio::make_strand(ios)) { } @@ -82,8 +89,10 @@ void WorkFile::run() { if (!strand_.running_in_this_thread()) - return ios_.post( - strand_.wrap(std::bind(&WorkFile::run, shared_from_this()))); + return boost::asio::post( + ios_, + boost::asio::bind_executor( + strand_, std::bind(&WorkFile::run, shared_from_this()))); error_code ec; auto const fileContents = getFileContents(ec, path_, megabytes(1)); diff --git a/src/xrpld/app/misc/detail/WorkPlain.h b/src/xrpld/app/misc/detail/WorkPlain.h index 16bf424131..38dd0df9fa 100644 --- a/src/xrpld/app/misc/detail/WorkPlain.h +++ b/src/xrpld/app/misc/detail/WorkPlain.h @@ -37,7 +37,7 @@ public: std::string const& host, std::string const& path, std::string const& port, - boost::asio::io_service& ios, + boost::asio::io_context& ios, endpoint_type const& lastEndpoint, bool lastStatus, callback_type cb); @@ -60,7 +60,7 @@ WorkPlain::WorkPlain( std::string const& host, std::string const& path, std::string const& port, - boost::asio::io_service& ios, + boost::asio::io_context& ios, endpoint_type const& lastEndpoint, bool lastStatus, callback_type cb) diff --git a/src/xrpld/app/misc/detail/WorkSSL.cpp b/src/xrpld/app/misc/detail/WorkSSL.cpp index 0d6801ab84..a262a66ca7 100644 --- a/src/xrpld/app/misc/detail/WorkSSL.cpp +++ b/src/xrpld/app/misc/detail/WorkSSL.cpp @@ -26,7 +26,7 @@ WorkSSL::WorkSSL( std::string const& host, std::string const& path, std::string const& port, - boost::asio::io_service& ios, + boost::asio::io_context& ios, beast::Journal j, Config const& config, endpoint_type const& lastEndpoint, @@ -56,8 +56,12 @@ WorkSSL::onConnect(error_code const& ec) stream_.async_handshake( boost::asio::ssl::stream_base::client, - strand_.wrap(std::bind( - 
&WorkSSL::onHandshake, shared_from_this(), std::placeholders::_1))); + boost::asio::bind_executor( + strand_, + std::bind( + &WorkSSL::onHandshake, + shared_from_this(), + std::placeholders::_1))); } void diff --git a/src/xrpld/app/misc/detail/WorkSSL.h b/src/xrpld/app/misc/detail/WorkSSL.h index 6a310986e7..cadc3fd8fd 100644 --- a/src/xrpld/app/misc/detail/WorkSSL.h +++ b/src/xrpld/app/misc/detail/WorkSSL.h @@ -52,7 +52,7 @@ public: std::string const& host, std::string const& path, std::string const& port, - boost::asio::io_service& ios, + boost::asio::io_context& ios, beast::Journal j, Config const& config, endpoint_type const& lastEndpoint, diff --git a/src/xrpld/overlay/detail/ConnectAttempt.cpp b/src/xrpld/overlay/detail/ConnectAttempt.cpp index 61049579c5..397ac06ba6 100644 --- a/src/xrpld/overlay/detail/ConnectAttempt.cpp +++ b/src/xrpld/overlay/detail/ConnectAttempt.cpp @@ -28,7 +28,7 @@ namespace ripple { ConnectAttempt::ConnectAttempt( Application& app, - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, endpoint_type const& remote_endpoint, Resource::Consumer usage, shared_context const& context, @@ -43,10 +43,10 @@ ConnectAttempt::ConnectAttempt( , journal_(sink_) , remote_endpoint_(remote_endpoint) , usage_(usage) - , strand_(io_service) - , timer_(io_service) + , strand_(boost::asio::make_strand(io_context)) + , timer_(io_context) , stream_ptr_(std::make_unique( - socket_type(std::forward(io_service)), + socket_type(std::forward(io_context)), *context)) , socket_(stream_ptr_->next_layer().socket()) , stream_(*stream_ptr_) @@ -66,8 +66,8 @@ void ConnectAttempt::stop() { if (!strand_.running_in_this_thread()) - return strand_.post( - std::bind(&ConnectAttempt::stop, shared_from_this())); + return boost::asio::post( + strand_, std::bind(&ConnectAttempt::stop, shared_from_this())); if (socket_.is_open()) { JLOG(journal_.debug()) << "Stop"; @@ -80,10 +80,12 @@ ConnectAttempt::run() { stream_.next_layer().async_connect( remote_endpoint_, - strand_.wrap(std::bind( - &ConnectAttempt::onConnect, - shared_from_this(), - std::placeholders::_1))); + boost::asio::bind_executor( + strand_, + std::bind( + &ConnectAttempt::onConnect, + shared_from_this(), + std::placeholders::_1))); } //------------------------------------------------------------------------------ @@ -96,9 +98,16 @@ ConnectAttempt::close() "ripple::ConnectAttempt::close : strand in this thread"); if (socket_.is_open()) { - error_code ec; - timer_.cancel(ec); - socket_.close(ec); + try + { + timer_.cancel(); + socket_.close(); + } + catch (boost::system::system_error const&) + { + // ignored + } + JLOG(journal_.debug()) << "Closed"; } } @@ -120,23 +129,35 @@ ConnectAttempt::fail(std::string const& name, error_code ec) void ConnectAttempt::setTimer() { - error_code ec; - timer_.expires_from_now(std::chrono::seconds(15), ec); - if (ec) + try { - JLOG(journal_.error()) << "setTimer: " << ec.message(); + timer_.expires_after(std::chrono::seconds(15)); + } + catch (boost::system::system_error const& e) + { + JLOG(journal_.error()) << "setTimer: " << e.code(); return; } - timer_.async_wait(strand_.wrap(std::bind( - &ConnectAttempt::onTimer, shared_from_this(), std::placeholders::_1))); + timer_.async_wait(boost::asio::bind_executor( + strand_, + std::bind( + &ConnectAttempt::onTimer, + shared_from_this(), + std::placeholders::_1))); } void ConnectAttempt::cancelTimer() { - error_code ec; - timer_.cancel(ec); + try + { + timer_.cancel(); + } + catch (boost::system::system_error const&) + { + // ignored + } 
} void @@ -175,10 +196,12 @@ ConnectAttempt::onConnect(error_code ec) stream_.set_verify_mode(boost::asio::ssl::verify_none); stream_.async_handshake( boost::asio::ssl::stream_base::client, - strand_.wrap(std::bind( - &ConnectAttempt::onHandshake, - shared_from_this(), - std::placeholders::_1))); + boost::asio::bind_executor( + strand_, + std::bind( + &ConnectAttempt::onHandshake, + shared_from_this(), + std::placeholders::_1))); } void @@ -223,10 +246,12 @@ ConnectAttempt::onHandshake(error_code ec) boost::beast::http::async_write( stream_, req_, - strand_.wrap(std::bind( - &ConnectAttempt::onWrite, - shared_from_this(), - std::placeholders::_1))); + boost::asio::bind_executor( + strand_, + std::bind( + &ConnectAttempt::onWrite, + shared_from_this(), + std::placeholders::_1))); } void @@ -243,10 +268,12 @@ ConnectAttempt::onWrite(error_code ec) stream_, read_buf_, response_, - strand_.wrap(std::bind( - &ConnectAttempt::onRead, - shared_from_this(), - std::placeholders::_1))); + boost::asio::bind_executor( + strand_, + std::bind( + &ConnectAttempt::onRead, + shared_from_this(), + std::placeholders::_1))); } void @@ -262,10 +289,12 @@ ConnectAttempt::onRead(error_code ec) { JLOG(journal_.info()) << "EOF"; setTimer(); - return stream_.async_shutdown(strand_.wrap(std::bind( - &ConnectAttempt::onShutdown, - shared_from_this(), - std::placeholders::_1))); + return stream_.async_shutdown(boost::asio::bind_executor( + strand_, + std::bind( + &ConnectAttempt::onShutdown, + shared_from_this(), + std::placeholders::_1))); } if (ec) return fail("onRead", ec); @@ -299,7 +328,7 @@ ConnectAttempt::processResponse() s.reserve(boost::asio::buffer_size(response_.body().data())); for (auto const buffer : response_.body().data()) s.append( - boost::asio::buffer_cast(buffer), + static_cast(buffer.data()), boost::asio::buffer_size(buffer)); auto const success = r.parse(s, json); if (success) diff --git a/src/xrpld/overlay/detail/ConnectAttempt.h b/src/xrpld/overlay/detail/ConnectAttempt.h index c3e07f956a..febbe88f45 100644 --- a/src/xrpld/overlay/detail/ConnectAttempt.h +++ b/src/xrpld/overlay/detail/ConnectAttempt.h @@ -50,7 +50,7 @@ private: beast::Journal const journal_; endpoint_type remote_endpoint_; Resource::Consumer usage_; - boost::asio::io_service::strand strand_; + boost::asio::strand strand_; boost::asio::basic_waitable_timer timer_; std::unique_ptr stream_ptr_; socket_type& socket_; @@ -63,7 +63,7 @@ private: public: ConnectAttempt( Application& app, - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, endpoint_type const& remote_endpoint, Resource::Consumer usage, shared_context const& context, diff --git a/src/xrpld/overlay/detail/Handshake.cpp b/src/xrpld/overlay/detail/Handshake.cpp index e3617a1d98..5ce4954a5e 100644 --- a/src/xrpld/overlay/detail/Handshake.cpp +++ b/src/xrpld/overlay/detail/Handshake.cpp @@ -326,7 +326,7 @@ verifyHandshake( { boost::system::error_code ec; auto const local_ip = - boost::asio::ip::address::from_string(iter->value(), ec); + boost::asio::ip::make_address(std::string_view(iter->value()), ec); if (ec) throw std::runtime_error("Invalid Local-IP"); @@ -341,7 +341,7 @@ verifyHandshake( { boost::system::error_code ec; auto const remote_ip = - boost::asio::ip::address::from_string(iter->value(), ec); + boost::asio::ip::make_address(std::string_view(iter->value()), ec); if (ec) throw std::runtime_error("Invalid Remote-IP"); diff --git a/src/xrpld/overlay/detail/OverlayImpl.cpp b/src/xrpld/overlay/detail/OverlayImpl.cpp index 
874f951f56..f2c683b69f 100644 --- a/src/xrpld/overlay/detail/OverlayImpl.cpp +++ b/src/xrpld/overlay/detail/OverlayImpl.cpp @@ -41,6 +41,7 @@ #include #include +#include namespace ripple { @@ -68,7 +69,7 @@ OverlayImpl::Child::~Child() //------------------------------------------------------------------------------ OverlayImpl::Timer::Timer(OverlayImpl& overlay) - : Child(overlay), timer_(overlay_.io_service_) + : Child(overlay), timer_(overlay_.io_context_) { } @@ -85,8 +86,10 @@ void OverlayImpl::Timer::async_wait() { timer_.expires_after(std::chrono::seconds(1)); - timer_.async_wait(overlay_.strand_.wrap(std::bind( - &Timer::on_timer, shared_from_this(), std::placeholders::_1))); + timer_.async_wait(boost::asio::bind_executor( + overlay_.strand_, + std::bind( + &Timer::on_timer, shared_from_this(), std::placeholders::_1))); } void @@ -121,19 +124,19 @@ OverlayImpl::OverlayImpl( ServerHandler& serverHandler, Resource::Manager& resourceManager, Resolver& resolver, - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, BasicConfig const& config, beast::insight::Collector::ptr const& collector) : app_(app) - , io_service_(io_service) - , work_(std::in_place, std::ref(io_service_)) - , strand_(io_service_) + , io_context_(io_context) + , work_(std::in_place, boost::asio::make_work_guard(io_context_)) + , strand_(boost::asio::make_strand(io_context_)) , setup_(setup) , journal_(app_.journal("Overlay")) , serverHandler_(serverHandler) , m_resourceManager(resourceManager) , m_peerFinder(PeerFinder::make_Manager( - io_service, + io_context, stopwatch(), app_.journal("PeerFinder"), config, @@ -408,7 +411,7 @@ OverlayImpl::connect(beast::IP::Endpoint const& remote_endpoint) auto const p = std::make_shared( app_, - io_service_, + io_context_, beast::IPAddressConversion::to_asio_endpoint(remote_endpoint), usage, setup_.context, @@ -560,7 +563,7 @@ OverlayImpl::start() void OverlayImpl::stop() { - strand_.dispatch(std::bind(&OverlayImpl::stopChildren, this)); + boost::asio::dispatch(strand_, std::bind(&OverlayImpl::stopChildren, this)); { std::unique_lock lock(mutex_); cond_.wait(lock, [this] { return list_.empty(); }); @@ -1498,7 +1501,7 @@ setup_Overlay(BasicConfig const& config) if (!ip.empty()) { boost::system::error_code ec; - setup.public_ip = beast::IP::Address::from_string(ip, ec); + setup.public_ip = boost::asio::ip::make_address(ip, ec); if (ec || beast::IP::is_private(setup.public_ip)) Throw("Configured public IP is invalid"); } @@ -1592,7 +1595,7 @@ make_Overlay( ServerHandler& serverHandler, Resource::Manager& resourceManager, Resolver& resolver, - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, BasicConfig const& config, beast::insight::Collector::ptr const& collector) { @@ -1602,7 +1605,7 @@ make_Overlay( serverHandler, resourceManager, resolver, - io_service, + io_context, config, collector); } diff --git a/src/xrpld/overlay/detail/OverlayImpl.h b/src/xrpld/overlay/detail/OverlayImpl.h index 86107fc591..b4ea3307ec 100644 --- a/src/xrpld/overlay/detail/OverlayImpl.h +++ b/src/xrpld/overlay/detail/OverlayImpl.h @@ -38,6 +38,7 @@ #include #include +#include #include #include #include @@ -100,9 +101,11 @@ private: }; Application& app_; - boost::asio::io_service& io_service_; - std::optional work_; - boost::asio::io_service::strand strand_; + boost::asio::io_context& io_context_; + std::optional> + work_; + boost::asio::strand strand_; mutable std::recursive_mutex mutex_; // VFALCO use std::mutex std::condition_variable_any cond_; 
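
The Handshake.cpp and OverlayImpl.cpp hunks above move from the deprecated boost::asio::ip::address::from_string() to make_address(), which has both throwing and error_code overloads. A standalone sketch (illustrative only):

    #include <boost/asio/ip/address.hpp>
    #include <boost/system/error_code.hpp>

    #include <iostream>

    int main()
    {
        // Non-throwing overload, as used in verifyHandshake().
        boost::system::error_code ec;
        auto const addr = boost::asio::ip::make_address("172.1.1.100", ec);
        if (ec)
            std::cout << "invalid address: " << ec.message() << '\n';
        else
            std::cout << addr.to_string() << '\n';

        // Throwing overload, for places where a bad literal is a logic error.
        auto const loopback = boost::asio::ip::make_address("::1");
        std::cout << loopback.is_v6() << '\n';
    }
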
std::weak_ptr timer_; @@ -143,7 +146,7 @@ public: ServerHandler& serverHandler, Resource::Manager& resourceManager, Resolver& resolver, - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, BasicConfig const& config, beast::insight::Collector::ptr const& collector); diff --git a/src/xrpld/overlay/detail/PeerImp.cpp b/src/xrpld/overlay/detail/PeerImp.cpp index 23b4760488..69f25e1eb4 100644 --- a/src/xrpld/overlay/detail/PeerImp.cpp +++ b/src/xrpld/overlay/detail/PeerImp.cpp @@ -84,7 +84,7 @@ PeerImp::PeerImp( , stream_ptr_(std::move(stream_ptr)) , socket_(stream_ptr_->next_layer().socket()) , stream_(*stream_ptr_) - , strand_(socket_.get_executor()) + , strand_(boost::asio::make_strand(socket_.get_executor())) , timer_(waitable_timer{socket_.get_executor()}) , remote_address_(slot->remote_endpoint()) , overlay_(overlay) @@ -581,9 +581,16 @@ PeerImp::close() if (socket_.is_open()) { detaching_ = true; // DEPRECATED - error_code ec; - timer_.cancel(ec); - socket_.close(ec); + try + { + timer_.cancel(); + socket_.close(); + } + catch (boost::system::system_error const&) + { + // ignored + } + overlay_.incPeerDisconnect(); if (inbound_) { @@ -654,12 +661,13 @@ PeerImp::gracefulClose() void PeerImp::setTimer() { - error_code ec; - timer_.expires_from_now(peerTimerInterval, ec); - - if (ec) + try { - JLOG(journal_.error()) << "setTimer: " << ec.message(); + timer_.expires_after(peerTimerInterval); + } + catch (boost::system::system_error const& e) + { + JLOG(journal_.error()) << "setTimer: " << e.code(); return; } timer_.async_wait(bind_executor( @@ -672,8 +680,14 @@ PeerImp::setTimer() void PeerImp::cancelTimer() { - error_code ec; - timer_.cancel(ec); + try + { + timer_.cancel(); + } + catch (boost::system::system_error const&) + { + // ignored + } } //------------------------------------------------------------------------------ diff --git a/src/xrpld/overlay/detail/PeerImp.h b/src/xrpld/overlay/detail/PeerImp.h index 5aa49fd152..3d9a0c0b1e 100644 --- a/src/xrpld/overlay/detail/PeerImp.h +++ b/src/xrpld/overlay/detail/PeerImp.h @@ -669,7 +669,7 @@ PeerImp::PeerImp( , stream_ptr_(std::move(stream_ptr)) , socket_(stream_ptr_->next_layer().socket()) , stream_(*stream_ptr_) - , strand_(socket_.get_executor()) + , strand_(boost::asio::make_strand(socket_.get_executor())) , timer_(waitable_timer{socket_.get_executor()}) , remote_address_(slot->remote_endpoint()) , overlay_(overlay) diff --git a/src/xrpld/overlay/detail/PeerSet.cpp b/src/xrpld/overlay/detail/PeerSet.cpp index 611728839c..74290f50d3 100644 --- a/src/xrpld/overlay/detail/PeerSet.cpp +++ b/src/xrpld/overlay/detail/PeerSet.cpp @@ -46,7 +46,7 @@ public: getPeerIds() const override; private: - // Used in this class for access to boost::asio::io_service and + // Used in this class for access to boost::asio::io_context and // ripple::Overlay. 
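
The ZeroCopyStream.h and ServerHandler.cpp hunks further down replace the removed boost::asio::buffer_cast() with the buffer's own data() and size() accessors. A standalone sketch of the equivalence (illustrative only):

    #include <boost/asio/buffer.hpp>

    #include <string>

    // Equivalent of the old buffer_cast<char const*>(b) / buffer_size(b) pair.
    std::string to_string(boost::asio::const_buffer b)
    {
        return {static_cast<char const*>(b.data()), b.size()};
    }

    int main()
    {
        std::string const src = "hello";
        return to_string(boost::asio::buffer(src)) == src ? 0 : 1;
    }
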
Application& app_; beast::Journal journal_; diff --git a/src/xrpld/overlay/detail/ZeroCopyStream.h b/src/xrpld/overlay/detail/ZeroCopyStream.h index 87a5e10bc2..23e26c5351 100644 --- a/src/xrpld/overlay/detail/ZeroCopyStream.h +++ b/src/xrpld/overlay/detail/ZeroCopyStream.h @@ -78,7 +78,7 @@ template bool ZeroCopyInputStream::Next(void const** data, int* size) { - *data = boost::asio::buffer_cast(pos_); + *data = pos_.data(); *size = boost::asio::buffer_size(pos_); if (first_ == last_) return false; @@ -195,7 +195,7 @@ ZeroCopyOutputStream::Next(void** data, int* size) pos_ = buffers_.begin(); } - *data = boost::asio::buffer_cast(*pos_); + *data = *pos_.data(); *size = boost::asio::buffer_size(*pos_); commit_ = *size; ++pos_; diff --git a/src/xrpld/overlay/make_Overlay.h b/src/xrpld/overlay/make_Overlay.h index 3476026562..142c922551 100644 --- a/src/xrpld/overlay/make_Overlay.h +++ b/src/xrpld/overlay/make_Overlay.h @@ -25,7 +25,7 @@ #include -#include +#include namespace ripple { @@ -40,7 +40,7 @@ make_Overlay( ServerHandler& serverHandler, Resource::Manager& resourceManager, Resolver& resolver, - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, BasicConfig const& config, beast::insight::Collector::ptr const& collector); diff --git a/src/xrpld/peerfinder/detail/Checker.h b/src/xrpld/peerfinder/detail/Checker.h index e7983471a5..c5221fcc13 100644 --- a/src/xrpld/peerfinder/detail/Checker.h +++ b/src/xrpld/peerfinder/detail/Checker.h @@ -22,7 +22,7 @@ #include -#include +#include #include #include @@ -65,7 +65,7 @@ private: async_op( Checker& owner, - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, Handler&& handler); ~async_op(); @@ -85,17 +85,17 @@ private: std::mutex mutex_; std::condition_variable cond_; - boost::asio::io_service& io_service_; + boost::asio::io_context& io_context_; list_type list_; bool stop_ = false; public: - explicit Checker(boost::asio::io_service& io_service); + explicit Checker(boost::asio::io_context& io_context); /** Destroy the service. Any pending I/O operations will be canceled. This call blocks until all pending operations complete (either with success or with - operation_aborted) and the associated thread and io_service have + operation_aborted) and the associated thread and io_context have no more work remaining. 
*/ ~Checker(); @@ -132,10 +132,10 @@ template template Checker::async_op::async_op( Checker& owner, - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, Handler&& handler) : checker_(owner) - , socket_(io_service) + , socket_(io_context) , handler_(std::forward(handler)) { } @@ -167,8 +167,8 @@ Checker::async_op::operator()(error_code const& ec) //------------------------------------------------------------------------------ template -Checker::Checker(boost::asio::io_service& io_service) - : io_service_(io_service) +Checker::Checker(boost::asio::io_context& io_context) + : io_context_(io_context) { } @@ -208,7 +208,7 @@ Checker::async_connect( Handler&& handler) { auto const op = std::make_shared>( - *this, io_service_, std::forward(handler)); + *this, io_context_, std::forward(handler)); { std::lock_guard lock(mutex_); list_.push_back(*op); diff --git a/src/xrpld/peerfinder/detail/PeerfinderManager.cpp b/src/xrpld/peerfinder/detail/PeerfinderManager.cpp index 86093fa166..205df67fa6 100644 --- a/src/xrpld/peerfinder/detail/PeerfinderManager.cpp +++ b/src/xrpld/peerfinder/detail/PeerfinderManager.cpp @@ -23,7 +23,8 @@ #include #include -#include +#include +#include #include #include @@ -34,8 +35,10 @@ namespace PeerFinder { class ManagerImp : public Manager { public: - boost::asio::io_service& io_service_; - std::optional work_; + boost::asio::io_context& io_context_; + std::optional> + work_; clock_type& m_clock; beast::Journal m_journal; StoreSqdb m_store; @@ -46,18 +49,18 @@ public: //-------------------------------------------------------------------------- ManagerImp( - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, clock_type& clock, beast::Journal journal, BasicConfig const& config, beast::insight::Collector::ptr const& collector) : Manager() - , io_service_(io_service) - , work_(std::in_place, std::ref(io_service_)) + , io_context_(io_context) + , work_(std::in_place, boost::asio::make_work_guard(io_context_)) , m_clock(clock) , m_journal(journal) , m_store(journal) - , checker_(io_service_) + , checker_(io_context_) , m_logic(clock, m_store, checker_, journal) , m_config(config) , m_stats(std::bind(&ManagerImp::collect_metrics, this), collector) @@ -271,14 +274,14 @@ Manager::Manager() noexcept : beast::PropertyStream::Source("peerfinder") std::unique_ptr make_Manager( - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, clock_type& clock, beast::Journal journal, BasicConfig const& config, beast::insight::Collector::ptr const& collector) { return std::make_unique( - io_service, clock, journal, config, collector); + io_context, clock, journal, config, collector); } } // namespace PeerFinder diff --git a/src/xrpld/peerfinder/make_Manager.h b/src/xrpld/peerfinder/make_Manager.h index fba95e8f22..e55964f4a7 100644 --- a/src/xrpld/peerfinder/make_Manager.h +++ b/src/xrpld/peerfinder/make_Manager.h @@ -22,7 +22,7 @@ #include -#include +#include #include @@ -32,7 +32,7 @@ namespace PeerFinder { /** Create a new Manager. 
*/ std::unique_ptr make_Manager( - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, clock_type& clock, beast::Journal journal, BasicConfig const& config, diff --git a/src/xrpld/rpc/RPCCall.h b/src/xrpld/rpc/RPCCall.h index 4c6d25ca57..9e160b8fbd 100644 --- a/src/xrpld/rpc/RPCCall.h +++ b/src/xrpld/rpc/RPCCall.h @@ -25,7 +25,7 @@ #include #include -#include +#include #include #include @@ -51,7 +51,7 @@ fromCommandLine( void fromNetwork( - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, std::string const& strIp, std::uint16_t const iPort, std::string const& strUsername, diff --git a/src/xrpld/rpc/RPCSub.h b/src/xrpld/rpc/RPCSub.h index 0f106be018..2fd1be0b20 100644 --- a/src/xrpld/rpc/RPCSub.h +++ b/src/xrpld/rpc/RPCSub.h @@ -23,7 +23,7 @@ #include #include -#include +#include namespace ripple { @@ -40,11 +40,11 @@ protected: explicit RPCSub(InfoSub::Source& source); }; -// VFALCO Why is the io_service needed? +// VFALCO Why is the io_context needed? std::shared_ptr make_RPCSub( InfoSub::Source& source, - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, JobQueue& jobQueue, std::string const& strUrl, std::string const& strUsername, diff --git a/src/xrpld/rpc/ServerHandler.h b/src/xrpld/rpc/ServerHandler.h index 5f72673313..d0ebdcd67d 100644 --- a/src/xrpld/rpc/ServerHandler.h +++ b/src/xrpld/rpc/ServerHandler.h @@ -111,7 +111,7 @@ private: friend std::unique_ptr make_ServerHandler( Application& app, - boost::asio::io_service&, + boost::asio::io_context&, JobQueue&, NetworkOPs&, Resource::Manager&, @@ -122,7 +122,7 @@ public: ServerHandler( ServerHandlerCreator const&, Application& app, - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, JobQueue& jobQueue, NetworkOPs& networkOPs, Resource::Manager& resourceManager, @@ -223,7 +223,7 @@ setup_ServerHandler(Config const& c, std::ostream&& log); std::unique_ptr make_ServerHandler( Application& app, - boost::asio::io_service&, + boost::asio::io_context&, JobQueue&, NetworkOPs&, Resource::Manager&, diff --git a/src/xrpld/rpc/detail/RPCCall.cpp b/src/xrpld/rpc/detail/RPCCall.cpp index aa8c80fff7..57432d920f 100644 --- a/src/xrpld/rpc/detail/RPCCall.cpp +++ b/src/xrpld/rpc/detail/RPCCall.cpp @@ -1543,7 +1543,7 @@ rpcClient( } { - boost::asio::io_service isService; + boost::asio::io_context isService; RPCCall::fromNetwork( isService, setup.client.ip, @@ -1647,7 +1647,7 @@ fromCommandLine( void fromNetwork( - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, std::string const& strIp, std::uint16_t const iPort, std::string const& strUsername, @@ -1685,7 +1685,7 @@ fromNetwork( HTTPClient::request( bSSL, - io_service, + io_context, strIp, iPort, std::bind( diff --git a/src/xrpld/rpc/detail/RPCSub.cpp b/src/xrpld/rpc/detail/RPCSub.cpp index 966ad6df4b..6619b5ddc5 100644 --- a/src/xrpld/rpc/detail/RPCSub.cpp +++ b/src/xrpld/rpc/detail/RPCSub.cpp @@ -35,14 +35,14 @@ class RPCSubImp : public RPCSub public: RPCSubImp( InfoSub::Source& source, - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, JobQueue& jobQueue, std::string const& strUrl, std::string const& strUsername, std::string const& strPassword, Logs& logs) : RPCSub(source) - , m_io_service(io_service) + , m_io_context(io_context) , m_jobQueue(jobQueue) , mUrl(strUrl) , mSSL(false) @@ -155,7 +155,7 @@ private: JLOG(j_.info()) << "RPCCall::fromNetwork: " << mIp; RPCCall::fromNetwork( - m_io_service, + m_io_context, mIp, mPort, mUsername, @@ 
-177,7 +177,7 @@ private: } private: - boost::asio::io_service& m_io_service; + boost::asio::io_context& m_io_context; JobQueue& m_jobQueue; std::string mUrl; @@ -207,7 +207,7 @@ RPCSub::RPCSub(InfoSub::Source& source) : InfoSub(source, Consumer()) std::shared_ptr make_RPCSub( InfoSub::Source& source, - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, JobQueue& jobQueue, std::string const& strUrl, std::string const& strUsername, @@ -216,7 +216,7 @@ make_RPCSub( { return std::make_shared( std::ref(source), - std::ref(io_service), + std::ref(io_context), std::ref(jobQueue), strUrl, strUsername, diff --git a/src/xrpld/rpc/detail/ServerHandler.cpp b/src/xrpld/rpc/detail/ServerHandler.cpp index 0c84e59413..f5f5e53238 100644 --- a/src/xrpld/rpc/detail/ServerHandler.cpp +++ b/src/xrpld/rpc/detail/ServerHandler.cpp @@ -104,7 +104,7 @@ authorized(Port const& port, std::map const& h) ServerHandler::ServerHandler( ServerHandlerCreator const&, Application& app, - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, JobQueue& jobQueue, NetworkOPs& networkOPs, Resource::Manager& resourceManager, @@ -113,7 +113,7 @@ ServerHandler::ServerHandler( , m_resourceManager(resourceManager) , m_journal(app_.journal("Server")) , m_networkOPs(networkOPs) - , m_server(make_Server(*this, io_service, app_.journal("Server"))) + , m_server(make_Server(*this, io_context, app_.journal("Server"))) , m_jobQueue(jobQueue) { auto const& group(cm.group("rpc")); @@ -282,14 +282,13 @@ template static std::string buffers_to_string(ConstBufferSequence const& bs) { - using boost::asio::buffer_cast; using boost::asio::buffer_size; std::string s; s.reserve(buffer_size(bs)); // Use auto&& so the right thing happens whether bs returns a copy or // a reference for (auto&& b : bs) - s.append(buffer_cast(b), buffer_size(b)); + s.append(static_cast(b.data()), buffer_size(b)); return s; } @@ -1267,7 +1266,7 @@ setup_ServerHandler(Config const& config, std::ostream&& log) std::unique_ptr make_ServerHandler( Application& app, - boost::asio::io_service& io_service, + boost::asio::io_context& io_context, JobQueue& jobQueue, NetworkOPs& networkOPs, Resource::Manager& resourceManager, @@ -1276,7 +1275,7 @@ make_ServerHandler( return std::make_unique( ServerHandler::ServerHandlerCreator(), app, - io_service, + io_context, jobQueue, networkOPs, resourceManager, diff --git a/src/xrpld/rpc/handlers/Subscribe.cpp b/src/xrpld/rpc/handlers/Subscribe.cpp index c089f0255d..1696754e9c 100644 --- a/src/xrpld/rpc/handlers/Subscribe.cpp +++ b/src/xrpld/rpc/handlers/Subscribe.cpp @@ -76,7 +76,7 @@ doSubscribe(RPC::JsonContext& context) { auto rspSub = make_RPCSub( context.app.getOPs(), - context.app.getIOService(), + context.app.getIOContext(), context.app.getJobQueue(), strUrl, strUsername, From 2df7dcfdebcb0cdbd030c1f4b09ac748af95659c Mon Sep 17 00:00:00 2001 From: Michael Legleux Date: Wed, 27 Aug 2025 10:25:53 -0700 Subject: [PATCH 144/244] Set version to 2.6.0 --- src/libxrpl/protocol/BuildInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libxrpl/protocol/BuildInfo.cpp b/src/libxrpl/protocol/BuildInfo.cpp index 4b55f82f49..d5077aa44d 100644 --- a/src/libxrpl/protocol/BuildInfo.cpp +++ b/src/libxrpl/protocol/BuildInfo.cpp @@ -36,7 +36,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "2.6.0-rc3" +char const* 
const versionString = "2.6.0" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From dedf3d3983c514930dd2883e83917649b3b6b45a Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Thu, 28 Aug 2025 15:15:17 +0100 Subject: [PATCH 145/244] Remove extraneous // LCOV_EXCL_START, and fix CMake warning (#5744) * Remove extraneous // LCOV_EXCL_START * Fix "At least one COMMAND must be given" CMake warning --- cmake/CodeCoverage.cmake | 5 ++++- src/xrpld/ledger/detail/View.cpp | 1 - 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/cmake/CodeCoverage.cmake b/cmake/CodeCoverage.cmake index ce1733988b..09ec3b9569 100644 --- a/cmake/CodeCoverage.cmake +++ b/cmake/CodeCoverage.cmake @@ -101,6 +101,9 @@ # 2025-05-12, Jingchen Wu # - add -fprofile-update=atomic to ensure atomic profile generation # +# 2025-08-28, Bronek Kozicki +# - fix "At least one COMMAND must be given" CMake warning from policy CMP0175 +# # USAGE: # # 1. Copy this file into your cmake modules path. @@ -446,7 +449,7 @@ function(setup_target_for_coverage_gcovr) # Show info where to find the report add_custom_command(TARGET ${Coverage_NAME} POST_BUILD - COMMAND ; + COMMAND echo COMMENT "Code coverage report saved in ${GCOVR_OUTPUT_FILE} formatted as ${Coverage_FORMAT}" ) endfunction() # setup_target_for_coverage_gcovr diff --git a/src/xrpld/ledger/detail/View.cpp b/src/xrpld/ledger/detail/View.cpp index 7c6e1d60f1..4f8a29d15c 100644 --- a/src/xrpld/ledger/detail/View.cpp +++ b/src/xrpld/ledger/detail/View.cpp @@ -3074,7 +3074,6 @@ rippleUnlockEscrowMPT( auto const delta = amount.mpt().value(); // Underflow check for subtraction - // LCOV_EXCL_START if (!canSubtract(STAmount(mptIssue, locked), STAmount(mptIssue, delta))) { // LCOV_EXCL_START JLOG(j.error()) From 1e37d00d6c176f8bce562f5ecb19c2cbc0cf098f Mon Sep 17 00:00:00 2001 From: Ayaz Salikhov Date: Thu, 28 Aug 2025 20:32:49 +0100 Subject: [PATCH 146/244] ci: Use XRPLF/prepare-runner action (#5740) * ci: Use XRPLF/prepare-runner action * Remove some old boost workaround --- .github/workflows/build-test.yml | 34 +++++++------------------------- cmake/deps/Boost.cmake | 6 ------ 2 files changed, 7 insertions(+), 33 deletions(-) diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml index 36145479e1..05b65edbfd 100644 --- a/.github/workflows/build-test.yml +++ b/.github/workflows/build-test.yml @@ -101,6 +101,7 @@ jobs: echo 'CMake arguments: ${{ matrix.cmake_args }}' echo 'CMake target: ${{ matrix.cmake_target }}' echo 'Config name: ${{ matrix.config_name }}' + - name: Clean workspace (MacOS) if: ${{ inputs.os == 'macos' }} run: | @@ -111,18 +112,12 @@ jobs: exit 1 fi find "${WORKSPACE}" -depth 1 | xargs rm -rfv + - name: Checkout repository uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 - - name: Set up Python (Windows) - if: ${{ inputs.os == 'windows' }} - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 - with: - python-version: 3.13 - - name: Install build tools (Windows) - if: ${{ inputs.os == 'windows' }} - run: | - echo 'Installing build tools.' - pip install wheel conan + - name: Prepare runner + uses: XRPLF/actions/.github/actions/prepare-runner@638e0dc11ea230f91bd26622fb542116bb5254d5 + - name: Check configuration (Windows) if: ${{ inputs.os == 'windows' }} run: | @@ -134,11 +129,6 @@ jobs: echo 'Checking Conan version.' conan --version - - name: Install build tools (MacOS) - if: ${{ inputs.os == 'macos' }} - run: | - echo 'Installing build tools.' 
- brew install --quiet cmake conan ninja coreutils - name: Check configuration (Linux and MacOS) if: ${{ inputs.os == 'linux' || inputs.os == 'macos' }} run: | @@ -162,18 +152,7 @@ jobs: echo 'Checking nproc version.' nproc --version - - name: Set up Conan home directory (MacOS) - if: ${{ inputs.os == 'macos' }} - run: | - echo 'Setting up Conan home directory.' - export CONAN_HOME=${{ github.workspace }}/.conan - mkdir -p ${CONAN_HOME} - - name: Set up Conan home directory (Windows) - if: ${{ inputs.os == 'windows' }} - run: | - echo 'Setting up Conan home directory.' - set CONAN_HOME=${{ github.workspace }}\.conan - mkdir -p %CONAN_HOME% + - name: Set up Conan configuration run: | echo 'Installing configuration.' @@ -196,6 +175,7 @@ jobs: echo 'Listing Conan remotes.' conan remote list + - name: Build dependencies uses: ./.github/actions/build-deps with: diff --git a/cmake/deps/Boost.cmake b/cmake/deps/Boost.cmake index bde40c0ce5..e431e57b0c 100644 --- a/cmake/deps/Boost.cmake +++ b/cmake/deps/Boost.cmake @@ -14,12 +14,6 @@ find_package(Boost 1.82 REQUIRED add_library(ripple_boost INTERFACE) add_library(Ripple::boost ALIAS ripple_boost) -if(XCODE) - target_include_directories(ripple_boost BEFORE INTERFACE ${Boost_INCLUDE_DIRS}) - target_compile_options(ripple_boost INTERFACE --system-header-prefix="boost/") -else() - target_include_directories(ripple_boost SYSTEM BEFORE INTERFACE ${Boost_INCLUDE_DIRS}) -endif() target_link_libraries(ripple_boost INTERFACE From 6e814d7ebdaf245464cece678e098c6fe7da17a2 Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Thu, 28 Aug 2025 16:33:11 -0400 Subject: [PATCH 147/244] chore: Run CI jobs in more situations, and add "passed" job (#5739) Test jobs will run if * Either the PR is non-draft or has the "DraftRunCI" label set *AND* * One of the following: * Certain files were changed *OR* * The PR is non-draft and has the "Ready to merge" flag *OR* * The workflow is being run from the merge queue. Additionally, a meta "passed" job was added that is dependent on all the other test jobs, so the required jobs list under branch protection rules only needs to specify "passed" to ensure that *either* all the test jobs pass *or* all the test jobs are skipped because they don't need to be run. This allows PRs that don't affect the build or binary to be merged without overriding. --- .github/workflows/on-pr.yml | 70 ++++++++++++++++++++++++------------- 1 file changed, 46 insertions(+), 24 deletions(-) diff --git a/.github/workflows/on-pr.yml b/.github/workflows/on-pr.yml index a5f1d60c42..a4bbfd0997 100644 --- a/.github/workflows/on-pr.yml +++ b/.github/workflows/on-pr.yml @@ -28,30 +28,26 @@ env: CONAN_REMOTE_URL: https://conan.ripplex.io jobs: - # This job determines whether the workflow should run. It runs when the PR is - # not a draft or has the 'DraftRunCI' label. + # This job determines whether the rest of the workflow should run. It runs + # when the PR is not a draft (which should also cover merge-group) or + # has the 'DraftRunCI' label. should-run: if: ${{ !github.event.pull_request.draft || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }} runs-on: ubuntu-latest - steps: - - name: No-op - run: true - - # This job checks whether any files have changed that should cause the next - # jobs to run. We do it this way rather than using `paths` in the `on:` - # section, because all required checks must pass, even for changes that do not - # modify anything that affects those checks. 
We would therefore like to make - # the checks required only if the job runs, but GitHub does not support that - # directly. By always executing the workflow on new commits and by using the - # changed-files action below, we ensure that Github considers any skipped jobs - # to have passed, and in turn the required checks as well. - any-changed: - needs: should-run - runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 - name: Determine changed files + # This step checks whether any files have changed that should + # cause the next jobs to run. We do it this way rather than + # using `paths` in the `on:` section, because all required + # checks must pass, even for changes that do not modify anything + # that affects those checks. We would therefore like to make the + # checks required only if the job runs, but GitHub does not + # support that directly. By always executing the workflow on new + # commits and by using the changed-files action below, we ensure + # that Github considers any skipped jobs to have passed, and in + # turn the required checks as well. id: changes uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5 with: @@ -79,24 +75,40 @@ jobs: tests/** CMakeLists.txt conanfile.py + - name: Check whether to run + # This step determines whether the rest of the workflow should + # run. The rest of the workflow will run if this job runs AND at + # least one of: + # * Any of the files checked in the `changes` step were modified + # * The PR is NOT a draft and is labeled "Ready to merge" + # * The workflow is running from the merge queue + id: go + env: + FILES: ${{ steps.changes.outputs.any_changed }} + DRAFT: ${{ github.event.pull_request.draft }} + READY: ${{ contains(github.event.pull_request.labels.*.name, 'Ready to merge') }} + MERGE: ${{ github.event_name == 'merge_group' }} + run: | + echo "go=${{ (env.DRAFT != 'true' && env.READY == 'true') || env.FILES == 'true' || env.MERGE == 'true' }}" >> "${GITHUB_OUTPUT}" + cat "${GITHUB_OUTPUT}" outputs: - changed: ${{ steps.changes.outputs.any_changed }} + go: ${{ steps.go.outputs.go == 'true' }} check-format: - needs: any-changed - if: needs.any-changed.outputs.changed == 'true' + needs: should-run + if: needs.should-run.outputs.go == 'true' uses: ./.github/workflows/check-format.yml check-levelization: - needs: any-changed - if: needs.any-changed.outputs.changed == 'true' + needs: should-run + if: needs.should-run.outputs.go == 'true' uses: ./.github/workflows/check-levelization.yml # This job works around the limitation that GitHub Actions does not support # using environment variables as inputs for reusable workflows. 
generate-outputs: - needs: any-changed - if: needs.any-changed.outputs.changed == 'true' + needs: should-run + if: needs.should-run.outputs.go == 'true' runs-on: ubuntu-latest steps: - name: No-op @@ -130,3 +142,13 @@ jobs: clio_notify_token: ${{ secrets.CLIO_NOTIFY_TOKEN }} conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }} conan_remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }} + + passed: + needs: + - build-test + - check-format + - check-levelization + runs-on: ubuntu-latest + steps: + - name: No-op + run: true From e4fdf3315805c7678500bd7c2babad77cf66148a Mon Sep 17 00:00:00 2001 From: Vito Tumas <5780819+Tapanito@users.noreply.github.com> Date: Fri, 29 Aug 2025 02:00:38 +0200 Subject: [PATCH 148/244] adds additional logging to differentiate why connections were refused (#5690) This is a follow-up to PR #5664 that further improves the specificity of logging for refused peer connections. The previous changes did not account for several key scenarios, leading to potentially misleading log messages. It addresses the following - Inbound Disabled: Connections are now explicitly logged as rejected when the server is not configured to accept inbound peers. Previously, this was logged as the server being "full," which was technically correct but lacked diagnostic clarity. - Duplicate Connections: The logging now distinguishes between two types of duplicate connection refusals: - When a peer with the same node public key is already connected (duplicate connection). - When a connection is rejected because the limit for connections from a single IP address has been reached. These changes provide more accurate and actionable diagnostic information when analyzing peer connection behavior. --- src/test/overlay/tx_reduce_relay_test.cpp | 2 +- src/test/peerfinder/PeerFinder_test.cpp | 274 ++++++++++++++---- src/xrpld/overlay/detail/OverlayImpl.cpp | 11 +- src/xrpld/peerfinder/PeerfinderManager.h | 27 +- src/xrpld/peerfinder/detail/Counts.h | 2 +- src/xrpld/peerfinder/detail/Logic.h | 26 +- .../peerfinder/detail/PeerfinderConfig.cpp | 11 + .../peerfinder/detail/PeerfinderManager.cpp | 4 +- 8 files changed, 280 insertions(+), 77 deletions(-) diff --git a/src/test/overlay/tx_reduce_relay_test.cpp b/src/test/overlay/tx_reduce_relay_test.cpp index 0c67fd581c..83b3013514 100644 --- a/src/test/overlay/tx_reduce_relay_test.cpp +++ b/src/test/overlay/tx_reduce_relay_test.cpp @@ -183,7 +183,7 @@ private: boost::asio::ip::make_address("172.1.1." 
+ std::to_string(rid_))); PublicKey key(std::get<0>(randomKeyPair(KeyType::ed25519))); auto consumer = overlay.resourceManager().newInboundEndpoint(remote); - auto slot = overlay.peerFinder().new_inbound_slot(local, remote); + auto [slot, _] = overlay.peerFinder().new_inbound_slot(local, remote); auto const peer = std::make_shared( env.app(), slot, diff --git a/src/test/peerfinder/PeerFinder_test.cpp b/src/test/peerfinder/PeerFinder_test.cpp index f35cbbdaae..64a1eb5091 100644 --- a/src/test/peerfinder/PeerFinder_test.cpp +++ b/src/test/peerfinder/PeerFinder_test.cpp @@ -20,6 +20,7 @@ #include #include +#include #include #include @@ -98,7 +99,7 @@ public: if (!list.empty()) { BEAST_EXPECT(list.size() == 1); - auto const slot = logic.new_outbound_slot(list.front()); + auto const [slot, _] = logic.new_outbound_slot(list.front()); BEAST_EXPECT(logic.onConnected( slot, beast::IP::Endpoint::from_string("65.0.0.2:5"))); logic.on_closed(slot); @@ -139,7 +140,7 @@ public: if (!list.empty()) { BEAST_EXPECT(list.size() == 1); - auto const slot = logic.new_outbound_slot(list.front()); + auto const [slot, _] = logic.new_outbound_slot(list.front()); if (!BEAST_EXPECT(logic.onConnected( slot, beast::IP::Endpoint::from_string("65.0.0.2:5")))) return; @@ -158,6 +159,7 @@ public: BEAST_EXPECT(n <= (seconds + 59) / 60); } + // test accepting an incoming slot for an already existing outgoing slot void test_duplicateOutIn() { @@ -166,8 +168,6 @@ public: TestChecker checker; TestStopwatch clock; Logic logic(clock, store, checker, journal_); - logic.addFixedPeer( - "test", beast::IP::Endpoint::from_string("65.0.0.1:5")); { Config c; c.autoConnect = false; @@ -176,28 +176,24 @@ public: logic.config(c); } - auto const list = logic.autoconnect(); - if (BEAST_EXPECT(!list.empty())) - { - BEAST_EXPECT(list.size() == 1); - auto const remote = list.front(); - auto const slot1 = logic.new_outbound_slot(remote); - if (BEAST_EXPECT(slot1 != nullptr)) - { - BEAST_EXPECT( - logic.connectedAddresses_.count(remote.address()) == 1); - auto const local = - beast::IP::Endpoint::from_string("65.0.0.2:1024"); - auto const slot2 = logic.new_inbound_slot(local, remote); - BEAST_EXPECT( - logic.connectedAddresses_.count(remote.address()) == 1); - if (!BEAST_EXPECT(slot2 == nullptr)) - logic.on_closed(slot2); - logic.on_closed(slot1); - } - } + auto const remote = beast::IP::Endpoint::from_string("65.0.0.1:5"); + auto const [slot1, r] = logic.new_outbound_slot(remote); + BEAST_EXPECT(slot1 != nullptr); + BEAST_EXPECT(r == Result::success); + BEAST_EXPECT(logic.connectedAddresses_.count(remote.address()) == 1); + + auto const local = beast::IP::Endpoint::from_string("65.0.0.2:1024"); + auto const [slot2, r2] = logic.new_inbound_slot(local, remote); + BEAST_EXPECT(logic.connectedAddresses_.count(remote.address()) == 1); + BEAST_EXPECT(r2 == Result::duplicatePeer); + + if (!BEAST_EXPECT(slot2 == nullptr)) + logic.on_closed(slot2); + + logic.on_closed(slot1); } + // test establishing outgoing slot for an already existing incoming slot void test_duplicateInOut() { @@ -206,8 +202,6 @@ public: TestChecker checker; TestStopwatch clock; Logic logic(clock, store, checker, journal_); - logic.addFixedPeer( - "test", beast::IP::Endpoint::from_string("65.0.0.1:5")); { Config c; c.autoConnect = false; @@ -216,33 +210,202 @@ public: logic.config(c); } - auto const list = logic.autoconnect(); - if (BEAST_EXPECT(!list.empty())) + auto const remote = beast::IP::Endpoint::from_string("65.0.0.1:5"); + auto const local = 
beast::IP::Endpoint::from_string("65.0.0.2:1024"); + + auto const [slot1, r] = logic.new_inbound_slot(local, remote); + BEAST_EXPECT(slot1 != nullptr); + BEAST_EXPECT(r == Result::success); + BEAST_EXPECT(logic.connectedAddresses_.count(remote.address()) == 1); + + auto const [slot2, r2] = logic.new_outbound_slot(remote); + BEAST_EXPECT(r2 == Result::duplicatePeer); + BEAST_EXPECT(logic.connectedAddresses_.count(remote.address()) == 1); + if (!BEAST_EXPECT(slot2 == nullptr)) + logic.on_closed(slot2); + logic.on_closed(slot1); + } + + void + test_peerLimitExceeded() + { + testcase("peer limit exceeded"); + TestStore store; + TestChecker checker; + TestStopwatch clock; + Logic logic(clock, store, checker, journal_); { - BEAST_EXPECT(list.size() == 1); - auto const remote = list.front(); - auto const local = - beast::IP::Endpoint::from_string("65.0.0.2:1024"); - auto const slot1 = logic.new_inbound_slot(local, remote); - if (BEAST_EXPECT(slot1 != nullptr)) - { - BEAST_EXPECT( - logic.connectedAddresses_.count(remote.address()) == 1); - auto const slot2 = logic.new_outbound_slot(remote); - BEAST_EXPECT( - logic.connectedAddresses_.count(remote.address()) == 1); - if (!BEAST_EXPECT(slot2 == nullptr)) - logic.on_closed(slot2); - logic.on_closed(slot1); - } + Config c; + c.autoConnect = false; + c.listeningPort = 1024; + c.ipLimit = 2; + logic.config(c); } + + auto const local = beast::IP::Endpoint::from_string("65.0.0.2:1024"); + auto const [slot, r] = logic.new_inbound_slot( + local, beast::IP::Endpoint::from_string("55.104.0.2:1025")); + BEAST_EXPECT(slot != nullptr); + BEAST_EXPECT(r == Result::success); + + auto const [slot1, r1] = logic.new_inbound_slot( + local, beast::IP::Endpoint::from_string("55.104.0.2:1026")); + BEAST_EXPECT(slot1 != nullptr); + BEAST_EXPECT(r1 == Result::success); + + auto const [slot2, r2] = logic.new_inbound_slot( + local, beast::IP::Endpoint::from_string("55.104.0.2:1027")); + BEAST_EXPECT(r2 == Result::ipLimitExceeded); + + if (!BEAST_EXPECT(slot2 == nullptr)) + logic.on_closed(slot2); + logic.on_closed(slot1); + logic.on_closed(slot); + } + + void + test_activate_duplicate_peer() + { + testcase("test activate duplicate peer"); + TestStore store; + TestChecker checker; + TestStopwatch clock; + Logic logic(clock, store, checker, journal_); + { + Config c; + c.autoConnect = false; + c.listeningPort = 1024; + c.ipLimit = 2; + logic.config(c); + } + + auto const local = beast::IP::Endpoint::from_string("65.0.0.2:1024"); + + PublicKey const pk1(randomKeyPair(KeyType::secp256k1).first); + + auto const [slot, rSlot] = logic.new_outbound_slot( + beast::IP::Endpoint::from_string("55.104.0.2:1025")); + BEAST_EXPECT(slot != nullptr); + BEAST_EXPECT(rSlot == Result::success); + + auto const [slot2, r2Slot] = logic.new_outbound_slot( + beast::IP::Endpoint::from_string("55.104.0.2:1026")); + BEAST_EXPECT(slot2 != nullptr); + BEAST_EXPECT(r2Slot == Result::success); + + BEAST_EXPECT(logic.onConnected(slot, local)); + BEAST_EXPECT(logic.onConnected(slot2, local)); + + BEAST_EXPECT(logic.activate(slot, pk1, false) == Result::success); + + // activating a different slot with the same node ID (pk) must fail + BEAST_EXPECT( + logic.activate(slot2, pk1, false) == Result::duplicatePeer); + + logic.on_closed(slot); + + // accept the same key for a new slot after removing the old slot + BEAST_EXPECT(logic.activate(slot2, pk1, false) == Result::success); + logic.on_closed(slot2); + } + + void + test_activate_inbound_disabled() + { + testcase("test activate inbound disabled"); + TestStore 
store; + TestChecker checker; + TestStopwatch clock; + Logic logic(clock, store, checker, journal_); + { + Config c; + c.autoConnect = false; + c.listeningPort = 1024; + c.ipLimit = 2; + logic.config(c); + } + + PublicKey const pk1(randomKeyPair(KeyType::secp256k1).first); + auto const local = beast::IP::Endpoint::from_string("65.0.0.2:1024"); + + auto const [slot, rSlot] = logic.new_inbound_slot( + local, beast::IP::Endpoint::from_string("55.104.0.2:1025")); + BEAST_EXPECT(slot != nullptr); + BEAST_EXPECT(rSlot == Result::success); + + BEAST_EXPECT( + logic.activate(slot, pk1, false) == Result::inboundDisabled); + + { + Config c; + c.autoConnect = false; + c.listeningPort = 1024; + c.ipLimit = 2; + c.inPeers = 1; + logic.config(c); + } + // new inbound slot must succeed when inbound connections are enabled + BEAST_EXPECT(logic.activate(slot, pk1, false) == Result::success); + + // creating a new inbound slot must succeed as IP Limit is not exceeded + auto const [slot2, r2Slot] = logic.new_inbound_slot( + local, beast::IP::Endpoint::from_string("55.104.0.2:1026")); + BEAST_EXPECT(slot2 != nullptr); + BEAST_EXPECT(r2Slot == Result::success); + + PublicKey const pk2(randomKeyPair(KeyType::secp256k1).first); + + // an inbound slot exceeding inPeers limit must fail + BEAST_EXPECT(logic.activate(slot2, pk2, false) == Result::full); + + logic.on_closed(slot2); + logic.on_closed(slot); + } + + void + test_addFixedPeer_no_port() + { + testcase("test addFixedPeer no port"); + TestStore store; + TestChecker checker; + TestStopwatch clock; + Logic logic(clock, store, checker, journal_); + try + { + logic.addFixedPeer( + "test", beast::IP::Endpoint::from_string("65.0.0.2")); + fail("invalid endpoint successfully added"); + } + catch (std::runtime_error const& e) + { + pass(); + } + } + + void + test_onConnected_self_connection() + { + testcase("test onConnected self connection"); + TestStore store; + TestChecker checker; + TestStopwatch clock; + Logic logic(clock, store, checker, journal_); + + auto const local = beast::IP::Endpoint::from_string("65.0.0.2:1234"); + auto const [slot, r] = logic.new_outbound_slot(local); + BEAST_EXPECT(slot != nullptr); + BEAST_EXPECT(r == Result::success); + + // Must fail when a slot is to our own IP address + BEAST_EXPECT(!logic.onConnected(slot, local)); + logic.on_closed(slot); } void test_config() { - // if peers_max is configured then peers_in_max and peers_out_max are - // ignored + // if peers_max is configured then peers_in_max and peers_out_max + // are ignored auto run = [&](std::string const& test, std::optional maxPeers, std::optional maxIn, @@ -282,13 +445,21 @@ public: Counts counts; counts.onConfig(config); BEAST_EXPECT( - counts.out_max() == expectOut && - counts.inboundSlots() == expectIn && + counts.out_max() == expectOut && counts.in_max() == expectIn && config.ipLimit == expectIpLimit); + + TestStore store; + TestChecker checker; + TestStopwatch clock; + Logic logic(clock, store, checker, journal_); + logic.config(config); + + BEAST_EXPECT(logic.config() == config); }; // if max_peers == 0 => maxPeers = 21, - // else if max_peers < 10 => maxPeers = 10 else maxPeers = max_peers + // else if max_peers < 10 => maxPeers = 10 else maxPeers = + // max_peers // expectOut => if legacy => max(0.15 * maxPeers, 10), // if legacy && !wantIncoming => maxPeers else max_out_peers // expectIn => if legacy && wantIncoming => maxPeers - outPeers @@ -364,6 +535,11 @@ public: test_duplicateInOut(); test_config(); test_invalid_config(); + test_peerLimitExceeded(); + 
test_activate_duplicate_peer(); + test_activate_inbound_disabled(); + test_addFixedPeer_no_port(); + test_onConnected_self_connection(); } }; diff --git a/src/xrpld/overlay/detail/OverlayImpl.cpp b/src/xrpld/overlay/detail/OverlayImpl.cpp index f2c683b69f..8d295faace 100644 --- a/src/xrpld/overlay/detail/OverlayImpl.cpp +++ b/src/xrpld/overlay/detail/OverlayImpl.cpp @@ -195,14 +195,16 @@ OverlayImpl::onHandoff( if (consumer.disconnect(journal)) return handoff; - auto const slot = m_peerFinder->new_inbound_slot( + auto const [slot, result] = m_peerFinder->new_inbound_slot( beast::IPAddressConversion::from_asio(local_endpoint), beast::IPAddressConversion::from_asio(remote_endpoint)); if (slot == nullptr) { - // self-connect, close + // connection refused either IP limit exceeded or self-connect handoff.moved = false; + JLOG(journal.debug()) + << "Peer " << remote_endpoint << " refused, " << to_string(result); return handoff; } @@ -402,10 +404,11 @@ OverlayImpl::connect(beast::IP::Endpoint const& remote_endpoint) return; } - auto const slot = peerFinder().new_outbound_slot(remote_endpoint); + auto const [slot, result] = peerFinder().new_outbound_slot(remote_endpoint); if (slot == nullptr) { - JLOG(journal_.debug()) << "Connect: No slot for " << remote_endpoint; + JLOG(journal_.debug()) << "Connect: No slot for " << remote_endpoint + << ": " << to_string(result); return; } diff --git a/src/xrpld/peerfinder/PeerfinderManager.h b/src/xrpld/peerfinder/PeerfinderManager.h index f399251c38..b7ea738a81 100644 --- a/src/xrpld/peerfinder/PeerfinderManager.h +++ b/src/xrpld/peerfinder/PeerfinderManager.h @@ -109,6 +109,9 @@ struct Config std::uint16_t port, bool validationPublicKey, int ipLimit); + + friend bool + operator==(Config const& lhs, Config const& rhs); }; //------------------------------------------------------------------------------ @@ -136,7 +139,13 @@ using Endpoints = std::vector; //------------------------------------------------------------------------------ /** Possible results from activating a slot. */ -enum class Result { duplicate, full, success }; +enum class Result { + inboundDisabled, + duplicatePeer, + ipLimitExceeded, + full, + success +}; /** * @brief Converts a `Result` enum value to its string representation. @@ -157,12 +166,16 @@ to_string(Result result) noexcept { switch (result) { - case Result::success: - return "success"; - case Result::duplicate: - return "duplicate connection"; + case Result::inboundDisabled: + return "inbound disabled"; + case Result::duplicatePeer: + return "peer already connected"; + case Result::ipLimitExceeded: + return "ip limit exceeded"; case Result::full: return "slots full"; + case Result::success: + return "success"; } return "unknown"; @@ -234,7 +247,7 @@ public: If nullptr is returned, then the slot could not be assigned. Usually this is because of a detected self-connection. */ - virtual std::shared_ptr + virtual std::pair, Result> new_inbound_slot( beast::IP::Endpoint const& local_endpoint, beast::IP::Endpoint const& remote_endpoint) = 0; @@ -243,7 +256,7 @@ public: If nullptr is returned, then the slot could not be assigned. Usually this is because of a duplicate connection. */ - virtual std::shared_ptr + virtual std::pair, Result> new_outbound_slot(beast::IP::Endpoint const& remote_endpoint) = 0; /** Called when mtENDPOINTS is received. 
*/ diff --git a/src/xrpld/peerfinder/detail/Counts.h b/src/xrpld/peerfinder/detail/Counts.h index c91b27b026..821431c5bb 100644 --- a/src/xrpld/peerfinder/detail/Counts.h +++ b/src/xrpld/peerfinder/detail/Counts.h @@ -163,7 +163,7 @@ public: /** Returns the total number of inbound slots. */ int - inboundSlots() const + in_max() const { return m_in_max; } diff --git a/src/xrpld/peerfinder/detail/Logic.h b/src/xrpld/peerfinder/detail/Logic.h index e23bbc29e1..4b92a1d143 100644 --- a/src/xrpld/peerfinder/detail/Logic.h +++ b/src/xrpld/peerfinder/detail/Logic.h @@ -172,9 +172,7 @@ public: void addFixedPeer(std::string const& name, beast::IP::Endpoint const& ep) { - std::vector v; - v.push_back(ep); - addFixedPeer(name, v); + addFixedPeer(name, std::vector{ep}); } void @@ -261,7 +259,7 @@ public: //-------------------------------------------------------------------------- - SlotImp::ptr + std::pair new_inbound_slot( beast::IP::Endpoint const& local_endpoint, beast::IP::Endpoint const& remote_endpoint) @@ -277,12 +275,12 @@ public: { auto const count = connectedAddresses_.count(remote_endpoint.address()); - if (count > config_.ipLimit) + if (count + 1 > config_.ipLimit) { JLOG(m_journal.debug()) << beast::leftw(18) << "Logic dropping inbound " << remote_endpoint << " because of ip limits."; - return SlotImp::ptr(); + return {SlotImp::ptr(), Result::ipLimitExceeded}; } } @@ -292,7 +290,7 @@ public: JLOG(m_journal.debug()) << beast::leftw(18) << "Logic dropping " << remote_endpoint << " as duplicate incoming"; - return SlotImp::ptr(); + return {SlotImp::ptr(), Result::duplicatePeer}; } // Create the slot @@ -314,11 +312,11 @@ public: // Update counts counts_.add(*slot); - return result.first->second; + return {result.first->second, Result::success}; } // Can't check for self-connect because we don't know the local endpoint - SlotImp::ptr + std::pair new_outbound_slot(beast::IP::Endpoint const& remote_endpoint) { JLOG(m_journal.debug()) @@ -332,7 +330,7 @@ public: JLOG(m_journal.debug()) << beast::leftw(18) << "Logic dropping " << remote_endpoint << " as duplicate connect"; - return SlotImp::ptr(); + return {SlotImp::ptr(), Result::duplicatePeer}; } // Create the slot @@ -353,7 +351,7 @@ public: // Update counts counts_.add(*slot); - return result.first->second; + return {result.first->second, Result::success}; } bool @@ -417,7 +415,7 @@ public: // Check for duplicate connection by key if (keys_.find(key) != keys_.end()) - return Result::duplicate; + return Result::duplicatePeer; // If the peer belongs to a cluster or is reserved, // update the slot to reflect that. @@ -430,6 +428,8 @@ public: { if (!slot->inbound()) bootcache_.on_success(slot->remote_endpoint()); + if (slot->inbound() && counts_.in_max() == 0) + return Result::inboundDisabled; return Result::full; } @@ -651,7 +651,7 @@ public: // 2. We have slots // 3. 
We haven't failed the firewalled test // - if (config_.wantIncoming && counts_.inboundSlots() > 0) + if (config_.wantIncoming && counts_.in_max() > 0) { Endpoint ep; ep.hops = 0; diff --git a/src/xrpld/peerfinder/detail/PeerfinderConfig.cpp b/src/xrpld/peerfinder/detail/PeerfinderConfig.cpp index 3075224189..30eb778770 100644 --- a/src/xrpld/peerfinder/detail/PeerfinderConfig.cpp +++ b/src/xrpld/peerfinder/detail/PeerfinderConfig.cpp @@ -34,6 +34,17 @@ Config::Config() { } +bool +operator==(Config const& lhs, Config const& rhs) +{ + return lhs.autoConnect == rhs.autoConnect && + lhs.peerPrivate == rhs.peerPrivate && + lhs.wantIncoming == rhs.wantIncoming && lhs.inPeers == rhs.inPeers && + lhs.maxPeers == rhs.maxPeers && lhs.outPeers == rhs.outPeers && + lhs.features == lhs.features && lhs.ipLimit == rhs.ipLimit && + lhs.listeningPort == rhs.listeningPort; +} + std::size_t Config::calcOutPeers() const { diff --git a/src/xrpld/peerfinder/detail/PeerfinderManager.cpp b/src/xrpld/peerfinder/detail/PeerfinderManager.cpp index 205df67fa6..462820cca2 100644 --- a/src/xrpld/peerfinder/detail/PeerfinderManager.cpp +++ b/src/xrpld/peerfinder/detail/PeerfinderManager.cpp @@ -125,7 +125,7 @@ public: //-------------------------------------------------------------------------- - std::shared_ptr + std::pair, Result> new_inbound_slot( beast::IP::Endpoint const& local_endpoint, beast::IP::Endpoint const& remote_endpoint) override @@ -133,7 +133,7 @@ public: return m_logic.new_inbound_slot(local_endpoint, remote_endpoint); } - std::shared_ptr + std::pair, Result> new_outbound_slot(beast::IP::Endpoint const& remote_endpoint) override { return m_logic.new_outbound_slot(remote_endpoint); From e0b9812fc590fb87796b8dae7253aeca6f1737e7 Mon Sep 17 00:00:00 2001 From: Mayukha Vadari Date: Fri, 29 Aug 2025 15:52:09 -0400 Subject: [PATCH 149/244] Refactor `ledger_entry` RPC source code and tests (#5237) This is a major refactor of LedgerEntry.cpp. It adds a number of helper functions to make the code easier to maintain. It also splits up the ledger and ledger_entry tests into different files, and cleans up the ledger_entry tests to make them easier to write and maintain. This refactor also caught a few bugs in some of the other RPC processing, so those are fixed along the way. --- include/xrpl/protocol/ErrorCodes.h | 7 +- include/xrpl/protocol/jss.h | 4 + src/libxrpl/json/json_value.cpp | 5 +- src/libxrpl/protocol/ErrorCodes.cpp | 5 +- src/libxrpl/protocol/STXChainBridge.cpp | 19 +- src/test/app/Vault_test.cpp | 12 - src/test/jtx/impl/xchain_bridge.cpp | 16 +- src/test/rpc/LedgerEntry_test.cpp | 1869 ++++++++----------- src/xrpld/rpc/detail/RPCHelpers.cpp | 17 +- src/xrpld/rpc/handlers/LedgerEntry.cpp | 1171 +++++------- src/xrpld/rpc/handlers/LedgerEntryHelpers.h | 299 +++ 11 files changed, 1577 insertions(+), 1847 deletions(-) create mode 100644 src/xrpld/rpc/handlers/LedgerEntryHelpers.h diff --git a/include/xrpl/protocol/ErrorCodes.h b/include/xrpl/protocol/ErrorCodes.h index f06b927566..5da3ad0b33 100644 --- a/include/xrpl/protocol/ErrorCodes.h +++ b/include/xrpl/protocol/ErrorCodes.h @@ -157,7 +157,12 @@ enum error_code_i { // Pathfinding rpcDOMAIN_MALFORMED = 97, - rpcLAST = rpcDOMAIN_MALFORMED // rpcLAST should always equal the last code. + // ledger_entry + rpcENTRY_NOT_FOUND = 98, + rpcUNEXPECTED_LEDGER_TYPE = 99, + + rpcLAST = + rpcUNEXPECTED_LEDGER_TYPE // rpcLAST should always equal the last code. }; /** Codes returned in the `warnings` array of certain RPC commands. 
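The two ledger_entry error codes introduced above are registered with the token/message pairs "entryNotFound" / "Entry not found." and "unexpectedLedgerType" / "Unexpected ledger type.", both reported with HTTP status 400, and the reworked tests later in this patch assert exactly those strings. The following is a minimal, self-contained sketch of that mapping only; the enum, struct, and function names are illustrative and are not rippled's own error machinery (rippled registers these entries in its error_code_i / ErrorInfo table, in the hunks above and below).

// Illustrative sketch only -- not part of the patch. It mirrors the
// token / message / HTTP-status triples registered for the two new
// ledger_entry error codes; names here are hypothetical.
#include <cstdint>
#include <iostream>
#include <string>

enum class LedgerEntryError { entryNotFound, unexpectedLedgerType };

struct ErrorDescription
{
    std::string token;    // the "error" field of the RPC response
    std::string message;  // the "error_message" field of the RPC response
    std::uint16_t httpStatus;
};

ErrorDescription
describe(LedgerEntryError e)
{
    switch (e)
    {
        case LedgerEntryError::entryNotFound:
            return {"entryNotFound", "Entry not found.", 400};
        case LedgerEntryError::unexpectedLedgerType:
            return {"unexpectedLedgerType", "Unexpected ledger type.", 400};
    }
    return {"internal", "Internal error.", 500};  // unreachable
}

int
main()
{
    auto const d = describe(LedgerEntryError::entryNotFound);
    std::cout << d.token << ": " << d.message << " (" << d.httpStatus << ")\n";
}

This is the same shape the tests rely on: checkErrorValue() compares the response's "error" and "error_message" fields against these strings.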
diff --git a/include/xrpl/protocol/jss.h b/include/xrpl/protocol/jss.h index 67a045fa58..68d2497aca 100644 --- a/include/xrpl/protocol/jss.h +++ b/include/xrpl/protocol/jss.h @@ -68,9 +68,13 @@ JSS(Flags); // in/out: TransactionSign; field. JSS(Holder); // field. JSS(Invalid); // JSS(Issuer); // in: Credential transactions +JSS(IssuingChainDoor); // field. +JSS(IssuingChainIssue); // field. JSS(LastLedgerSequence); // in: TransactionSign; field JSS(LastUpdateTime); // field. JSS(LimitAmount); // field. +JSS(LockingChainDoor); // field. +JSS(LockingChainIssue); // field. JSS(NetworkID); // field. JSS(LPTokenOut); // in: AMM Liquidity Provider deposit tokens JSS(LPTokenIn); // in: AMM Liquidity Provider withdraw tokens diff --git a/src/libxrpl/json/json_value.cpp b/src/libxrpl/json/json_value.cpp index a1e0a04875..1df8f6cf31 100644 --- a/src/libxrpl/json/json_value.cpp +++ b/src/libxrpl/json/json_value.cpp @@ -24,6 +24,7 @@ #include #include +#include #include #include #include @@ -685,7 +686,9 @@ Value::isConvertibleTo(ValueType other) const (other == intValue && value_.real_ >= minInt && value_.real_ <= maxInt) || (other == uintValue && value_.real_ >= 0 && - value_.real_ <= maxUInt) || + value_.real_ <= maxUInt && + std::fabs(round(value_.real_) - value_.real_) < + std::numeric_limits::epsilon()) || other == realValue || other == stringValue || other == booleanValue; diff --git a/src/libxrpl/protocol/ErrorCodes.cpp b/src/libxrpl/protocol/ErrorCodes.cpp index 3109f51d05..ec295343ce 100644 --- a/src/libxrpl/protocol/ErrorCodes.cpp +++ b/src/libxrpl/protocol/ErrorCodes.cpp @@ -117,7 +117,10 @@ constexpr static ErrorInfo unorderedErrorInfos[]{ {rpcORACLE_MALFORMED, "oracleMalformed", "Oracle request is malformed.", 400}, {rpcBAD_CREDENTIALS, "badCredentials", "Credentials do not exist, are not accepted, or have expired.", 400}, {rpcTX_SIGNED, "transactionSigned", "Transaction should not be signed.", 400}, - {rpcDOMAIN_MALFORMED, "domainMalformed", "Domain is malformed.", 400}}; + {rpcDOMAIN_MALFORMED, "domainMalformed", "Domain is malformed.", 400}, + {rpcENTRY_NOT_FOUND, "entryNotFound", "Entry not found.", 400}, + {rpcUNEXPECTED_LEDGER_TYPE, "unexpectedLedgerType", "Unexpected ledger type.", 400}, +}; // clang-format on // Sort and validate unorderedErrorInfos at compile time. 
Should be diff --git a/src/libxrpl/protocol/STXChainBridge.cpp b/src/libxrpl/protocol/STXChainBridge.cpp index fb192d82d6..e835735f08 100644 --- a/src/libxrpl/protocol/STXChainBridge.cpp +++ b/src/libxrpl/protocol/STXChainBridge.cpp @@ -27,6 +27,7 @@ #include #include #include +#include #include @@ -98,12 +99,10 @@ STXChainBridge::STXChainBridge(SField const& name, Json::Value const& v) }; checkExtra(v); - Json::Value const& lockingChainDoorStr = - v[sfLockingChainDoor.getJsonName()]; - Json::Value const& lockingChainIssue = v[sfLockingChainIssue.getJsonName()]; - Json::Value const& issuingChainDoorStr = - v[sfIssuingChainDoor.getJsonName()]; - Json::Value const& issuingChainIssue = v[sfIssuingChainIssue.getJsonName()]; + Json::Value const& lockingChainDoorStr = v[jss::LockingChainDoor]; + Json::Value const& lockingChainIssue = v[jss::LockingChainIssue]; + Json::Value const& issuingChainDoorStr = v[jss::IssuingChainDoor]; + Json::Value const& issuingChainIssue = v[jss::IssuingChainIssue]; if (!lockingChainDoorStr.isString()) { @@ -161,10 +160,10 @@ Json::Value STXChainBridge::getJson(JsonOptions jo) const { Json::Value v; - v[sfLockingChainDoor.getJsonName()] = lockingChainDoor_.getJson(jo); - v[sfLockingChainIssue.getJsonName()] = lockingChainIssue_.getJson(jo); - v[sfIssuingChainDoor.getJsonName()] = issuingChainDoor_.getJson(jo); - v[sfIssuingChainIssue.getJsonName()] = issuingChainIssue_.getJson(jo); + v[jss::LockingChainDoor] = lockingChainDoor_.getJson(jo); + v[jss::LockingChainIssue] = lockingChainIssue_.getJson(jo); + v[jss::IssuingChainDoor] = issuingChainDoor_.getJson(jo); + v[jss::IssuingChainIssue] = issuingChainIssue_.getJson(jo); return v; } diff --git a/src/test/app/Vault_test.cpp b/src/test/app/Vault_test.cpp index 7ea38db2b1..7add8b3eda 100644 --- a/src/test/app/Vault_test.cpp +++ b/src/test/app/Vault_test.cpp @@ -3028,18 +3028,6 @@ class Vault_test : public beast::unit_test::suite "malformedRequest"); } - { - testcase("RPC ledger_entry zero seq"); - Json::Value jvParams; - jvParams[jss::ledger_index] = jss::validated; - jvParams[jss::vault][jss::owner] = issuer.human(); - jvParams[jss::vault][jss::seq] = 0; - auto jvVault = env.rpc("json", "ledger_entry", to_string(jvParams)); - BEAST_EXPECT( - jvVault[jss::result][jss::error].asString() == - "malformedRequest"); - } - { testcase("RPC ledger_entry negative seq"); Json::Value jvParams; diff --git a/src/test/jtx/impl/xchain_bridge.cpp b/src/test/jtx/impl/xchain_bridge.cpp index 6f167d7508..9e8fa4795f 100644 --- a/src/test/jtx/impl/xchain_bridge.cpp +++ b/src/test/jtx/impl/xchain_bridge.cpp @@ -44,10 +44,10 @@ bridge( Issue const& issuingChainIssue) { Json::Value jv; - jv[sfLockingChainDoor.getJsonName()] = lockingChainDoor.human(); - jv[sfLockingChainIssue.getJsonName()] = to_json(lockingChainIssue); - jv[sfIssuingChainDoor.getJsonName()] = issuingChainDoor.human(); - jv[sfIssuingChainIssue.getJsonName()] = to_json(issuingChainIssue); + jv[jss::LockingChainDoor] = lockingChainDoor.human(); + jv[jss::LockingChainIssue] = to_json(lockingChainIssue); + jv[jss::IssuingChainDoor] = issuingChainDoor.human(); + jv[jss::IssuingChainIssue] = to_json(issuingChainIssue); return jv; } @@ -60,10 +60,10 @@ bridge_rpc( Issue const& issuingChainIssue) { Json::Value jv; - jv[sfLockingChainDoor.getJsonName()] = lockingChainDoor.human(); - jv[sfLockingChainIssue.getJsonName()] = to_json(lockingChainIssue); - jv[sfIssuingChainDoor.getJsonName()] = issuingChainDoor.human(); - jv[sfIssuingChainIssue.getJsonName()] = to_json(issuingChainIssue); + 
jv[jss::LockingChainDoor] = lockingChainDoor.human(); + jv[jss::LockingChainIssue] = to_json(lockingChainIssue); + jv[jss::IssuingChainDoor] = issuingChainDoor.human(); + jv[jss::IssuingChainIssue] = to_json(issuingChainIssue); return jv; } diff --git a/src/test/rpc/LedgerEntry_test.cpp b/src/test/rpc/LedgerEntry_test.cpp index 89cb7b72eb..a88f6ab612 100644 --- a/src/test/rpc/LedgerEntry_test.cpp +++ b/src/test/rpc/LedgerEntry_test.cpp @@ -31,40 +31,435 @@ #include #include +#if (defined(__clang_major__) && __clang_major__ < 15) +#include +using source_location = std::experimental::source_location; +#else +#include +using std::source_location; +#endif namespace ripple { namespace test { +enum class FieldType { + AccountField, + BlobField, + ArrayField, + CurrencyField, + HashField, + HashOrObjectField, + ObjectField, + StringField, + TwoAccountArrayField, + UInt32Field, + UInt64Field, +}; + +std::vector> mappings{ + {jss::account, FieldType::AccountField}, + {jss::accounts, FieldType::TwoAccountArrayField}, + {jss::authorize, FieldType::AccountField}, + {jss::authorized, FieldType::AccountField}, + {jss::credential_type, FieldType::BlobField}, + {jss::currency, FieldType::CurrencyField}, + {jss::issuer, FieldType::AccountField}, + {jss::oracle_document_id, FieldType::UInt32Field}, + {jss::owner, FieldType::AccountField}, + {jss::seq, FieldType::UInt32Field}, + {jss::subject, FieldType::AccountField}, + {jss::ticket_seq, FieldType::UInt32Field}, +}; + +FieldType +getFieldType(Json::StaticString fieldName) +{ + auto it = std::ranges::find_if(mappings, [&fieldName](auto const& pair) { + return pair.first == fieldName; + }); + if (it != mappings.end()) + { + return it->second; + } + else + { + Throw( + "`mappings` is missing field " + std::string(fieldName.c_str())); + } +} + +std::string +getTypeName(FieldType typeID) +{ + switch (typeID) + { + case FieldType::UInt32Field: + return "number"; + case FieldType::UInt64Field: + return "number"; + case FieldType::HashField: + return "hex string"; + case FieldType::AccountField: + return "AccountID"; + case FieldType::BlobField: + return "hex string"; + case FieldType::CurrencyField: + return "Currency"; + case FieldType::ArrayField: + return "array"; + case FieldType::HashOrObjectField: + return "hex string or object"; + case FieldType::TwoAccountArrayField: + return "length-2 array of Accounts"; + default: + Throw( + "unknown type " + std::to_string(static_cast(typeID))); + } +} + class LedgerEntry_test : public beast::unit_test::suite { void checkErrorValue( Json::Value const& jv, std::string const& err, - std::string const& msg) + std::string const& msg, + source_location const location = source_location::current()) { if (BEAST_EXPECT(jv.isMember(jss::status))) - BEAST_EXPECT(jv[jss::status] == "error"); + BEAST_EXPECTS( + jv[jss::status] == "error", std::to_string(location.line())); if (BEAST_EXPECT(jv.isMember(jss::error))) - BEAST_EXPECT(jv[jss::error] == err); + BEAST_EXPECTS( + jv[jss::error] == err, + "Expected error " + err + ", received " + + jv[jss::error].asString() + ", at line " + + std::to_string(location.line()) + ", " + + jv.toStyledString()); if (msg.empty()) { - BEAST_EXPECT( + BEAST_EXPECTS( jv[jss::error_message] == Json::nullValue || - jv[jss::error_message] == ""); + jv[jss::error_message] == "", + "Expected no error message, received \"" + + jv[jss::error_message].asString() + "\", at line " + + std::to_string(location.line()) + ", " + + jv.toStyledString()); } else if (BEAST_EXPECT(jv.isMember(jss::error_message))) - 
BEAST_EXPECT(jv[jss::error_message] == msg); + BEAST_EXPECTS( + jv[jss::error_message] == msg, + "Expected error message \"" + msg + "\", received \"" + + jv[jss::error_message].asString() + "\", at line " + + std::to_string(location.line()) + ", " + + jv.toStyledString()); } - // Corrupt a valid address by replacing the 10th character with '!'. - // '!' is not part of the ripple alphabet. - std::string - makeBadAddress(std::string good) + std::vector + getBadValues(FieldType fieldType) { - std::string ret = std::move(good); - ret.replace(10, 1, 1, '!'); - return ret; + static Json::Value const injectObject = []() { + Json::Value obj(Json::objectValue); + obj[jss::account] = "rhigTLJJyXXSRUyRCQtqi1NoAZZzZnS4KU"; + obj[jss::ledger_index] = "validated"; + return obj; + }(); + static Json::Value const injectArray = []() { + Json::Value arr(Json::arrayValue); + arr[0u] = "rhigTLJJyXXSRUyRCQtqi1NoAZZzZnS4KU"; + arr[1u] = "validated"; + return arr; + }(); + static std::array const allBadValues = { + "", // 0 + true, // 1 + 1, // 2 + "1", // 3 + -1, // 4 + 1.1, // 5 + "-1", // 6 + "abcdef", // 7 + "ABCDEF", // 8 + "12KK", // 9 + "0123456789ABCDEFGH", // 10 + "rJxKV9e9p6wiPw!!!!xrJ4X1n98LosPL1sgcJW", // 11 + "rPSTrR5yEr11uMkfsz1kHCp9jK4aoa3Avv", // 12 + "n9K2isxwTxcSHJKxMkJznDoWXAUs7NNy49H9Fknz1pC7oHAH3kH9", // 13 + "USD", // 14 + "USDollars", // 15 + "5233D68B4D44388F98559DE42903767803EFA7C1F8D01413FC16EE6B01403D" + "6D", // 16 + Json::arrayValue, // 17 + Json::objectValue, // 18 + injectObject, // 19 + injectArray // 20 + }; + + auto remove = + [&](std::vector indices) -> std::vector { + std::unordered_set indexSet( + indices.begin(), indices.end()); + std::vector values; + values.reserve(allBadValues.size() - indexSet.size()); + for (std::size_t i = 0; i < allBadValues.size(); ++i) + { + if (indexSet.find(i) == indexSet.end()) + { + values.push_back(allBadValues[i]); + } + } + return values; + }; + + static auto const& badUInt32Values = remove({2, 3}); + static auto const& badUInt64Values = remove({2, 3}); + static auto const& badHashValues = remove({2, 3, 7, 8, 16}); + static auto const& badAccountValues = remove({12}); + static auto const& badBlobValues = remove({3, 7, 8, 16}); + static auto const& badCurrencyValues = remove({14}); + static auto const& badArrayValues = remove({17, 20}); + static auto const& badIndexValues = remove({12, 16, 18, 19}); + + switch (fieldType) + { + case FieldType::UInt32Field: + return badUInt32Values; + case FieldType::UInt64Field: + return badUInt64Values; + case FieldType::HashField: + return badHashValues; + case FieldType::AccountField: + return badAccountValues; + case FieldType::BlobField: + return badBlobValues; + case FieldType::CurrencyField: + return badCurrencyValues; + case FieldType::ArrayField: + case FieldType::TwoAccountArrayField: + return badArrayValues; + case FieldType::HashOrObjectField: + return badIndexValues; + default: + Throw( + "unknown type " + + std::to_string(static_cast(fieldType))); + } + } + + Json::Value + getCorrectValue(Json::StaticString fieldName) + { + static Json::Value const twoAccountArray = []() { + Json::Value arr(Json::arrayValue); + arr[0u] = "rhigTLJJyXXSRUyRCQtqi1NoAZZzZnS4KU"; + arr[1u] = "r4MrUGTdB57duTnRs6KbsRGQXgkseGb1b5"; + return arr; + }(); + + auto const typeID = getFieldType(fieldName); + switch (typeID) + { + case FieldType::UInt32Field: + return 1; + case FieldType::UInt64Field: + return 1; + case FieldType::HashField: + return "5233D68B4D44388F98559DE42903767803EFA7C1F8D01413FC16EE6" + "B01403D6D"; + 
case FieldType::AccountField: + return "r4MrUGTdB57duTnRs6KbsRGQXgkseGb1b5"; + case FieldType::BlobField: + return "ABCDEF"; + case FieldType::CurrencyField: + return "USD"; + case FieldType::ArrayField: + return Json::arrayValue; + case FieldType::HashOrObjectField: + return "5233D68B4D44388F98559DE42903767803EFA7C1F8D01413FC16EE6" + "B01403D6D"; + case FieldType::TwoAccountArrayField: + return twoAccountArray; + default: + Throw( + "unknown type " + + std::to_string(static_cast(typeID))); + } + } + + void + testMalformedField( + test::jtx::Env& env, + Json::Value correctRequest, + Json::StaticString const fieldName, + FieldType const typeID, + std::string const& expectedError, + bool required = true, + source_location const location = source_location::current()) + { + forAllApiVersions([&, this](unsigned apiVersion) { + if (required) + { + correctRequest.removeMember(fieldName); + Json::Value const jrr = env.rpc( + apiVersion, + "json", + "ledger_entry", + to_string(correctRequest))[jss::result]; + if (apiVersion < 2u) + checkErrorValue(jrr, "unknownOption", "", location); + else + checkErrorValue( + jrr, + "invalidParams", + "No ledger_entry params provided.", + location); + } + auto tryField = [&](Json::Value fieldValue) -> void { + correctRequest[fieldName] = fieldValue; + Json::Value const jrr = env.rpc( + apiVersion, + "json", + "ledger_entry", + to_string(correctRequest))[jss::result]; + auto const expectedErrMsg = + RPC::expected_field_message(fieldName, getTypeName(typeID)); + checkErrorValue(jrr, expectedError, expectedErrMsg, location); + }; + + auto const& badValues = getBadValues(typeID); + for (auto const& value : badValues) + { + tryField(value); + } + if (required) + { + tryField(Json::nullValue); + } + }); + } + + void + testMalformedSubfield( + test::jtx::Env& env, + Json::Value correctRequest, + Json::StaticString parentFieldName, + Json::StaticString fieldName, + FieldType typeID, + std::string const& expectedError, + bool required = true, + source_location const location = source_location::current()) + { + forAllApiVersions([&, this](unsigned apiVersion) { + if (required) + { + correctRequest[parentFieldName].removeMember(fieldName); + Json::Value const jrr = env.rpc( + apiVersion, + "json", + "ledger_entry", + to_string(correctRequest))[jss::result]; + checkErrorValue( + jrr, + "malformedRequest", + RPC::missing_field_message(fieldName.c_str()), + location); + + correctRequest[parentFieldName][fieldName] = Json::nullValue; + Json::Value const jrr2 = env.rpc( + apiVersion, + "json", + "ledger_entry", + to_string(correctRequest))[jss::result]; + checkErrorValue( + jrr2, + "malformedRequest", + RPC::missing_field_message(fieldName.c_str()), + location); + } + auto tryField = [&](Json::Value fieldValue) -> void { + correctRequest[parentFieldName][fieldName] = fieldValue; + + Json::Value const jrr = env.rpc( + apiVersion, + "json", + "ledger_entry", + to_string(correctRequest))[jss::result]; + checkErrorValue( + jrr, + expectedError, + RPC::expected_field_message(fieldName, getTypeName(typeID)), + location); + }; + + auto const& badValues = getBadValues(typeID); + for (auto const& value : badValues) + { + tryField(value); + } + }); + } + + // No subfields + void + runLedgerEntryTest( + test::jtx::Env& env, + Json::StaticString const& parentField, + source_location const location = source_location::current()) + { + testMalformedField( + env, + Json::Value{}, + parentField, + FieldType::HashField, + "malformedRequest", + true, + location); + } + + struct Subfield + { + 
Json::StaticString fieldName; + std::string malformedErrorMsg; + bool required = true; + }; + + void + runLedgerEntryTest( + test::jtx::Env& env, + Json::StaticString const& parentField, + std::vector const& subfields, + source_location const location = source_location::current()) + { + testMalformedField( + env, + Json::Value{}, + parentField, + FieldType::HashOrObjectField, + "malformedRequest", + true, + location); + + Json::Value correctOutput; + correctOutput[parentField] = Json::objectValue; + for (auto const& subfield : subfields) + { + correctOutput[parentField][subfield.fieldName] = + getCorrectValue(subfield.fieldName); + } + + for (auto const& subfield : subfields) + { + auto const fieldType = getFieldType(subfield.fieldName); + testMalformedSubfield( + env, + correctOutput, + parentField, + subfield.fieldName, + fieldType, + subfield.malformedErrorMsg, + subfield.required, + location); + } } void @@ -76,7 +471,6 @@ class LedgerEntry_test : public beast::unit_test::suite Account const alice{"alice"}; env.fund(XRP(10000), alice); env.close(); - { // Missing ledger_entry ledger_hash Json::Value jvParams; @@ -88,6 +482,33 @@ class LedgerEntry_test : public beast::unit_test::suite "json", "ledger_entry", to_string(jvParams))[jss::result]; checkErrorValue(jrr, "lgrNotFound", "ledgerNotFound"); } + { + // Missing ledger_entry ledger_hash + Json::Value jvParams; + jvParams[jss::account_root] = alice.human(); + auto const typeId = FieldType::HashField; + + forAllApiVersions([&, this](unsigned apiVersion) { + auto tryField = [&](Json::Value fieldValue) -> void { + jvParams[jss::ledger_hash] = fieldValue; + Json::Value const jrr = env.rpc( + apiVersion, + "json", + "ledger_entry", + to_string(jvParams))[jss::result]; + auto const expectedErrMsg = fieldValue.isString() + ? "ledgerHashMalformed" + : "ledgerHashNotString"; + checkErrorValue(jrr, "invalidParams", expectedErrMsg); + }; + + auto const& badValues = getBadValues(typeId); + for (auto const& value : badValues) + { + tryField(value); + } + }); + } { // ask for an zero index @@ -95,17 +516,38 @@ class LedgerEntry_test : public beast::unit_test::suite jvParams[jss::ledger_index] = "validated"; jvParams[jss::index] = "00000000000000000000000000000000000000000000000000000000000000" - "0000"; + "00"; auto const jrr = env.rpc( "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); + checkErrorValue(jrr, "entryNotFound", "Entry not found."); } + + forAllApiVersions([&, this](unsigned apiVersion) { + // "features" is not an option supported by ledger_entry. + { + Json::Value jvParams = Json::objectValue; + jvParams[jss::features] = + "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + "AAAAAAAAAA"; + jvParams[jss::api_version] = apiVersion; + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + + if (apiVersion < 2u) + checkErrorValue(jrr, "unknownOption", ""); + else + checkErrorValue( + jrr, + "invalidParams", + "No ledger_entry params provided."); + } + }); } void testLedgerEntryAccountRoot() { - testcase("ledger_entry Request AccountRoot"); + testcase("AccountRoot"); using namespace test::jtx; auto cfg = envconfig(); @@ -176,13 +618,26 @@ class LedgerEntry_test : public beast::unit_test::suite BEAST_EXPECT(jrr[jss::node][sfBalance.jsonName] == "10000000000"); } { - // Request using a corrupted AccountID. 
+ // Check alias Json::Value jvParams; - jvParams[jss::account_root] = makeBadAddress(alice.human()); + jvParams[jss::account] = alice.human(); jvParams[jss::ledger_hash] = ledgerHash; Json::Value const jrr = env.rpc( "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedAddress", ""); + BEAST_EXPECT(jrr.isMember(jss::node)); + BEAST_EXPECT(jrr[jss::node][jss::Account] == alice.human()); + BEAST_EXPECT(jrr[jss::node][sfBalance.jsonName] == "10000000000"); + accountRootIndex = jrr[jss::index].asString(); + } + { + // Check malformed cases + Json::Value jvParams; + testMalformedField( + env, + jvParams, + jss::account_root, + FieldType::AccountField, + "malformedAddress"); } { // Request an account that is not in the ledger. @@ -191,14 +646,14 @@ class LedgerEntry_test : public beast::unit_test::suite jvParams[jss::ledger_hash] = ledgerHash; Json::Value const jrr = env.rpc( "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "entryNotFound", ""); + checkErrorValue(jrr, "entryNotFound", "Entry not found."); } } void testLedgerEntryCheck() { - testcase("ledger_entry Request Check"); + testcase("Check"); using namespace test::jtx; Env env{*this}; Account const alice{"alice"}; @@ -238,14 +693,19 @@ class LedgerEntry_test : public beast::unit_test::suite jvParams[jss::ledger_hash] = ledgerHash; Json::Value const jrr = env.rpc( "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "unexpectedLedgerType", ""); + checkErrorValue( + jrr, "unexpectedLedgerType", "Unexpected ledger type."); + } + { + // Check malformed cases + runLedgerEntryTest(env, jss::check); } } void testLedgerEntryCredentials() { - testcase("ledger_entry credentials"); + testcase("Credentials"); using namespace test::jtx; @@ -287,163 +747,33 @@ class LedgerEntry_test : public beast::unit_test::suite jss::Credential); } - { - // Fail, index not a hash - auto const jv = credentials::ledgerEntry(env, ""); - checkErrorValue(jv[jss::result], "malformedRequest", ""); - } - { // Fail, credential doesn't exist auto const jv = credentials::ledgerEntry( env, "48004829F915654A81B11C4AB8218D96FED67F209B58328A72314FB6EA288B" "E4"); - checkErrorValue(jv[jss::result], "entryNotFound", ""); + checkErrorValue( + jv[jss::result], "entryNotFound", "Entry not found."); } { - // Fail, invalid subject - Json::Value jv; - jv[jss::ledger_index] = jss::validated; - jv[jss::credential][jss::subject] = 42; - jv[jss::credential][jss::issuer] = issuer.human(); - jv[jss::credential][jss::credential_type] = - strHex(std::string_view(credType)); - auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - - { - // Fail, invalid issuer - Json::Value jv; - jv[jss::ledger_index] = jss::validated; - jv[jss::credential][jss::subject] = alice.human(); - jv[jss::credential][jss::issuer] = 42; - jv[jss::credential][jss::credential_type] = - strHex(std::string_view(credType)); - auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - - { - // Fail, invalid credentials type - Json::Value jv; - jv[jss::ledger_index] = jss::validated; - jv[jss::credential][jss::subject] = alice.human(); - jv[jss::credential][jss::issuer] = issuer.human(); - jv[jss::credential][jss::credential_type] = 42; - auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - - { - // 
Fail, empty subject - Json::Value jv; - jv[jss::ledger_index] = jss::validated; - jv[jss::credential][jss::subject] = ""; - jv[jss::credential][jss::issuer] = issuer.human(); - jv[jss::credential][jss::credential_type] = - strHex(std::string_view(credType)); - auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - - { - // Fail, empty issuer - Json::Value jv; - jv[jss::ledger_index] = jss::validated; - jv[jss::credential][jss::subject] = alice.human(); - jv[jss::credential][jss::issuer] = ""; - jv[jss::credential][jss::credential_type] = - strHex(std::string_view(credType)); - auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - - { - // Fail, empty credentials type - Json::Value jv; - jv[jss::ledger_index] = jss::validated; - jv[jss::credential][jss::subject] = alice.human(); - jv[jss::credential][jss::issuer] = issuer.human(); - jv[jss::credential][jss::credential_type] = ""; - auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - - { - // Fail, no subject - Json::Value jv; - jv[jss::ledger_index] = jss::validated; - jv[jss::credential][jss::issuer] = issuer.human(); - jv[jss::credential][jss::credential_type] = - strHex(std::string_view(credType)); - auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - - { - // Fail, no issuer - Json::Value jv; - jv[jss::ledger_index] = jss::validated; - jv[jss::credential][jss::subject] = alice.human(); - jv[jss::credential][jss::credential_type] = - strHex(std::string_view(credType)); - auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - - { - // Fail, no credentials type - Json::Value jv; - jv[jss::ledger_index] = jss::validated; - jv[jss::credential][jss::subject] = alice.human(); - jv[jss::credential][jss::issuer] = issuer.human(); - auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - - { - // Fail, not AccountID subject - Json::Value jv; - jv[jss::ledger_index] = jss::validated; - jv[jss::credential][jss::subject] = "wehsdbvasbdfvj"; - jv[jss::credential][jss::issuer] = issuer.human(); - jv[jss::credential][jss::credential_type] = - strHex(std::string_view(credType)); - auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - - { - // Fail, not AccountID issuer - Json::Value jv; - jv[jss::ledger_index] = jss::validated; - jv[jss::credential][jss::subject] = alice.human(); - jv[jss::credential][jss::issuer] = "c4p93ugndfbsiu"; - jv[jss::credential][jss::credential_type] = - strHex(std::string_view(credType)); - auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - - { - // Fail, credentials type isn't hex encoded - Json::Value jv; - jv[jss::ledger_index] = jss::validated; - jv[jss::credential][jss::subject] = alice.human(); - jv[jss::credential][jss::issuer] = issuer.human(); - jv[jss::credential][jss::credential_type] = "12KK"; - auto const jrr = env.rpc("json", "ledger_entry", to_string(jv)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); + // Check all malformed cases + runLedgerEntryTest( + env, + jss::credential, + 
{ + {jss::subject, "malformedRequest"}, + {jss::issuer, "malformedRequest"}, + {jss::credential_type, "malformedRequest"}, + }); } } void testLedgerEntryDelegate() { - testcase("ledger_entry Delegate"); + testcase("Delegate"); using namespace test::jtx; @@ -482,78 +812,23 @@ class LedgerEntry_test : public beast::unit_test::suite BEAST_EXPECT(jrr[jss::node][sfAccount.jsonName] == alice.human()); BEAST_EXPECT(jrr[jss::node][sfAuthorize.jsonName] == bob.human()); } + { - // Malformed request: delegate neither object nor string. - Json::Value jvParams; - jvParams[jss::delegate] = 5; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // Malformed request: delegate not hex string. - Json::Value jvParams; - jvParams[jss::delegate] = "0123456789ABCDEFG"; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // Malformed request: account not a string - Json::Value jvParams; - jvParams[jss::delegate][jss::account] = 5; - jvParams[jss::delegate][jss::authorize] = bob.human(); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedAddress", ""); - } - { - // Malformed request: authorize not a string - Json::Value jvParams; - jvParams[jss::delegate][jss::account] = alice.human(); - jvParams[jss::delegate][jss::authorize] = 5; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedAddress", ""); - } - { - // this lambda function is used test malformed account and authroize - auto testMalformedAccount = - [&](std::optional const& account, - std::optional const& authorize, - std::string const& error) { - Json::Value jvParams; - jvParams[jss::ledger_hash] = ledgerHash; - if (account) - jvParams[jss::delegate][jss::account] = *account; - if (authorize) - jvParams[jss::delegate][jss::authorize] = *authorize; - auto const jrr = env.rpc( - "json", - "ledger_entry", - to_string(jvParams))[jss::result]; - checkErrorValue(jrr, error, ""); - }; - // missing account - testMalformedAccount(std::nullopt, bob.human(), "malformedRequest"); - // missing authorize - testMalformedAccount( - alice.human(), std::nullopt, "malformedRequest"); - // malformed account - testMalformedAccount("-", bob.human(), "malformedAddress"); - // malformed authorize - testMalformedAccount(alice.human(), "-", "malformedAddress"); + // Check all malformed cases + runLedgerEntryTest( + env, + jss::delegate, + { + {jss::account, "malformedAddress"}, + {jss::authorize, "malformedAddress"}, + }); } } void testLedgerEntryDepositPreauth() { - testcase("ledger_entry Deposit Preauth"); + testcase("Deposit Preauth"); using namespace test::jtx; @@ -600,91 +875,21 @@ class LedgerEntry_test : public beast::unit_test::suite BEAST_EXPECT(jrr[jss::node][sfAuthorize.jsonName] == becky.human()); } { - // Malformed request: deposit_preauth neither object nor string. 
- Json::Value jvParams; - jvParams[jss::deposit_preauth] = -5; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // Malformed request: deposit_preauth not hex string. - Json::Value jvParams; - jvParams[jss::deposit_preauth] = "0123456789ABCDEFG"; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // Malformed request: missing [jss::deposit_preauth][jss::owner] - Json::Value jvParams; - jvParams[jss::deposit_preauth][jss::authorized] = becky.human(); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // Malformed request: [jss::deposit_preauth][jss::owner] not string. - Json::Value jvParams; - jvParams[jss::deposit_preauth][jss::owner] = 7; - jvParams[jss::deposit_preauth][jss::authorized] = becky.human(); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // Malformed: missing [jss::deposit_preauth][jss::authorized] - Json::Value jvParams; - jvParams[jss::deposit_preauth][jss::owner] = alice.human(); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // Malformed: [jss::deposit_preauth][jss::authorized] not string. - Json::Value jvParams; - jvParams[jss::deposit_preauth][jss::owner] = alice.human(); - jvParams[jss::deposit_preauth][jss::authorized] = 47; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // Malformed: [jss::deposit_preauth][jss::owner] is malformed. - Json::Value jvParams; - jvParams[jss::deposit_preauth][jss::owner] = - "rP6P9ypfAmc!pw8SZHNwM4nvZHFXDraQas"; - - jvParams[jss::deposit_preauth][jss::authorized] = becky.human(); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedOwner", ""); - } - { - // Malformed: [jss::deposit_preauth][jss::authorized] is malformed. 
- Json::Value jvParams; - jvParams[jss::deposit_preauth][jss::owner] = alice.human(); - jvParams[jss::deposit_preauth][jss::authorized] = - "rP6P9ypfAmc!pw8SZHNwM4nvZHFXDraQas"; - - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedAuthorized", ""); + // test all missing/malformed field cases + runLedgerEntryTest( + env, + jss::deposit_preauth, + { + {jss::owner, "malformedOwner"}, + {jss::authorized, "malformedAuthorized", false}, + }); } } void testLedgerEntryDepositPreauthCred() { - testcase("ledger_entry Deposit Preauth with credentials"); + testcase("Deposit Preauth with credentials"); using namespace test::jtx; @@ -739,19 +944,30 @@ class LedgerEntry_test : public beast::unit_test::suite jvParams[jss::ledger_index] = jss::validated; jvParams[jss::deposit_preauth][jss::owner] = bob.human(); - jvParams[jss::deposit_preauth][jss::authorized_credentials] = - Json::arrayValue; - auto& arr( - jvParams[jss::deposit_preauth][jss::authorized_credentials]); + auto tryField = [&](Json::Value fieldValue) -> void { + Json::Value arr = Json::arrayValue; + Json::Value jo; + jo[jss::issuer] = fieldValue; + jo[jss::credential_type] = strHex(std::string_view(credType)); + arr.append(jo); + jvParams[jss::deposit_preauth][jss::authorized_credentials] = + arr; - Json::Value jo; - jo[jss::issuer] = to_string(xrpAccount()); - jo[jss::credential_type] = strHex(std::string_view(credType)); - arr.append(std::move(jo)); - auto const jrr = - env.rpc("json", "ledger_entry", to_string(jvParams)); - checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + auto const expectedErrMsg = fieldValue.isNull() + ? RPC::missing_field_message(jss::issuer.c_str()) + : RPC::expected_field_message(jss::issuer, "AccountID"); + checkErrorValue( + jrr, "malformedAuthorizedCredentials", expectedErrMsg); + }; + + auto const& badValues = getBadValues(FieldType::AccountField); + for (auto const& value : badValues) + { + tryField(value); + } + tryField(Json::nullValue); } { @@ -773,7 +989,10 @@ class LedgerEntry_test : public beast::unit_test::suite auto const jrr = env.rpc("json", "ledger_entry", to_string(jvParams)); checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); + jrr[jss::result], + "malformedAuthorizedCredentials", + RPC::expected_field_message( + jss::authorized_credentials, "array")); } { @@ -782,20 +1001,31 @@ class LedgerEntry_test : public beast::unit_test::suite jvParams[jss::ledger_index] = jss::validated; jvParams[jss::deposit_preauth][jss::owner] = bob.human(); - jvParams[jss::deposit_preauth][jss::authorized_credentials] = - Json::arrayValue; - auto& arr( - jvParams[jss::deposit_preauth][jss::authorized_credentials]); + auto tryField = [&](Json::Value fieldValue) -> void { + Json::Value arr = Json::arrayValue; + Json::Value jo; + jo[jss::issuer] = issuer.human(); + jo[jss::credential_type] = fieldValue; + arr.append(jo); + jvParams[jss::deposit_preauth][jss::authorized_credentials] = + arr; - Json::Value jo; - jo[jss::issuer] = issuer.human(); - jo[jss::credential_type] = ""; - arr.append(std::move(jo)); + Json::Value const jrr = env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + auto const expectedErrMsg = fieldValue.isNull() + ? 
RPC::missing_field_message(jss::credential_type.c_str()) + : RPC::expected_field_message( + jss::credential_type, "hex string"); + checkErrorValue( + jrr, "malformedAuthorizedCredentials", expectedErrMsg); + }; - auto const jrr = - env.rpc("json", "ledger_entry", to_string(jvParams)); - checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); + auto const& badValues = getBadValues(FieldType::BlobField); + for (auto const& value : badValues) + { + tryField(value); + } + tryField(Json::nullValue); } { @@ -817,7 +1047,11 @@ class LedgerEntry_test : public beast::unit_test::suite auto const jrr = env.rpc("json", "ledger_entry", to_string(jvParams)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); + checkErrorValue( + jrr[jss::result], + "malformedRequest", + "Must have exactly one of `authorized` and " + "`authorized_credentials`."); } { @@ -825,11 +1059,14 @@ class LedgerEntry_test : public beast::unit_test::suite Json::Value jvParams; jvParams[jss::ledger_index] = jss::validated; jvParams[jss::deposit_preauth][jss::owner] = bob.human(); - jvParams[jss::deposit_preauth][jss::authorized_credentials] = 42; - - auto const jrr = - env.rpc("json", "ledger_entry", to_string(jvParams)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); + testMalformedSubfield( + env, + jvParams, + jss::deposit_preauth, + jss::authorized_credentials, + FieldType::ArrayField, + "malformedAuthorizedCredentials", + false); } { @@ -846,7 +1083,9 @@ class LedgerEntry_test : public beast::unit_test::suite auto const jrr = env.rpc("json", "ledger_entry", to_string(jvParams)); checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); + jrr[jss::result], + "malformedAuthorizedCredentials", + "Invalid field 'authorized_credentials', not array."); } { @@ -865,7 +1104,9 @@ class LedgerEntry_test : public beast::unit_test::suite auto const jrr = env.rpc("json", "ledger_entry", to_string(jvParams)); checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); + jrr[jss::result], + "malformedAuthorizedCredentials", + "Invalid field 'authorized_credentials', not array."); } { @@ -879,13 +1120,14 @@ class LedgerEntry_test : public beast::unit_test::suite auto const jrr = env.rpc("json", "ledger_entry", to_string(jvParams)); checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); + jrr[jss::result], + "malformedAuthorizedCredentials", + "Invalid field 'authorized_credentials', not array."); } { // Failed, authorized_credentials is too long - - static std::string_view const credTypes[] = { + static std::array const credTypes = { "cred1", "cred2", "cred3", @@ -908,205 +1150,27 @@ class LedgerEntry_test : public beast::unit_test::suite auto& arr( jvParams[jss::deposit_preauth][jss::authorized_credentials]); - for (unsigned i = 0; i < sizeof(credTypes) / sizeof(credTypes[0]); - ++i) + for (auto cred : credTypes) { Json::Value jo; jo[jss::issuer] = issuer.human(); - jo[jss::credential_type] = - strHex(std::string_view(credTypes[i])); + jo[jss::credential_type] = strHex(std::string_view(cred)); arr.append(std::move(jo)); } auto const jrr = env.rpc("json", "ledger_entry", to_string(jvParams)); checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); - } - - { - // Failed, issuer is not set - Json::Value jvParams; - jvParams[jss::ledger_index] = jss::validated; - jvParams[jss::deposit_preauth][jss::owner] = bob.human(); - - jvParams[jss::deposit_preauth][jss::authorized_credentials] = - Json::arrayValue; - auto& arr( - 
jvParams[jss::deposit_preauth][jss::authorized_credentials]); - - Json::Value jo; - jo[jss::credential_type] = strHex(std::string_view(credType)); - arr.append(std::move(jo)); - - auto const jrr = - env.rpc("json", "ledger_entry", to_string(jvParams)); - checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); - } - - { - // Failed, issuer isn't string - Json::Value jvParams; - jvParams[jss::ledger_index] = jss::validated; - jvParams[jss::deposit_preauth][jss::owner] = bob.human(); - - jvParams[jss::deposit_preauth][jss::authorized_credentials] = - Json::arrayValue; - auto& arr( - jvParams[jss::deposit_preauth][jss::authorized_credentials]); - - Json::Value jo; - jo[jss::issuer] = 42; - jo[jss::credential_type] = strHex(std::string_view(credType)); - arr.append(std::move(jo)); - - auto const jrr = - env.rpc("json", "ledger_entry", to_string(jvParams)); - checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); - } - - { - // Failed, issuer is an array - Json::Value jvParams; - jvParams[jss::ledger_index] = jss::validated; - jvParams[jss::deposit_preauth][jss::owner] = bob.human(); - - jvParams[jss::deposit_preauth][jss::authorized_credentials] = - Json::arrayValue; - auto& arr( - jvParams[jss::deposit_preauth][jss::authorized_credentials]); - - Json::Value jo; - Json::Value payload = Json::arrayValue; - payload.append(42); - jo[jss::issuer] = std::move(payload); - jo[jss::credential_type] = strHex(std::string_view(credType)); - arr.append(std::move(jo)); - - auto const jrr = - env.rpc("json", "ledger_entry", to_string(jvParams)); - checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); - } - - { - // Failed, issuer isn't valid encoded account - Json::Value jvParams; - jvParams[jss::ledger_index] = jss::validated; - jvParams[jss::deposit_preauth][jss::owner] = bob.human(); - - jvParams[jss::deposit_preauth][jss::authorized_credentials] = - Json::arrayValue; - auto& arr( - jvParams[jss::deposit_preauth][jss::authorized_credentials]); - - Json::Value jo; - jo[jss::issuer] = "invalid_account"; - jo[jss::credential_type] = strHex(std::string_view(credType)); - arr.append(std::move(jo)); - - auto const jrr = - env.rpc("json", "ledger_entry", to_string(jvParams)); - checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); - } - - { - // Failed, credential_type is not set - Json::Value jvParams; - jvParams[jss::ledger_index] = jss::validated; - jvParams[jss::deposit_preauth][jss::owner] = bob.human(); - - jvParams[jss::deposit_preauth][jss::authorized_credentials] = - Json::arrayValue; - auto& arr( - jvParams[jss::deposit_preauth][jss::authorized_credentials]); - - Json::Value jo; - jo[jss::issuer] = issuer.human(); - arr.append(std::move(jo)); - - auto const jrr = - env.rpc("json", "ledger_entry", to_string(jvParams)); - checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); - } - - { - // Failed, credential_type isn't string - Json::Value jvParams; - jvParams[jss::ledger_index] = jss::validated; - jvParams[jss::deposit_preauth][jss::owner] = bob.human(); - - jvParams[jss::deposit_preauth][jss::authorized_credentials] = - Json::arrayValue; - auto& arr( - jvParams[jss::deposit_preauth][jss::authorized_credentials]); - - Json::Value jo; - jo[jss::issuer] = issuer.human(); - jo[jss::credential_type] = 42; - arr.append(std::move(jo)); - - auto const jrr = - env.rpc("json", "ledger_entry", to_string(jvParams)); - checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); - } - - { - // 
Failed, credential_type is an array - Json::Value jvParams; - jvParams[jss::ledger_index] = jss::validated; - jvParams[jss::deposit_preauth][jss::owner] = bob.human(); - - jvParams[jss::deposit_preauth][jss::authorized_credentials] = - Json::arrayValue; - auto& arr( - jvParams[jss::deposit_preauth][jss::authorized_credentials]); - - Json::Value jo; - jo[jss::issuer] = issuer.human(); - Json::Value payload = Json::arrayValue; - payload.append(42); - jo[jss::credential_type] = std::move(payload); - arr.append(std::move(jo)); - - auto const jrr = - env.rpc("json", "ledger_entry", to_string(jvParams)); - checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); - } - - { - // Failed, credential_type isn't hex encoded - Json::Value jvParams; - jvParams[jss::ledger_index] = jss::validated; - jvParams[jss::deposit_preauth][jss::owner] = bob.human(); - - jvParams[jss::deposit_preauth][jss::authorized_credentials] = - Json::arrayValue; - auto& arr( - jvParams[jss::deposit_preauth][jss::authorized_credentials]); - - Json::Value jo; - jo[jss::issuer] = issuer.human(); - jo[jss::credential_type] = "12KK"; - arr.append(std::move(jo)); - - auto const jrr = - env.rpc("json", "ledger_entry", to_string(jvParams)); - checkErrorValue( - jrr[jss::result], "malformedAuthorizedCredentials", ""); + jrr[jss::result], + "malformedAuthorizedCredentials", + "Invalid field 'authorized_credentials', not array."); } } void testLedgerEntryDirectory() { - testcase("ledger_entry Request Directory"); + testcase("Directory"); using namespace test::jtx; Env env{*this}; Account const alice{"alice"}; @@ -1188,39 +1252,48 @@ class LedgerEntry_test : public beast::unit_test::suite BEAST_EXPECT(jrr[jss::node][sfIndexes.jsonName].size() == 2); } { - // Null directory argument. + // Bad directory argument. Json::Value jvParams; - jvParams[jss::directory] = Json::nullValue; jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); + testMalformedField( + env, + jvParams, + jss::directory, + FieldType::HashOrObjectField, + "malformedRequest"); } { // Non-integer sub_index. Json::Value jvParams; jvParams[jss::directory] = Json::objectValue; jvParams[jss::directory][jss::dir_root] = dirRootIndex; - jvParams[jss::directory][jss::sub_index] = 1.5; jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); + testMalformedSubfield( + env, + jvParams, + jss::directory, + jss::sub_index, + FieldType::UInt64Field, + "malformedRequest", + false); } { // Malformed owner entry. Json::Value jvParams; jvParams[jss::directory] = Json::objectValue; - std::string const badAddress = makeBadAddress(alice.human()); - jvParams[jss::directory][jss::owner] = badAddress; jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedAddress", ""); + testMalformedSubfield( + env, + jvParams, + jss::directory, + jss::owner, + FieldType::AccountField, + "malformedAddress", + false); } { - // Malformed directory object. Specify both dir_root and owner. + // Malformed directory object. Specifies both dir_root and owner. 
Json::Value jvParams; jvParams[jss::directory] = Json::objectValue; jvParams[jss::directory][jss::owner] = alice.human(); @@ -1228,7 +1301,10 @@ class LedgerEntry_test : public beast::unit_test::suite jvParams[jss::ledger_hash] = ledgerHash; Json::Value const jrr = env.rpc( "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); + checkErrorValue( + jrr, + "malformedRequest", + "Must have exactly one of `owner` and `dir_root` fields."); } { // Incomplete directory object. Missing both dir_root and owner. @@ -1238,14 +1314,17 @@ class LedgerEntry_test : public beast::unit_test::suite jvParams[jss::ledger_hash] = ledgerHash; Json::Value const jrr = env.rpc( "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); + checkErrorValue( + jrr, + "malformedRequest", + "Must have exactly one of `owner` and `dir_root` fields."); } } void testLedgerEntryEscrow() { - testcase("ledger_entry Request Escrow"); + testcase("Escrow"); using namespace test::jtx; Env env{*this}; Account const alice{"alice"}; @@ -1296,56 +1375,18 @@ class LedgerEntry_test : public beast::unit_test::suite jrr[jss::node][jss::Amount] == XRP(333).value().getText()); } { - // Malformed owner entry. - Json::Value jvParams; - jvParams[jss::escrow] = Json::objectValue; - - std::string const badAddress = makeBadAddress(alice.human()); - jvParams[jss::escrow][jss::owner] = badAddress; - jvParams[jss::escrow][jss::seq] = env.seq(alice) - 1; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedOwner", ""); - } - { - // Missing owner. - Json::Value jvParams; - jvParams[jss::escrow] = Json::objectValue; - jvParams[jss::escrow][jss::seq] = env.seq(alice) - 1; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // Missing sequence. - Json::Value jvParams; - jvParams[jss::escrow] = Json::objectValue; - jvParams[jss::escrow][jss::owner] = alice.human(); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // Non-integer sequence. - Json::Value jvParams; - jvParams[jss::escrow] = Json::objectValue; - jvParams[jss::escrow][jss::owner] = alice.human(); - jvParams[jss::escrow][jss::seq] = - std::to_string(env.seq(alice) - 1); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); + // Malformed escrow fields + runLedgerEntryTest( + env, + jss::escrow, + {{jss::owner, "malformedOwner"}, {jss::seq, "malformedSeq"}}); } } void testLedgerEntryOffer() { - testcase("ledger_entry Request Offer"); + testcase("Offer"); using namespace test::jtx; Env env{*this}; Account const alice{"alice"}; @@ -1379,56 +1420,21 @@ class LedgerEntry_test : public beast::unit_test::suite "json", "ledger_entry", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr[jss::node][jss::TakerGets] == "322000000"); } - { - // Malformed account entry. 
- Json::Value jvParams; - jvParams[jss::offer] = Json::objectValue; - std::string const badAddress = makeBadAddress(alice.human()); - jvParams[jss::offer][jss::account] = badAddress; - jvParams[jss::offer][jss::seq] = env.seq(alice) - 1; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedAddress", ""); - } { - // Malformed offer object. Missing account member. - Json::Value jvParams; - jvParams[jss::offer] = Json::objectValue; - jvParams[jss::offer][jss::seq] = env.seq(alice) - 1; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // Malformed offer object. Missing seq member. - Json::Value jvParams; - jvParams[jss::offer] = Json::objectValue; - jvParams[jss::offer][jss::account] = alice.human(); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // Malformed offer object. Non-integral seq member. - Json::Value jvParams; - jvParams[jss::offer] = Json::objectValue; - jvParams[jss::offer][jss::account] = alice.human(); - jvParams[jss::offer][jss::seq] = std::to_string(env.seq(alice) - 1); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); + // Malformed offer fields + runLedgerEntryTest( + env, + jss::offer, + {{jss::account, "malformedAddress"}, + {jss::seq, "malformedRequest"}}); } } void testLedgerEntryPayChan() { - testcase("ledger_entry Request Pay Chan"); + testcase("Pay Chan"); using namespace test::jtx; using namespace std::literals::chrono_literals; Env env{*this}; @@ -1478,14 +1484,19 @@ class LedgerEntry_test : public beast::unit_test::suite jvParams[jss::ledger_hash] = ledgerHash; Json::Value const jrr = env.rpc( "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "entryNotFound", ""); + checkErrorValue(jrr, "entryNotFound", "Entry not found."); + } + + { + // Malformed paychan field + runLedgerEntryTest(env, jss::payment_channel); } } void testLedgerEntryRippleState() { - testcase("ledger_entry Request RippleState"); + testcase("RippleState"); using namespace test::jtx; Env env{*this}; Account const alice{"alice"}; @@ -1521,36 +1532,14 @@ class LedgerEntry_test : public beast::unit_test::suite jrr[jss::node][sfHighLimit.jsonName][jss::value] == "999"); } { - // ripple_state is not an object. - Json::Value jvParams; - jvParams[fieldName] = "ripple_state"; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // ripple_state.currency is missing. - Json::Value jvParams; - jvParams[fieldName] = Json::objectValue; - jvParams[fieldName][jss::accounts] = Json::arrayValue; - jvParams[fieldName][jss::accounts][0u] = alice.human(); - jvParams[fieldName][jss::accounts][1u] = gw.human(); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // ripple_state accounts is not an array. 
- Json::Value jvParams; - jvParams[fieldName] = Json::objectValue; - jvParams[fieldName][jss::accounts] = 2; - jvParams[fieldName][jss::currency] = "USD"; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); + // test basic malformed scenarios + runLedgerEntryTest( + env, + fieldName, + { + {jss::accounts, "malformedRequest"}, + {jss::currency, "malformedCurrency"}, + }); } { // ripple_state one of the accounts is missing. @@ -1562,7 +1551,11 @@ class LedgerEntry_test : public beast::unit_test::suite jvParams[jss::ledger_hash] = ledgerHash; Json::Value const jrr = env.rpc( "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); + checkErrorValue( + jrr, + "malformedRequest", + "Invalid field 'accounts', not length-2 array of " + "Accounts."); } { // ripple_state more than 2 accounts. @@ -1576,33 +1569,60 @@ class LedgerEntry_test : public beast::unit_test::suite jvParams[jss::ledger_hash] = ledgerHash; Json::Value const jrr = env.rpc( "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); + checkErrorValue( + jrr, + "malformedRequest", + "Invalid field 'accounts', not length-2 array of " + "Accounts."); } { - // ripple_state account[0] is not a string. + // ripple_state account[0] / account[1] is not an account. Json::Value jvParams; jvParams[fieldName] = Json::objectValue; - jvParams[fieldName][jss::accounts] = Json::arrayValue; - jvParams[fieldName][jss::accounts][0u] = 44; - jvParams[fieldName][jss::accounts][1u] = gw.human(); - jvParams[fieldName][jss::currency] = "USD"; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // ripple_state account[1] is not a string. 
- Json::Value jvParams; - jvParams[fieldName] = Json::objectValue; - jvParams[fieldName][jss::accounts] = Json::arrayValue; - jvParams[fieldName][jss::accounts][0u] = alice.human(); - jvParams[fieldName][jss::accounts][1u] = 21; - jvParams[fieldName][jss::currency] = "USD"; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); + auto tryField = [&](Json::Value badAccount) -> void { + { + // account[0] + jvParams[fieldName][jss::accounts] = Json::arrayValue; + jvParams[fieldName][jss::accounts][0u] = badAccount; + jvParams[fieldName][jss::accounts][1u] = gw.human(); + jvParams[fieldName][jss::currency] = "USD"; + + Json::Value const jrr = env.rpc( + "json", + "ledger_entry", + to_string(jvParams))[jss::result]; + checkErrorValue( + jrr, + "malformedAddress", + RPC::expected_field_message( + jss::accounts, "array of Accounts")); + } + + { + // account[1] + jvParams[fieldName][jss::accounts] = Json::arrayValue; + jvParams[fieldName][jss::accounts][0u] = alice.human(); + jvParams[fieldName][jss::accounts][1u] = badAccount; + jvParams[fieldName][jss::currency] = "USD"; + + Json::Value const jrr = env.rpc( + "json", + "ledger_entry", + to_string(jvParams))[jss::result]; + checkErrorValue( + jrr, + "malformedAddress", + RPC::expected_field_message( + jss::accounts, "array of Accounts")); + } + }; + + auto const& badValues = getBadValues(FieldType::AccountField); + for (auto const& value : badValues) + { + tryField(value); + } + tryField(Json::nullValue); } { // ripple_state account[0] == account[1]. @@ -1615,48 +1635,10 @@ class LedgerEntry_test : public beast::unit_test::suite jvParams[jss::ledger_hash] = ledgerHash; Json::Value const jrr = env.rpc( "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // ripple_state malformed account[0]. - Json::Value jvParams; - jvParams[fieldName] = Json::objectValue; - jvParams[fieldName][jss::accounts] = Json::arrayValue; - jvParams[fieldName][jss::accounts][0u] = - makeBadAddress(alice.human()); - jvParams[fieldName][jss::accounts][1u] = gw.human(); - jvParams[fieldName][jss::currency] = "USD"; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedAddress", ""); - } - { - // ripple_state malformed account[1]. - Json::Value jvParams; - jvParams[fieldName] = Json::objectValue; - jvParams[fieldName][jss::accounts] = Json::arrayValue; - jvParams[fieldName][jss::accounts][0u] = alice.human(); - jvParams[fieldName][jss::accounts][1u] = - makeBadAddress(gw.human()); - jvParams[fieldName][jss::currency] = "USD"; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedAddress", ""); - } - { - // ripple_state malformed currency. 
- Json::Value jvParams; - jvParams[fieldName] = Json::objectValue; - jvParams[fieldName][jss::accounts] = Json::arrayValue; - jvParams[fieldName][jss::accounts][0u] = alice.human(); - jvParams[fieldName][jss::accounts][1u] = gw.human(); - jvParams[fieldName][jss::currency] = "USDollars"; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedCurrency", ""); + checkErrorValue( + jrr, + "malformedRequest", + "Cannot have a trustline to self."); } } } @@ -1664,7 +1646,7 @@ class LedgerEntry_test : public beast::unit_test::suite void testLedgerEntryTicket() { - testcase("ledger_entry Request Ticket"); + testcase("Ticket"); using namespace test::jtx; Env env{*this}; env.close(); @@ -1686,7 +1668,7 @@ class LedgerEntry_test : public beast::unit_test::suite jvParams[jss::ledger_hash] = ledgerHash; Json::Value const jrr = env.rpc( "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "entryNotFound", ""); + checkErrorValue(jrr, "entryNotFound", "Entry not found."); } { // First real ticket requested by index. @@ -1721,7 +1703,7 @@ class LedgerEntry_test : public beast::unit_test::suite jvParams[jss::ledger_hash] = ledgerHash; Json::Value const jrr = env.rpc( "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "entryNotFound", ""); + checkErrorValue(jrr, "entryNotFound", "Entry not found."); } { // Request a ticket using an account root entry. @@ -1730,59 +1712,26 @@ class LedgerEntry_test : public beast::unit_test::suite jvParams[jss::ledger_hash] = ledgerHash; Json::Value const jrr = env.rpc( "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "unexpectedLedgerType", ""); + checkErrorValue( + jrr, "unexpectedLedgerType", "Unexpected ledger type."); } - { - // Malformed account entry. - Json::Value jvParams; - jvParams[jss::ticket] = Json::objectValue; - std::string const badAddress = makeBadAddress(env.master.human()); - jvParams[jss::ticket][jss::account] = badAddress; - jvParams[jss::ticket][jss::ticket_seq] = env.seq(env.master) - 1; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedAddress", ""); - } { - // Malformed ticket object. Missing account member. - Json::Value jvParams; - jvParams[jss::ticket] = Json::objectValue; - jvParams[jss::ticket][jss::ticket_seq] = env.seq(env.master) - 1; - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // Malformed ticket object. Missing seq member. - Json::Value jvParams; - jvParams[jss::ticket] = Json::objectValue; - jvParams[jss::ticket][jss::account] = env.master.human(); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); - } - { - // Malformed ticket object. Non-integral seq member. 
- Json::Value jvParams; - jvParams[jss::ticket] = Json::objectValue; - jvParams[jss::ticket][jss::account] = env.master.human(); - jvParams[jss::ticket][jss::ticket_seq] = - std::to_string(env.seq(env.master) - 1); - jvParams[jss::ledger_hash] = ledgerHash; - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "malformedRequest", ""); + // test basic malformed scenarios + runLedgerEntryTest( + env, + jss::ticket, + { + {jss::account, "malformedAddress"}, + {jss::ticket_seq, "malformedRequest"}, + }); } } void testLedgerEntryDID() { - testcase("ledger_entry Request DID"); + testcase("DID"); using namespace test::jtx; using namespace std::literals::chrono_literals; Env env{*this}; @@ -1826,230 +1775,17 @@ class LedgerEntry_test : public beast::unit_test::suite jvParams[jss::ledger_hash] = ledgerHash; Json::Value const jrr = env.rpc( "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "entryNotFound", ""); + checkErrorValue(jrr, "entryNotFound", "Entry not found."); } - } - - void - testLedgerEntryInvalidParams(unsigned int apiVersion) - { - testcase( - "ledger_entry Request With Invalid Parameters v" + - std::to_string(apiVersion)); - using namespace test::jtx; - Env env{*this}; - - std::string const ledgerHash{to_string(env.closed()->info().hash)}; - - auto makeParams = [&apiVersion](std::function f) { - Json::Value params; - params[jss::api_version] = apiVersion; - f(params); - return params; - }; - // "features" is not an option supported by ledger_entry. { - auto const jvParams = - makeParams([&ledgerHash](Json::Value& jvParams) { - jvParams[jss::features] = ledgerHash; - jvParams[jss::ledger_hash] = ledgerHash; - }); - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - if (apiVersion < 2u) - checkErrorValue(jrr, "unknownOption", ""); - else - checkErrorValue(jrr, "invalidParams", ""); - } - Json::Value const injectObject = []() { - Json::Value obj(Json::objectValue); - obj[jss::account] = "rhigTLJJyXXSRUyRCQtqi1NoAZZzZnS4KU"; - obj[jss::ledger_index] = "validated"; - return obj; - }(); - Json::Value const injectArray = []() { - Json::Value arr(Json::arrayValue); - arr[0u] = "rhigTLJJyXXSRUyRCQtqi1NoAZZzZnS4KU"; - arr[1u] = "validated"; - return arr; - }(); - - // invalid input for fields that can handle an object, but can't handle - // an array - for (auto const& field : - {jss::directory, jss::escrow, jss::offer, jss::ticket, jss::amm}) - { - auto const jvParams = - makeParams([&field, &injectArray](Json::Value& jvParams) { - jvParams[field] = injectArray; - }); - - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - if (apiVersion < 2u) - checkErrorValue(jrr, "internal", "Internal error."); - else - checkErrorValue(jrr, "invalidParams", ""); - } - // Fields that can handle objects just fine - for (auto const& field : - {jss::directory, jss::escrow, jss::offer, jss::ticket, jss::amm}) - { - auto const jvParams = - makeParams([&field, &injectObject](Json::Value& jvParams) { - jvParams[field] = injectObject; - }); - - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - checkErrorValue(jrr, "malformedRequest", ""); - } - - for (auto const& inject : {injectObject, injectArray}) - { - // invalid input for fields that can't handle an object or an array - for (auto const& field : - {jss::index, - jss::account_root, - jss::check, - jss::payment_channel}) - { - 
auto const jvParams = - makeParams([&field, &inject](Json::Value& jvParams) { - jvParams[field] = inject; - }); - - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - if (apiVersion < 2u) - checkErrorValue(jrr, "internal", "Internal error."); - else - checkErrorValue(jrr, "invalidParams", ""); - } - // directory sub-fields - for (auto const& field : {jss::dir_root, jss::owner}) - { - auto const jvParams = - makeParams([&field, &inject](Json::Value& jvParams) { - jvParams[jss::directory][field] = inject; - }); - - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - if (apiVersion < 2u) - checkErrorValue(jrr, "internal", "Internal error."); - else - checkErrorValue(jrr, "invalidParams", ""); - } - // escrow sub-fields - { - auto const jvParams = - makeParams([&inject](Json::Value& jvParams) { - jvParams[jss::escrow][jss::owner] = inject; - jvParams[jss::escrow][jss::seq] = 99; - }); - - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - if (apiVersion < 2u) - checkErrorValue(jrr, "internal", "Internal error."); - else - checkErrorValue(jrr, "invalidParams", ""); - } - // offer sub-fields - { - auto const jvParams = - makeParams([&inject](Json::Value& jvParams) { - jvParams[jss::offer][jss::account] = inject; - jvParams[jss::offer][jss::seq] = 99; - }); - - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - if (apiVersion < 2u) - checkErrorValue(jrr, "internal", "Internal error."); - else - checkErrorValue(jrr, "invalidParams", ""); - } - // ripple_state sub-fields - { - auto const jvParams = - makeParams([&inject](Json::Value& jvParams) { - Json::Value rs(Json::objectValue); - rs[jss::currency] = "FOO"; - rs[jss::accounts] = Json::Value(Json::arrayValue); - rs[jss::accounts][0u] = - "rhigTLJJyXXSRUyRCQtqi1NoAZZzZnS4KU"; - rs[jss::accounts][1u] = - "rKssEq6pg1KbqEqAFnua5mFAL6Ggpsh2wv"; - rs[jss::currency] = inject; - jvParams[jss::ripple_state] = std::move(rs); - }); - - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - if (apiVersion < 2u) - checkErrorValue(jrr, "internal", "Internal error."); - else - checkErrorValue(jrr, "invalidParams", ""); - } - // ticket sub-fields - { - auto const jvParams = - makeParams([&inject](Json::Value& jvParams) { - jvParams[jss::ticket][jss::account] = inject; - jvParams[jss::ticket][jss::ticket_seq] = 99; - }); - - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - if (apiVersion < 2u) - checkErrorValue(jrr, "internal", "Internal error."); - else - checkErrorValue(jrr, "invalidParams", ""); - } - - // Fields that can handle malformed inputs just fine - for (auto const& field : {jss::nft_page, jss::deposit_preauth}) - { - auto const jvParams = - makeParams([&field, &inject](Json::Value& jvParams) { - jvParams[field] = inject; - }); - - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - checkErrorValue(jrr, "malformedRequest", ""); - } - // Subfields of deposit_preauth that can handle malformed inputs - // fine - for (auto const& field : {jss::owner, jss::authorized}) - { - auto const jvParams = - makeParams([&field, &inject](Json::Value& jvParams) { - auto pa = Json::Value(Json::objectValue); - pa[jss::owner] = "rhigTLJJyXXSRUyRCQtqi1NoAZZzZnS4KU"; - pa[jss::authorized] = - "rKssEq6pg1KbqEqAFnua5mFAL6Ggpsh2wv"; - pa[field] = inject; - 
jvParams[jss::deposit_preauth] = std::move(pa); - }); - - Json::Value const jrr = env.rpc( - "json", "ledger_entry", to_string(jvParams))[jss::result]; - - checkErrorValue(jrr, "malformedRequest", ""); - } + // Malformed DID index + Json::Value jvParams; + testMalformedField( + env, + jvParams, + jss::did, + FieldType::AccountField, + "malformedAddress"); } } @@ -2068,28 +1804,16 @@ class LedgerEntry_test : public beast::unit_test::suite {.owner = owner, .fee = static_cast(env.current()->fees().base.drops())}); - // Malformed document id - auto res = Oracle::ledgerEntry(env, owner, NoneTag); - BEAST_EXPECT(res[jss::error].asString() == "invalidParams"); - std::vector invalid = {-1, 1.2, "", "Invalid"}; - for (auto const& v : invalid) { - auto const res = Oracle::ledgerEntry(env, owner, v); - BEAST_EXPECT(res[jss::error].asString() == "malformedDocumentID"); + // test basic malformed scenarios + runLedgerEntryTest( + env, + jss::oracle, + { + {jss::account, "malformedAccount"}, + {jss::oracle_document_id, "malformedDocumentID"}, + }); } - // Missing document id - res = Oracle::ledgerEntry(env, owner, std::nullopt); - BEAST_EXPECT(res[jss::error].asString() == "malformedRequest"); - - // Missing account - res = Oracle::ledgerEntry(env, std::nullopt, 1); - BEAST_EXPECT(res[jss::error].asString() == "malformedRequest"); - - // Malformed account - std::string malfAccount = to_string(owner.id()); - malfAccount.replace(10, 1, 1, '!'); - res = Oracle::ledgerEntry(env, malfAccount, 1); - BEAST_EXPECT(res[jss::error].asString() == "malformedAddress"); } void @@ -2144,7 +1868,7 @@ class LedgerEntry_test : public beast::unit_test::suite void testLedgerEntryMPT() { - testcase("ledger_entry Request MPT"); + testcase("MPT"); using namespace test::jtx; using namespace std::literals::chrono_literals; Env env{*this}; @@ -2185,7 +1909,7 @@ class LedgerEntry_test : public beast::unit_test::suite jvParams[jss::ledger_hash] = ledgerHash; Json::Value const jrr = env.rpc( "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "entryNotFound", ""); + checkErrorValue(jrr, "entryNotFound", "Entry not found."); } { // Request the MPToken using its owner + mptIssuanceID. 
@@ -2210,14 +1934,24 @@ class LedgerEntry_test : public beast::unit_test::suite jvParams[jss::ledger_hash] = ledgerHash; Json::Value const jrr = env.rpc( "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "entryNotFound", ""); + checkErrorValue(jrr, "entryNotFound", "Entry not found."); + } + { + // Malformed MPTIssuance index + Json::Value jvParams; + testMalformedField( + env, + jvParams, + jss::mptoken, + FieldType::HashOrObjectField, + "malformedRequest"); } } void testLedgerEntryPermissionedDomain() { - testcase("ledger_entry PermissionedDomain"); + testcase("PermissionedDomain"); using namespace test::jtx; @@ -2278,73 +2012,25 @@ class LedgerEntry_test : public beast::unit_test::suite "12F1F1F1F180D67377B2FAB292A31C922470326268D2B9B74CD1E582645B9A" "DE"; auto const jrr = env.rpc("json", "ledger_entry", to_string(params)); - checkErrorValue(jrr[jss::result], "entryNotFound", ""); + checkErrorValue( + jrr[jss::result], "entryNotFound", "Entry not found."); } - { - // Fail, invalid permissioned domain index - Json::Value params; - params[jss::ledger_index] = jss::validated; - params[jss::permissioned_domain] = "NotAHexString"; - auto const jrr = env.rpc("json", "ledger_entry", to_string(params)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - - { - // Fail, permissioned domain is not an object - Json::Value params; - params[jss::ledger_index] = jss::validated; - params[jss::permissioned_domain] = 10; - auto const jrr = env.rpc("json", "ledger_entry", to_string(params)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); - } - - { - // Fail, invalid account - Json::Value params; - params[jss::ledger_index] = jss::validated; - params[jss::permissioned_domain][jss::account] = 1; - params[jss::permissioned_domain][jss::seq] = seq; - auto const jrr = env.rpc("json", "ledger_entry", to_string(params)); - checkErrorValue(jrr[jss::result], "malformedAddress", ""); - } - - { - // Fail, account is an object - Json::Value params; - params[jss::ledger_index] = jss::validated; - params[jss::permissioned_domain][jss::account] = - Json::Value{Json::ValueType::objectValue}; - params[jss::permissioned_domain][jss::seq] = seq; - auto const jrr = env.rpc("json", "ledger_entry", to_string(params)); - checkErrorValue(jrr[jss::result], "malformedAddress", ""); - } - - { - // Fail, no account - Json::Value params; - params[jss::ledger_index] = jss::validated; - params[jss::permissioned_domain][jss::account] = ""; - params[jss::permissioned_domain][jss::seq] = seq; - auto const jrr = env.rpc("json", "ledger_entry", to_string(params)); - checkErrorValue(jrr[jss::result], "malformedAddress", ""); - } - - { - // Fail, invalid sequence - Json::Value params; - params[jss::ledger_index] = jss::validated; - params[jss::permissioned_domain][jss::account] = alice.human(); - params[jss::permissioned_domain][jss::seq] = "12g"; - auto const jrr = env.rpc("json", "ledger_entry", to_string(params)); - checkErrorValue(jrr[jss::result], "malformedRequest", ""); + // test basic malformed scenarios + runLedgerEntryTest( + env, + jss::permissioned_domain, + { + {jss::account, "malformedAddress"}, + {jss::seq, "malformedRequest"}, + }); } } void testLedgerEntryCLI() { - testcase("ledger_entry command-line"); + testcase("command-line"); using namespace test::jtx; Env env{*this}; @@ -2391,9 +2077,6 @@ public: testLedgerEntryMPT(); testLedgerEntryPermissionedDomain(); testLedgerEntryCLI(); - - forAllApiVersions(std::bind_front( - 
&LedgerEntry_test::testLedgerEntryInvalidParams, this)); } }; @@ -2444,7 +2127,6 @@ class LedgerEntry_XChain_test : public beast::unit_test::suite, BEAST_EXPECT(jrr.isMember(jss::node)); auto r = jrr[jss::node]; - // std::cout << to_string(r) << '\n'; BEAST_EXPECT(r.isMember(jss::Account)); BEAST_EXPECT(r[jss::Account] == mcDoor.human()); @@ -2486,7 +2168,7 @@ class LedgerEntry_XChain_test : public beast::unit_test::suite, Json::Value const jrr = mcEnv.rpc( "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "entryNotFound", ""); + checkErrorValue(jrr, "entryNotFound", "Entry not found."); } { // create two claim ids and verify that the bridge counter was @@ -2500,7 +2182,6 @@ class LedgerEntry_XChain_test : public beast::unit_test::suite, Json::Value jvParams; jvParams[jss::bridge_account] = mcDoor.human(); jvParams[jss::bridge] = jvb; - // std::cout << to_string(jvParams) << '\n'; Json::Value const jrr = mcEnv.rpc( "json", "ledger_entry", to_string(jvParams))[jss::result]; @@ -2536,13 +2217,11 @@ class LedgerEntry_XChain_test : public beast::unit_test::suite, jvParams[jss::xchain_owned_claim_id] = jvXRPBridgeRPC; jvParams[jss::xchain_owned_claim_id][jss::xchain_owned_claim_id] = 1; - // std::cout << to_string(jvParams) << '\n'; Json::Value const jrr = scEnv.rpc( "json", "ledger_entry", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr.isMember(jss::node)); auto r = jrr[jss::node]; - // std::cout << to_string(r) << '\n'; BEAST_EXPECT(r.isMember(jss::Account)); BEAST_EXPECT(r[jss::Account] == scAlice.human()); @@ -2563,7 +2242,6 @@ class LedgerEntry_XChain_test : public beast::unit_test::suite, BEAST_EXPECT(jrr.isMember(jss::node)); auto r = jrr[jss::node]; - // std::cout << to_string(r) << '\n'; BEAST_EXPECT(r.isMember(jss::Account)); BEAST_EXPECT(r[jss::Account] == scBob.human()); @@ -2622,10 +2300,8 @@ class LedgerEntry_XChain_test : public beast::unit_test::suite, jvXRPBridgeRPC; jvParams[jss::xchain_owned_create_account_claim_id] [jss::xchain_owned_create_account_claim_id] = 1; - // std::cout << to_string(jvParams) << '\n'; Json::Value const jrr = scEnv.rpc( "json", "ledger_entry", to_string(jvParams))[jss::result]; - // std::cout << to_string(jrr) << '\n'; BEAST_EXPECT(jrr.isMember(jss::node)); auto r = jrr[jss::node]; @@ -2694,10 +2370,9 @@ class LedgerEntry_XChain_test : public beast::unit_test::suite, jvXRPBridgeRPC; jvParams[jss::xchain_owned_create_account_claim_id] [jss::xchain_owned_create_account_claim_id] = 1; - // std::cout << to_string(jvParams) << '\n'; Json::Value const jrr = scEnv.rpc( "json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "entryNotFound", ""); + checkErrorValue(jrr, "entryNotFound", "Entry not found."); } } diff --git a/src/xrpld/rpc/detail/RPCHelpers.cpp b/src/xrpld/rpc/detail/RPCHelpers.cpp index b98f31340a..52a69eb79e 100644 --- a/src/xrpld/rpc/detail/RPCHelpers.cpp +++ b/src/xrpld/rpc/detail/RPCHelpers.cpp @@ -190,7 +190,7 @@ getAccountObjects( auto& jvObjects = (jvResult[jss::account_objects] = Json::arrayValue); - // this is a mutable version of limit, used to seemlessly switch + // this is a mutable version of limit, used to seamlessly switch // to iterating directory entries when nftokenpages are exhausted uint32_t mlimit = limit; @@ -373,7 +373,7 @@ ledgerFromRequest(T& ledger, JsonContext& context) indexValue = legacyLedger; } - if (hashValue) + if (!hashValue.isNull()) { if (!hashValue.isString()) return {rpcINVALID_PARAMS, "ledgerHashNotString"}; @@ -384,6 +384,9 @@ 
ledgerFromRequest(T& ledger, JsonContext& context) return getLedger(ledger, ledgerHash, context); } + if (!indexValue.isConvertibleTo(Json::stringValue)) + return {rpcINVALID_PARAMS, "ledgerIndexMalformed"}; + auto const index = indexValue.asString(); if (index == "current" || index.empty()) @@ -395,11 +398,11 @@ ledgerFromRequest(T& ledger, JsonContext& context) if (index == "closed") return getLedger(ledger, LedgerShortcut::CLOSED, context); - std::uint32_t iVal; - if (beast::lexicalCastChecked(iVal, index)) - return getLedger(ledger, iVal, context); + std::uint32_t val; + if (!beast::lexicalCastChecked(val, index)) + return {rpcINVALID_PARAMS, "ledgerIndexMalformed"}; - return {rpcINVALID_PARAMS, "ledgerIndexMalformed"}; + return getLedger(ledger, val, context); } } // namespace @@ -586,7 +589,7 @@ getLedger(T& ledger, LedgerShortcut shortcut, Context& context) return Status::OK; } -// Explicit instantiaion of above three functions +// Explicit instantiation of above three functions template Status getLedger<>(std::shared_ptr&, uint32_t, Context&); diff --git a/src/xrpld/rpc/handlers/LedgerEntry.cpp b/src/xrpld/rpc/handlers/LedgerEntry.cpp index fb82788907..61a7e2fb2c 100644 --- a/src/xrpld/rpc/handlers/LedgerEntry.cpp +++ b/src/xrpld/rpc/handlers/LedgerEntry.cpp @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -38,50 +39,57 @@ namespace ripple { -static std::optional -parseIndex(Json::Value const& params, Json::Value& jvResult) +static Expected +parseObjectID( + Json::Value const& params, + Json::StaticString const fieldName, + std::string const& expectedType = "hex string or object") { - uint256 uNodeIndex; - if (!uNodeIndex.parseHex(params.asString())) + if (auto const uNodeIndex = LedgerEntryHelpers::parse(params)) { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; + return *uNodeIndex; } - - return uNodeIndex; + return LedgerEntryHelpers::invalidFieldError( + "malformedRequest", fieldName, expectedType); } -static std::optional -parseAccountRoot(Json::Value const& params, Json::Value& jvResult) +static Expected +parseIndex(Json::Value const& params, Json::StaticString const fieldName) { - auto const account = parseBase58(params.asString()); - if (!account || account->isZero()) - { - jvResult[jss::error] = "malformedAddress"; - return std::nullopt; - } - - return keylet::account(*account).key; + return parseObjectID(params, fieldName, "hex string"); } -static std::optional -parseAMM(Json::Value const& params, Json::Value& jvResult) +static Expected +parseAccountRoot(Json::Value const& params, Json::StaticString const fieldName) +{ + if (auto const account = LedgerEntryHelpers::parse(params)) + { + return keylet::account(*account).key; + } + + return LedgerEntryHelpers::invalidFieldError( + "malformedAddress", fieldName, "AccountID"); +} + +static Expected +parseAmendments(Json::Value const& params, Json::StaticString const fieldName) +{ + return parseObjectID(params, fieldName, "hex string"); +} + +static Expected +parseAMM(Json::Value const& params, Json::StaticString const fieldName) { if (!params.isObject()) { - uint256 uNodeIndex; - if (!uNodeIndex.parseHex(params.asString())) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - return uNodeIndex; + return parseObjectID(params, fieldName); } - if (!params.isMember(jss::asset) || !params.isMember(jss::asset2)) + if (auto const value = + LedgerEntryHelpers::hasRequired(params, {jss::asset, jss::asset2}); + !value) { - jvResult[jss::error] = "malformedRequest"; 
- return std::nullopt; + return Unexpected(value.error()); } try @@ -92,135 +100,136 @@ parseAMM(Json::Value const& params, Json::Value& jvResult) } catch (std::runtime_error const&) { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; + return LedgerEntryHelpers::malformedError("malformedRequest", ""); } } -static std::optional -parseBridge(Json::Value const& params, Json::Value& jvResult) +static Expected +parseBridge(Json::Value const& params, Json::StaticString const fieldName) { - // return the keylet for the specified bridge or nullopt if the - // request is malformed - auto const maybeKeylet = [&]() -> std::optional { - try - { - if (!params.isMember(jss::bridge_account)) - return std::nullopt; - - auto const& jsBridgeAccount = params[jss::bridge_account]; - if (!jsBridgeAccount.isString()) - { - return std::nullopt; - } - - auto const account = - parseBase58(jsBridgeAccount.asString()); - if (!account || account->isZero()) - { - return std::nullopt; - } - - // This may throw and is the reason for the `try` block. The - // try block has a larger scope so the `bridge` variable - // doesn't need to be an optional. - STXChainBridge const bridge(params[jss::bridge]); - STXChainBridge::ChainType const chainType = - STXChainBridge::srcChain(account == bridge.lockingChainDoor()); - - if (account != bridge.door(chainType)) - return std::nullopt; - - return keylet::bridge(bridge, chainType); - } - catch (...) - { - return std::nullopt; - } - }(); - - if (maybeKeylet) + if (!params.isMember(jss::bridge)) { - return maybeKeylet->key; + return Unexpected(LedgerEntryHelpers::missingFieldError(jss::bridge)); } - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; + if (params[jss::bridge].isString()) + { + return parseObjectID(params, fieldName); + } + + auto const bridge = + LedgerEntryHelpers::parseBridgeFields(params[jss::bridge]); + if (!bridge) + return Unexpected(bridge.error()); + + auto const account = LedgerEntryHelpers::requiredAccountID( + params, jss::bridge_account, "malformedBridgeAccount"); + if (!account) + return Unexpected(account.error()); + + STXChainBridge::ChainType const chainType = + STXChainBridge::srcChain(account.value() == bridge->lockingChainDoor()); + if (account.value() != bridge->door(chainType)) + return LedgerEntryHelpers::malformedError("malformedRequest", ""); + + return keylet::bridge(*bridge, chainType).key; } -static std::optional -parseCheck(Json::Value const& params, Json::Value& jvResult) +static Expected +parseCheck(Json::Value const& params, Json::StaticString const fieldName) { - uint256 uNodeIndex; - if (!uNodeIndex.parseHex(params.asString())) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - - return uNodeIndex; + return parseObjectID(params, fieldName, "hex string"); } -static std::optional -parseCredential(Json::Value const& cred, Json::Value& jvResult) +static Expected +parseCredential(Json::Value const& cred, Json::StaticString const fieldName) { - if (cred.isString()) + if (!cred.isObject()) { - uint256 uNodeIndex; - if (!uNodeIndex.parseHex(cred.asString())) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - return uNodeIndex; + return parseObjectID(cred, fieldName); } - if ((!cred.isMember(jss::subject) || !cred[jss::subject].isString()) || - (!cred.isMember(jss::issuer) || !cred[jss::issuer].isString()) || - (!cred.isMember(jss::credential_type) || - !cred[jss::credential_type].isString())) - { - jvResult[jss::error] = "malformedRequest"; - return 
std::nullopt; - } + auto const subject = LedgerEntryHelpers::requiredAccountID( + cred, jss::subject, "malformedRequest"); + if (!subject) + return Unexpected(subject.error()); - auto const subject = parseBase58(cred[jss::subject].asString()); - auto const issuer = parseBase58(cred[jss::issuer].asString()); - auto const credType = strUnHex(cred[jss::credential_type].asString()); + auto const issuer = LedgerEntryHelpers::requiredAccountID( + cred, jss::issuer, "malformedRequest"); + if (!issuer) + return Unexpected(issuer.error()); - if (!subject || subject->isZero() || !issuer || issuer->isZero() || - !credType || credType->empty()) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } + auto const credType = LedgerEntryHelpers::requiredHexBlob( + cred, + jss::credential_type, + maxCredentialTypeLength, + "malformedRequest"); + if (!credType) + return Unexpected(credType.error()); return keylet::credential( *subject, *issuer, Slice(credType->data(), credType->size())) .key; } -static STArray +static Expected +parseDelegate(Json::Value const& params, Json::StaticString const fieldName) +{ + if (!params.isObject()) + { + return parseObjectID(params, fieldName); + } + + auto const account = LedgerEntryHelpers::requiredAccountID( + params, jss::account, "malformedAddress"); + if (!account) + return Unexpected(account.error()); + + auto const authorize = LedgerEntryHelpers::requiredAccountID( + params, jss::authorize, "malformedAddress"); + if (!authorize) + return Unexpected(authorize.error()); + + return keylet::delegate(*account, *authorize).key; +} + +static Expected parseAuthorizeCredentials(Json::Value const& jv) { + if (!jv.isArray()) + return LedgerEntryHelpers::invalidFieldError( + "malformedAuthorizedCredentials", + jss::authorized_credentials, + "array"); STArray arr(sfAuthorizeCredentials, jv.size()); for (auto const& jo : jv) { - if (!jo.isObject() || // - !jo.isMember(jss::issuer) || !jo[jss::issuer].isString() || - !jo.isMember(jss::credential_type) || - !jo[jss::credential_type].isString()) - return {}; + if (!jo.isObject()) + return LedgerEntryHelpers::invalidFieldError( + "malformedAuthorizedCredentials", + jss::authorized_credentials, + "array"); + if (auto const value = LedgerEntryHelpers::hasRequired( + jo, + {jss::issuer, jss::credential_type}, + "malformedAuthorizedCredentials"); + !value) + { + return Unexpected(value.error()); + } - auto const issuer = parseBase58(jo[jss::issuer].asString()); - if (!issuer || !*issuer) - return {}; + auto const issuer = LedgerEntryHelpers::requiredAccountID( + jo, jss::issuer, "malformedAuthorizedCredentials"); + if (!issuer) + return Unexpected(issuer.error()); - auto const credentialType = - strUnHex(jo[jss::credential_type].asString()); - if (!credentialType || credentialType->empty() || - credentialType->size() > maxCredentialTypeLength) - return {}; + auto const credentialType = LedgerEntryHelpers::requiredHexBlob( + jo, + jss::credential_type, + maxCredentialTypeLength, + "malformedAuthorizedCredentials"); + if (!credentialType) + return Unexpected(credentialType.error()); auto credential = STObject::makeInnerObject(sfCredential); credential.setAccountID(sfIssuer, *issuer); @@ -231,703 +240,450 @@ parseAuthorizeCredentials(Json::Value const& jv) return arr; } -static std::optional -parseDelegate(Json::Value const& params, Json::Value& jvResult) -{ - if (!params.isObject()) - { - uint256 uNodeIndex; - if (!params.isString() || !uNodeIndex.parseHex(params.asString())) - { - jvResult[jss::error] = 
"malformedRequest"; - return std::nullopt; - } - return uNodeIndex; - } - if (!params.isMember(jss::account) || !params.isMember(jss::authorize)) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - if (!params[jss::account].isString() || !params[jss::authorize].isString()) - { - jvResult[jss::error] = "malformedAddress"; - return std::nullopt; - } - auto const account = - parseBase58(params[jss::account].asString()); - if (!account) - { - jvResult[jss::error] = "malformedAddress"; - return std::nullopt; - } - auto const authorize = - parseBase58(params[jss::authorize].asString()); - if (!authorize) - { - jvResult[jss::error] = "malformedAddress"; - return std::nullopt; - } - return keylet::delegate(*account, *authorize).key; -} - -static std::optional -parseDepositPreauth(Json::Value const& dp, Json::Value& jvResult) +static Expected +parseDepositPreauth(Json::Value const& dp, Json::StaticString const fieldName) { if (!dp.isObject()) { - uint256 uNodeIndex; - if (!dp.isString() || !uNodeIndex.parseHex(dp.asString())) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - return uNodeIndex; + return parseObjectID(dp, fieldName); } - // clang-format off - if ( - (!dp.isMember(jss::owner) || !dp[jss::owner].isString()) || - (dp.isMember(jss::authorized) == dp.isMember(jss::authorized_credentials)) || - (dp.isMember(jss::authorized) && !dp[jss::authorized].isString()) || - (dp.isMember(jss::authorized_credentials) && !dp[jss::authorized_credentials].isArray()) - ) - // clang-format on + if ((dp.isMember(jss::authorized) == + dp.isMember(jss::authorized_credentials))) { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; + return LedgerEntryHelpers::malformedError( + "malformedRequest", + "Must have exactly one of `authorized` and " + "`authorized_credentials`."); } - auto const owner = parseBase58(dp[jss::owner].asString()); + auto const owner = + LedgerEntryHelpers::requiredAccountID(dp, jss::owner, "malformedOwner"); if (!owner) { - jvResult[jss::error] = "malformedOwner"; - return std::nullopt; + return Unexpected(owner.error()); } if (dp.isMember(jss::authorized)) { - auto const authorized = - parseBase58(dp[jss::authorized].asString()); - if (!authorized) + if (auto const authorized = + LedgerEntryHelpers::parse(dp[jss::authorized])) { - jvResult[jss::error] = "malformedAuthorized"; - return std::nullopt; + return keylet::depositPreauth(*owner, *authorized).key; } - return keylet::depositPreauth(*owner, *authorized).key; + return LedgerEntryHelpers::invalidFieldError( + "malformedAuthorized", jss::authorized, "AccountID"); } auto const& ac(dp[jss::authorized_credentials]); - STArray const arr = parseAuthorizeCredentials(ac); - - if (arr.empty() || (arr.size() > maxCredentialsArraySize)) + auto const arr = parseAuthorizeCredentials(ac); + if (!arr.has_value()) + return Unexpected(arr.error()); + if (arr->empty() || (arr->size() > maxCredentialsArraySize)) { - jvResult[jss::error] = "malformedAuthorizedCredentials"; - return std::nullopt; + return LedgerEntryHelpers::invalidFieldError( + "malformedAuthorizedCredentials", + jss::authorized_credentials, + "array"); } - auto const& sorted = credentials::makeSorted(arr); + auto const& sorted = credentials::makeSorted(arr.value()); if (sorted.empty()) { - jvResult[jss::error] = "malformedAuthorizedCredentials"; - return std::nullopt; + // TODO: this error message is bad/inaccurate + return LedgerEntryHelpers::invalidFieldError( + "malformedAuthorizedCredentials", + 
jss::authorized_credentials, + "array"); } - return keylet::depositPreauth(*owner, sorted).key; + return keylet::depositPreauth(*owner, std::move(sorted)).key; } -static std::optional -parseDID(Json::Value const& params, Json::Value& jvResult) +static Expected +parseDID(Json::Value const& params, Json::StaticString const fieldName) { - auto const account = parseBase58(params.asString()); - if (!account || account->isZero()) + auto const account = LedgerEntryHelpers::parse(params); + if (!account) { - jvResult[jss::error] = "malformedAddress"; - return std::nullopt; + return LedgerEntryHelpers::invalidFieldError( + "malformedAddress", fieldName, "AccountID"); } return keylet::did(*account).key; } -static std::optional -parseDirectory(Json::Value const& params, Json::Value& jvResult) +static Expected +parseDirectoryNode( + Json::Value const& params, + Json::StaticString const fieldName) { - if (params.isNull()) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - if (!params.isObject()) { - uint256 uNodeIndex; - if (!uNodeIndex.parseHex(params.asString())) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - return uNodeIndex; + return parseObjectID(params, fieldName); } - if (params.isMember(jss::sub_index) && !params[jss::sub_index].isIntegral()) + if (params.isMember(jss::sub_index) && + (!params[jss::sub_index].isConvertibleTo(Json::uintValue) || + params[jss::sub_index].isBool())) { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; + return LedgerEntryHelpers::invalidFieldError( + "malformedRequest", jss::sub_index, "number"); } - std::uint64_t uSubIndex = - params.isMember(jss::sub_index) ? params[jss::sub_index].asUInt() : 0; + if (params.isMember(jss::owner) == params.isMember(jss::dir_root)) + { + return LedgerEntryHelpers::malformedError( + "malformedRequest", + "Must have exactly one of `owner` and `dir_root` fields."); + } + + std::uint64_t uSubIndex = params.get(jss::sub_index, 0).asUInt(); if (params.isMember(jss::dir_root)) { - uint256 uDirRoot; - - if (params.isMember(jss::owner)) + if (auto const uDirRoot = + LedgerEntryHelpers::parse(params[jss::dir_root])) { - // May not specify both dir_root and owner. 
- jvResult[jss::error] = "malformedRequest"; - return std::nullopt; + return keylet::page(*uDirRoot, uSubIndex).key; } - if (!uDirRoot.parseHex(params[jss::dir_root].asString())) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - return keylet::page(uDirRoot, uSubIndex).key; + return LedgerEntryHelpers::invalidFieldError( + "malformedDirRoot", jss::dir_root, "hash"); } if (params.isMember(jss::owner)) { auto const ownerID = - parseBase58(params[jss::owner].asString()); - + LedgerEntryHelpers::parse(params[jss::owner]); if (!ownerID) { - jvResult[jss::error] = "malformedAddress"; - return std::nullopt; + return LedgerEntryHelpers::invalidFieldError( + "malformedAddress", jss::owner, "AccountID"); } return keylet::page(keylet::ownerDir(*ownerID), uSubIndex).key; } - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; + return LedgerEntryHelpers::malformedError("malformedRequest", ""); } -static std::optional -parseEscrow(Json::Value const& params, Json::Value& jvResult) +static Expected +parseEscrow(Json::Value const& params, Json::StaticString const fieldName) { if (!params.isObject()) { - uint256 uNodeIndex; - if (!uNodeIndex.parseHex(params.asString())) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - - return uNodeIndex; + return parseObjectID(params, fieldName); } - if (!params.isMember(jss::owner) || !params.isMember(jss::seq) || - !params[jss::seq].isIntegral()) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - - auto const id = parseBase58(params[jss::owner].asString()); - + auto const id = LedgerEntryHelpers::requiredAccountID( + params, jss::owner, "malformedOwner"); if (!id) - { - jvResult[jss::error] = "malformedOwner"; - return std::nullopt; - } + return Unexpected(id.error()); + auto const seq = + LedgerEntryHelpers::requiredUInt32(params, jss::seq, "malformedSeq"); + if (!seq) + return Unexpected(seq.error()); - return keylet::escrow(*id, params[jss::seq].asUInt()).key; + return keylet::escrow(*id, *seq).key; } -static std::optional -parseMPToken(Json::Value const& mptJson, Json::Value& jvResult) +static Expected +parseFeeSettings(Json::Value const& params, Json::StaticString const fieldName) { - if (!mptJson.isObject()) - { - uint256 uNodeIndex; - if (!uNodeIndex.parseHex(mptJson.asString())) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - return uNodeIndex; - } - - if (!mptJson.isMember(jss::mpt_issuance_id) || - !mptJson.isMember(jss::account)) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - - try - { - auto const mptIssuanceIdStr = mptJson[jss::mpt_issuance_id].asString(); - - uint192 mptIssuanceID; - if (!mptIssuanceID.parseHex(mptIssuanceIdStr)) - Throw("Cannot parse mpt_issuance_id"); - - auto const account = - parseBase58(mptJson[jss::account].asString()); - - if (!account || account->isZero()) - { - jvResult[jss::error] = "malformedAddress"; - return std::nullopt; - } - - return keylet::mptoken(mptIssuanceID, *account).key; - } - catch (std::runtime_error const&) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } + return parseObjectID(params, fieldName, "hex string"); } -static std::optional +static Expected +parseLedgerHashes(Json::Value const& params, Json::StaticString const fieldName) +{ + return parseObjectID(params, fieldName, "hex string"); +} + +static Expected +parseMPToken(Json::Value const& params, Json::StaticString const fieldName) +{ + if (!params.isObject()) + { + return 
parseObjectID(params, fieldName); + } + + auto const mptIssuanceID = LedgerEntryHelpers::requiredUInt192( + params, jss::mpt_issuance_id, "malformedMPTIssuanceID"); + if (!mptIssuanceID) + return Unexpected(mptIssuanceID.error()); + + auto const account = LedgerEntryHelpers::requiredAccountID( + params, jss::account, "malformedAccount"); + if (!account) + return Unexpected(account.error()); + + return keylet::mptoken(*mptIssuanceID, *account).key; +} + +static Expected parseMPTokenIssuance( - Json::Value const& unparsedMPTIssuanceID, - Json::Value& jvResult) + Json::Value const& params, + Json::StaticString const fieldName) { - if (unparsedMPTIssuanceID.isString()) - { - uint192 mptIssuanceID; - if (!mptIssuanceID.parseHex(unparsedMPTIssuanceID.asString())) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } + auto const mptIssuanceID = LedgerEntryHelpers::parse(params); + if (!mptIssuanceID) + return LedgerEntryHelpers::invalidFieldError( + "malformedMPTokenIssuance", fieldName, "Hash192"); - return keylet::mptIssuance(mptIssuanceID).key; - } - - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; + return keylet::mptIssuance(*mptIssuanceID).key; } -static std::optional -parseNFTokenPage(Json::Value const& params, Json::Value& jvResult) +static Expected +parseNFTokenOffer(Json::Value const& params, Json::StaticString const fieldName) { - if (params.isString()) - { - uint256 uNodeIndex; - if (!uNodeIndex.parseHex(params.asString())) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - return uNodeIndex; - } - - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; + return parseObjectID(params, fieldName, "hex string"); } -static std::optional -parseOffer(Json::Value const& params, Json::Value& jvResult) +static Expected +parseNFTokenPage(Json::Value const& params, Json::StaticString const fieldName) +{ + return parseObjectID(params, fieldName, "hex string"); +} + +static Expected +parseNegativeUNL(Json::Value const& params, Json::StaticString const fieldName) +{ + return parseObjectID(params, fieldName, "hex string"); +} + +static Expected +parseOffer(Json::Value const& params, Json::StaticString const fieldName) { if (!params.isObject()) { - uint256 uNodeIndex; - if (!uNodeIndex.parseHex(params.asString())) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - return uNodeIndex; + return parseObjectID(params, fieldName); } - if (!params.isMember(jss::account) || !params.isMember(jss::seq) || - !params[jss::seq].isIntegral()) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - - auto const id = parseBase58(params[jss::account].asString()); + auto const id = LedgerEntryHelpers::requiredAccountID( + params, jss::account, "malformedAddress"); if (!id) - { - jvResult[jss::error] = "malformedAddress"; - return std::nullopt; - } + return Unexpected(id.error()); - return keylet::offer(*id, params[jss::seq].asUInt()).key; + auto const seq = LedgerEntryHelpers::requiredUInt32( + params, jss::seq, "malformedRequest"); + if (!seq) + return Unexpected(seq.error()); + + return keylet::offer(*id, *seq).key; } -static std::optional -parseOracle(Json::Value const& params, Json::Value& jvResult) +static Expected +parseOracle(Json::Value const& params, Json::StaticString const fieldName) { if (!params.isObject()) { - uint256 uNodeIndex; - if (!uNodeIndex.parseHex(params.asString())) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - return uNodeIndex; + 
return parseObjectID(params, fieldName); } - if (!params.isMember(jss::oracle_document_id) || - !params.isMember(jss::account)) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } + auto const id = LedgerEntryHelpers::requiredAccountID( + params, jss::account, "malformedAccount"); + if (!id) + return Unexpected(id.error()); - auto const& oracle = params; - auto const documentID = [&]() -> std::optional { - auto const id = oracle[jss::oracle_document_id]; - if (id.isUInt() || (id.isInt() && id.asInt() >= 0)) - return std::make_optional(id.asUInt()); + auto const seq = LedgerEntryHelpers::requiredUInt32( + params, jss::oracle_document_id, "malformedDocumentID"); + if (!seq) + return Unexpected(seq.error()); - if (id.isString()) - { - std::uint32_t v; - if (beast::lexicalCastChecked(v, id.asString())) - return std::make_optional(v); - } - - return std::nullopt; - }(); - - auto const account = - parseBase58(oracle[jss::account].asString()); - if (!account || account->isZero()) - { - jvResult[jss::error] = "malformedAddress"; - return std::nullopt; - } - - if (!documentID) - { - jvResult[jss::error] = "malformedDocumentID"; - return std::nullopt; - } - - return keylet::oracle(*account, *documentID).key; + return keylet::oracle(*id, *seq).key; } -static std::optional -parsePaymentChannel(Json::Value const& params, Json::Value& jvResult) +static Expected +parsePayChannel(Json::Value const& params, Json::StaticString const fieldName) { - uint256 uNodeIndex; - if (!uNodeIndex.parseHex(params.asString())) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - - return uNodeIndex; + return parseObjectID(params, fieldName, "hex string"); } -static std::optional -parsePermissionedDomains(Json::Value const& pd, Json::Value& jvResult) +static Expected +parsePermissionedDomain( + Json::Value const& pd, + Json::StaticString const fieldName) { if (pd.isString()) { - auto const index = parseIndex(pd, jvResult); - return index; + return parseObjectID(pd, fieldName); } if (!pd.isObject()) { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; + return LedgerEntryHelpers::invalidFieldError( + "malformedRequest", fieldName, "hex string or object"); } - if (!pd.isMember(jss::account)) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - - if (!pd[jss::account].isString()) - { - jvResult[jss::error] = "malformedAddress"; - return std::nullopt; - } - - if (!pd.isMember(jss::seq) || - (pd[jss::seq].isInt() && pd[jss::seq].asInt() < 0) || - (!pd[jss::seq].isInt() && !pd[jss::seq].isUInt())) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - - auto const account = parseBase58(pd[jss::account].asString()); + auto const account = LedgerEntryHelpers::requiredAccountID( + pd, jss::account, "malformedAddress"); if (!account) - { - jvResult[jss::error] = "malformedAddress"; - return std::nullopt; - } + return Unexpected(account.error()); + + auto const seq = + LedgerEntryHelpers::requiredUInt32(pd, jss::seq, "malformedRequest"); + if (!seq) + return Unexpected(seq.error()); return keylet::permissionedDomain(*account, pd[jss::seq].asUInt()).key; } -static std::optional -parseRippleState(Json::Value const& jvRippleState, Json::Value& jvResult) +static Expected +parseRippleState( + Json::Value const& jvRippleState, + Json::StaticString const fieldName) { Currency uCurrency; - if (!jvRippleState.isObject() || !jvRippleState.isMember(jss::currency) || - !jvRippleState.isMember(jss::accounts) || - 
!jvRippleState[jss::accounts].isArray() || - 2 != jvRippleState[jss::accounts].size() || - !jvRippleState[jss::accounts][0u].isString() || - !jvRippleState[jss::accounts][1u].isString() || - (jvRippleState[jss::accounts][0u].asString() == - jvRippleState[jss::accounts][1u].asString())) + if (!jvRippleState.isObject()) { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; + return parseObjectID(jvRippleState, fieldName); + } + + if (auto const value = LedgerEntryHelpers::hasRequired( + jvRippleState, {jss::currency, jss::accounts}); + !value) + { + return Unexpected(value.error()); + } + + if (!jvRippleState[jss::accounts].isArray() || + jvRippleState[jss::accounts].size() != 2) + { + return LedgerEntryHelpers::invalidFieldError( + "malformedRequest", jss::accounts, "length-2 array of Accounts"); } auto const id1 = - parseBase58(jvRippleState[jss::accounts][0u].asString()); + LedgerEntryHelpers::parse(jvRippleState[jss::accounts][0u]); auto const id2 = - parseBase58(jvRippleState[jss::accounts][1u].asString()); + LedgerEntryHelpers::parse(jvRippleState[jss::accounts][1u]); if (!id1 || !id2) { - jvResult[jss::error] = "malformedAddress"; - return std::nullopt; + return LedgerEntryHelpers::invalidFieldError( + "malformedAddress", jss::accounts, "array of Accounts"); + } + if (id1 == id2) + { + return LedgerEntryHelpers::malformedError( + "malformedRequest", "Cannot have a trustline to self."); } - if (!to_currency(uCurrency, jvRippleState[jss::currency].asString())) + if (!jvRippleState[jss::currency].isString() || + jvRippleState[jss::currency] == "" || + !to_currency(uCurrency, jvRippleState[jss::currency].asString())) { - jvResult[jss::error] = "malformedCurrency"; - return std::nullopt; + return LedgerEntryHelpers::invalidFieldError( + "malformedCurrency", jss::currency, "Currency"); } return keylet::line(*id1, *id2, uCurrency).key; } -static std::optional -parseTicket(Json::Value const& params, Json::Value& jvResult) +static Expected +parseSignerList(Json::Value const& params, Json::StaticString const fieldName) +{ + return parseObjectID(params, fieldName, "hex string"); +} + +static Expected +parseTicket(Json::Value const& params, Json::StaticString const fieldName) { if (!params.isObject()) { - uint256 uNodeIndex; - if (!uNodeIndex.parseHex(params.asString())) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - return uNodeIndex; + return parseObjectID(params, fieldName); } - if (!params.isMember(jss::account) || !params.isMember(jss::ticket_seq) || - !params[jss::ticket_seq].isIntegral()) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - - auto const id = parseBase58(params[jss::account].asString()); + auto const id = LedgerEntryHelpers::requiredAccountID( + params, jss::account, "malformedAddress"); if (!id) - { - jvResult[jss::error] = "malformedAddress"; - return std::nullopt; - } + return Unexpected(id.error()); - return getTicketIndex(*id, params[jss::ticket_seq].asUInt()); + auto const seq = LedgerEntryHelpers::requiredUInt32( + params, jss::ticket_seq, "malformedRequest"); + if (!seq) + return Unexpected(seq.error()); + + return getTicketIndex(*id, *seq); } -static std::optional -parseVault(Json::Value const& params, Json::Value& jvResult) +static Expected +parseVault(Json::Value const& params, Json::StaticString const fieldName) { if (!params.isObject()) { - uint256 uNodeIndex; - if (!uNodeIndex.parseHex(params.asString())) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - return 
uNodeIndex; + return parseObjectID(params, fieldName); } - if (!params.isMember(jss::owner) || !params.isMember(jss::seq) || - !(params[jss::seq].isInt() || params[jss::seq].isUInt()) || - params[jss::seq].asDouble() <= 0.0 || - params[jss::seq].asDouble() > double(Json::Value::maxUInt)) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - - auto const id = parseBase58(params[jss::owner].asString()); + auto const id = LedgerEntryHelpers::requiredAccountID( + params, jss::owner, "malformedOwner"); if (!id) - { - jvResult[jss::error] = "malformedOwner"; - return std::nullopt; - } + return Unexpected(id.error()); - return keylet::vault(*id, params[jss::seq].asUInt()).key; + auto const seq = LedgerEntryHelpers::requiredUInt32( + params, jss::seq, "malformedRequest"); + if (!seq) + return Unexpected(seq.error()); + + return keylet::vault(*id, *seq).key; } -static std::optional -parseXChainOwnedClaimID(Json::Value const& claim_id, Json::Value& jvResult) +static Expected +parseXChainOwnedClaimID( + Json::Value const& claim_id, + Json::StaticString const fieldName) { - if (claim_id.isString()) + if (!claim_id.isObject()) { - uint256 uNodeIndex; - // we accept a node id as specifier of a xchain claim id - if (!uNodeIndex.parseHex(claim_id.asString())) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - return uNodeIndex; + return parseObjectID(claim_id, fieldName); } - if (!claim_id.isObject() || - !(claim_id.isMember(sfIssuingChainDoor.getJsonName()) && - claim_id[sfIssuingChainDoor.getJsonName()].isString()) || - !(claim_id.isMember(sfLockingChainDoor.getJsonName()) && - claim_id[sfLockingChainDoor.getJsonName()].isString()) || - !claim_id.isMember(sfIssuingChainIssue.getJsonName()) || - !claim_id.isMember(sfLockingChainIssue.getJsonName()) || - !claim_id.isMember(jss::xchain_owned_claim_id)) + auto const bridge_spec = LedgerEntryHelpers::parseBridgeFields(claim_id); + if (!bridge_spec) + return Unexpected(bridge_spec.error()); + + auto const seq = LedgerEntryHelpers::requiredUInt32( + claim_id, jss::xchain_owned_claim_id, "malformedXChainOwnedClaimID"); + if (!seq) { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; + return Unexpected(seq.error()); } - // if not specified with a node id, a claim_id is specified by - // four strings defining the bridge (locking_chain_door, - // locking_chain_issue, issuing_chain_door, issuing_chain_issue) - // and the claim id sequence number. 
- auto const lockingChainDoor = parseBase58( - claim_id[sfLockingChainDoor.getJsonName()].asString()); - auto const issuingChainDoor = parseBase58( - claim_id[sfIssuingChainDoor.getJsonName()].asString()); - Issue lockingChainIssue, issuingChainIssue; - bool valid = lockingChainDoor && issuingChainDoor; - - if (valid) - { - try - { - lockingChainIssue = - issueFromJson(claim_id[sfLockingChainIssue.getJsonName()]); - issuingChainIssue = - issueFromJson(claim_id[sfIssuingChainIssue.getJsonName()]); - } - catch (std::runtime_error const& ex) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - } - - if (valid && claim_id[jss::xchain_owned_claim_id].isIntegral()) - { - auto const seq = claim_id[jss::xchain_owned_claim_id].asUInt(); - - STXChainBridge bridge_spec( - *lockingChainDoor, - lockingChainIssue, - *issuingChainDoor, - issuingChainIssue); - Keylet keylet = keylet::xChainClaimID(bridge_spec, seq); - return keylet.key; - } - - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; + Keylet keylet = keylet::xChainClaimID(*bridge_spec, *seq); + return keylet.key; } -static std::optional +static Expected parseXChainOwnedCreateAccountClaimID( Json::Value const& claim_id, - Json::Value& jvResult) + Json::StaticString const fieldName) { - if (claim_id.isString()) + if (!claim_id.isObject()) { - uint256 uNodeIndex; - // we accept a node id as specifier of a xchain create account - // claim_id - if (!uNodeIndex.parseHex(claim_id.asString())) - { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; - } - return uNodeIndex; + return parseObjectID(claim_id, fieldName); } - if (!claim_id.isObject() || - !(claim_id.isMember(sfIssuingChainDoor.getJsonName()) && - claim_id[sfIssuingChainDoor.getJsonName()].isString()) || - !(claim_id.isMember(sfLockingChainDoor.getJsonName()) && - claim_id[sfLockingChainDoor.getJsonName()].isString()) || - !claim_id.isMember(sfIssuingChainIssue.getJsonName()) || - !claim_id.isMember(sfLockingChainIssue.getJsonName()) || - !claim_id.isMember(jss::xchain_owned_create_account_claim_id)) + auto const bridge_spec = LedgerEntryHelpers::parseBridgeFields(claim_id); + if (!bridge_spec) + return Unexpected(bridge_spec.error()); + + auto const seq = LedgerEntryHelpers::requiredUInt32( + claim_id, + jss::xchain_owned_create_account_claim_id, + "malformedXChainOwnedCreateAccountClaimID"); + if (!seq) { - jvResult[jss::error] = "malformedRequest"; - return std::nullopt; + return Unexpected(seq.error()); } - // if not specified with a node id, a create account claim_id is - // specified by four strings defining the bridge - // (locking_chain_door, locking_chain_issue, issuing_chain_door, - // issuing_chain_issue) and the create account claim id sequence - // number. 
- auto const lockingChainDoor = parseBase58( - claim_id[sfLockingChainDoor.getJsonName()].asString()); - auto const issuingChainDoor = parseBase58( - claim_id[sfIssuingChainDoor.getJsonName()].asString()); - Issue lockingChainIssue, issuingChainIssue; - bool valid = lockingChainDoor && issuingChainDoor; - if (valid) - { - try - { - lockingChainIssue = - issueFromJson(claim_id[sfLockingChainIssue.getJsonName()]); - issuingChainIssue = - issueFromJson(claim_id[sfIssuingChainIssue.getJsonName()]); - } - catch (std::runtime_error const& ex) - { - valid = false; - jvResult[jss::error] = "malformedRequest"; - } - } - - if (valid && - claim_id[jss::xchain_owned_create_account_claim_id].isIntegral()) - { - auto const seq = - claim_id[jss::xchain_owned_create_account_claim_id].asUInt(); - - STXChainBridge bridge_spec( - *lockingChainDoor, - lockingChainIssue, - *issuingChainDoor, - issuingChainIssue); - Keylet keylet = keylet::xChainCreateAccountClaimID(bridge_spec, seq); - return keylet.key; - } - - return std::nullopt; + Keylet keylet = keylet::xChainCreateAccountClaimID(*bridge_spec, *seq); + return keylet.key; } -using FunctionType = - std::function(Json::Value const&, Json::Value&)>; +using FunctionType = Expected (*)( + Json::Value const&, + Json::StaticString const); struct LedgerEntry { @@ -944,50 +700,49 @@ struct LedgerEntry Json::Value doLedgerEntry(RPC::JsonContext& context) { + static auto ledgerEntryParsers = std::to_array({ +#pragma push_macro("LEDGER_ENTRY") +#undef LEDGER_ENTRY + +#define LEDGER_ENTRY(tag, value, name, rpcName, fields) \ + {jss::rpcName, parse##name, tag}, + +#include + +#undef LEDGER_ENTRY +#pragma pop_macro("LEDGER_ENTRY") + {jss::index, parseIndex, ltANY}, + // aliases + {jss::account_root, parseAccountRoot, ltACCOUNT_ROOT}, + {jss::ripple_state, parseRippleState, ltRIPPLE_STATE}, + }); + + auto hasMoreThanOneMember = [&]() { + int count = 0; + + for (auto const& ledgerEntry : ledgerEntryParsers) + { + if (context.params.isMember(ledgerEntry.fieldName)) + { + count++; + if (count > 1) // Early exit if more than one is found + return true; + } + } + return false; // Return false if <= 1 is found + }(); + + if (hasMoreThanOneMember) + { + return RPC::make_param_error("Too many fields provided."); + } + std::shared_ptr lpLedger; auto jvResult = RPC::lookupLedger(lpLedger, context); if (!lpLedger) return jvResult; - static auto ledgerEntryParsers = std::to_array({ - {jss::index, parseIndex, ltANY}, - {jss::account_root, parseAccountRoot, ltACCOUNT_ROOT}, - // TODO: add amendments - {jss::amm, parseAMM, ltAMM}, - {jss::bridge, parseBridge, ltBRIDGE}, - {jss::check, parseCheck, ltCHECK}, - {jss::credential, parseCredential, ltCREDENTIAL}, - {jss::delegate, parseDelegate, ltDELEGATE}, - {jss::deposit_preauth, parseDepositPreauth, ltDEPOSIT_PREAUTH}, - {jss::did, parseDID, ltDID}, - {jss::directory, parseDirectory, ltDIR_NODE}, - {jss::escrow, parseEscrow, ltESCROW}, - // TODO: add fee, hashes - {jss::mpt_issuance, parseMPTokenIssuance, ltMPTOKEN_ISSUANCE}, - {jss::mptoken, parseMPToken, ltMPTOKEN}, - // TODO: add NFT Offers - {jss::nft_page, parseNFTokenPage, ltNFTOKEN_PAGE}, - // TODO: add NegativeUNL - {jss::offer, parseOffer, ltOFFER}, - {jss::oracle, parseOracle, ltORACLE}, - {jss::payment_channel, parsePaymentChannel, ltPAYCHAN}, - {jss::permissioned_domain, - parsePermissionedDomains, - ltPERMISSIONED_DOMAIN}, - {jss::ripple_state, parseRippleState, ltRIPPLE_STATE}, - // This is an alias, since the `ledger_data` filter uses jss::state - {jss::state, 
parseRippleState, ltRIPPLE_STATE}, - {jss::ticket, parseTicket, ltTICKET}, - {jss::xchain_owned_claim_id, - parseXChainOwnedClaimID, - ltXCHAIN_OWNED_CLAIM_ID}, - {jss::xchain_owned_create_account_claim_id, - parseXChainOwnedCreateAccountClaimID, - ltXCHAIN_OWNED_CREATE_ACCOUNT_CLAIM_ID}, - {jss::vault, parseVault, ltVAULT}, - }); - uint256 uNodeIndex; LedgerEntryType expectedType = ltANY; @@ -1006,34 +761,33 @@ doLedgerEntry(RPC::JsonContext& context) Json::Value const& params = ledgerEntry.fieldName == jss::bridge ? context.params : context.params[ledgerEntry.fieldName]; - uNodeIndex = ledgerEntry.parseFunction(params, jvResult) - .value_or(beast::zero); - if (jvResult.isMember(jss::error)) - { - return jvResult; - } + auto const result = + ledgerEntry.parseFunction(params, ledgerEntry.fieldName); + if (!result) + return result.error(); + + uNodeIndex = result.value(); found = true; break; } } - if (!found) { if (context.apiVersion < 2u) + { jvResult[jss::error] = "unknownOption"; - else - jvResult[jss::error] = "invalidParams"; - return jvResult; + return jvResult; + } + return RPC::make_param_error("No ledger_entry params provided."); } } catch (Json::error& e) { if (context.apiVersion > 1u) { - // For apiVersion 2 onwards, any parsing failures that throw this - // exception return an invalidParam error. - jvResult[jss::error] = "invalidParams"; - return jvResult; + // For apiVersion 2 onwards, any parsing failures that throw + // this exception return an invalidParam error. + return RPC::make_error(rpcINVALID_PARAMS); } else throw; @@ -1041,8 +795,7 @@ doLedgerEntry(RPC::JsonContext& context) if (uNodeIndex.isZero()) { - jvResult[jss::error] = "entryNotFound"; - return jvResult; + return RPC::make_error(rpcENTRY_NOT_FOUND); } auto const sleNode = lpLedger->read(keylet::unchecked(uNodeIndex)); @@ -1054,14 +807,12 @@ doLedgerEntry(RPC::JsonContext& context) if (!sleNode) { // Not found. - jvResult[jss::error] = "entryNotFound"; - return jvResult; + return RPC::make_error(rpcENTRY_NOT_FOUND); } if ((expectedType != ltANY) && (expectedType != sleNode->getType())) { - jvResult[jss::error] = "unexpectedLedgerType"; - return jvResult; + return RPC::make_error(rpcUNEXPECTED_LEDGER_TYPE); } if (bNodeBinary) @@ -1091,7 +842,7 @@ doLedgerEntryGrpc( grpc::Status status = grpc::Status::OK; std::shared_ptr ledger; - if (auto const status = RPC::ledgerFromRequest(ledger, context)) + if (auto status = RPC::ledgerFromRequest(ledger, context)) { grpc::Status errorStatus; if (status.toErrorCode() == rpcINVALID_PARAMS) diff --git a/src/xrpld/rpc/handlers/LedgerEntryHelpers.h b/src/xrpld/rpc/handlers/LedgerEntryHelpers.h new file mode 100644 index 0000000000..12b99dbbff --- /dev/null +++ b/src/xrpld/rpc/handlers/LedgerEntryHelpers.h @@ -0,0 +1,299 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2012-2025 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace ripple { + +namespace LedgerEntryHelpers { + +Unexpected +missingFieldError( + Json::StaticString const field, + std::optional err = std::nullopt) +{ + Json::Value json = Json::objectValue; + auto error = RPC::missing_field_message(std::string(field.c_str())); + json[jss::error] = err.value_or("malformedRequest"); + json[jss::error_code] = rpcINVALID_PARAMS; + json[jss::error_message] = std::move(error); + return Unexpected(json); +} + +Unexpected +invalidFieldError( + std::string const& err, + Json::StaticString const field, + std::string const& type) +{ + Json::Value json = Json::objectValue; + auto error = RPC::expected_field_message(field, type); + json[jss::error] = err; + json[jss::error_code] = rpcINVALID_PARAMS; + json[jss::error_message] = std::move(error); + return Unexpected(json); +} + +Unexpected +malformedError(std::string const& err, std::string const& message) +{ + Json::Value json = Json::objectValue; + json[jss::error] = err; + json[jss::error_code] = rpcINVALID_PARAMS; + json[jss::error_message] = message; + return Unexpected(json); +} + +Expected +hasRequired( + Json::Value const& params, + std::initializer_list fields, + std::optional err = std::nullopt) +{ + for (auto const field : fields) + { + if (!params.isMember(field) || params[field].isNull()) + { + return missingFieldError(field, err); + } + } + return true; +} + +template +std::optional +parse(Json::Value const& param); + +template +Expected +required( + Json::Value const& params, + Json::StaticString const fieldName, + std::string const& err, + std::string const& expectedType) +{ + if (!params.isMember(fieldName) || params[fieldName].isNull()) + { + return missingFieldError(fieldName); + } + if (auto obj = parse(params[fieldName])) + { + return *obj; + } + return invalidFieldError(err, fieldName, expectedType); +} + +template <> +std::optional +parse(Json::Value const& param) +{ + if (!param.isString()) + return std::nullopt; + + auto const account = parseBase58(param.asString()); + if (!account || account->isZero()) + { + return std::nullopt; + } + + return account; +} + +Expected +requiredAccountID( + Json::Value const& params, + Json::StaticString const fieldName, + std::string const& err) +{ + return required(params, fieldName, err, "AccountID"); +} + +std::optional +parseHexBlob(Json::Value const& param, std::size_t maxLength) +{ + if (!param.isString()) + return std::nullopt; + + auto const blob = strUnHex(param.asString()); + if (!blob || blob->empty() || blob->size() > maxLength) + return std::nullopt; + + return blob; +} + +Expected +requiredHexBlob( + Json::Value const& params, + Json::StaticString const fieldName, + std::size_t maxLength, + std::string const& err) +{ + if (!params.isMember(fieldName) || params[fieldName].isNull()) + { + return missingFieldError(fieldName); + } + if (auto blob = parseHexBlob(params[fieldName], maxLength)) + { + return *blob; + } + return invalidFieldError(err, fieldName, "hex string"); +} + +template <> +std::optional 
+parse(Json::Value const& param) +{ + if (param.isUInt() || (param.isInt() && param.asInt() >= 0)) + return param.asUInt(); + + if (param.isString()) + { + std::uint32_t v; + if (beast::lexicalCastChecked(v, param.asString())) + return v; + } + + return std::nullopt; +} + +Expected +requiredUInt32( + Json::Value const& params, + Json::StaticString const fieldName, + std::string const& err) +{ + return required(params, fieldName, err, "number"); +} + +template <> +std::optional +parse(Json::Value const& param) +{ + uint256 uNodeIndex; + if (!param.isString() || !uNodeIndex.parseHex(param.asString())) + { + return std::nullopt; + } + + return uNodeIndex; +} + +Expected +requiredUInt256( + Json::Value const& params, + Json::StaticString const fieldName, + std::string const& err) +{ + return required(params, fieldName, err, "Hash256"); +} + +template <> +std::optional +parse(Json::Value const& param) +{ + uint192 field; + if (!param.isString() || !field.parseHex(param.asString())) + { + return std::nullopt; + } + + return field; +} + +Expected +requiredUInt192( + Json::Value const& params, + Json::StaticString const fieldName, + std::string const& err) +{ + return required(params, fieldName, err, "Hash192"); +} + +Expected +parseBridgeFields(Json::Value const& params) +{ + if (auto const value = hasRequired( + params, + {jss::LockingChainDoor, + jss::LockingChainIssue, + jss::IssuingChainDoor, + jss::IssuingChainIssue}); + !value) + { + return Unexpected(value.error()); + } + + auto const lockingChainDoor = requiredAccountID( + params, jss::LockingChainDoor, "malformedLockingChainDoor"); + if (!lockingChainDoor) + { + return Unexpected(lockingChainDoor.error()); + } + + auto const issuingChainDoor = requiredAccountID( + params, jss::IssuingChainDoor, "malformedIssuingChainDoor"); + if (!issuingChainDoor) + { + return Unexpected(issuingChainDoor.error()); + } + + Issue lockingChainIssue; + try + { + lockingChainIssue = issueFromJson(params[jss::LockingChainIssue]); + } + catch (std::runtime_error const& ex) + { + return invalidFieldError( + "malformedIssue", jss::LockingChainIssue, "Issue"); + } + + Issue issuingChainIssue; + try + { + issuingChainIssue = issueFromJson(params[jss::IssuingChainIssue]); + } + catch (std::runtime_error const& ex) + { + return invalidFieldError( + "malformedIssue", jss::IssuingChainIssue, "Issue"); + } + + return STXChainBridge( + *lockingChainDoor, + lockingChainIssue, + *issuingChainDoor, + issuingChainIssue); +} + +} // namespace LedgerEntryHelpers + +} // namespace ripple From 2e6f00aef250a3c6fd88b680794c2d78e93fe361 Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Wed, 3 Sep 2025 09:25:52 +0100 Subject: [PATCH 150/244] Add required disable_ccache option (#5756) --- .github/workflows/build-test.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml index 05b65edbfd..90e1d9853c 100644 --- a/.github/workflows/build-test.yml +++ b/.github/workflows/build-test.yml @@ -117,6 +117,8 @@ jobs: uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 - name: Prepare runner uses: XRPLF/actions/.github/actions/prepare-runner@638e0dc11ea230f91bd26622fb542116bb5254d5 + with: + disable_ccache: false - name: Check configuration (Windows) if: ${{ inputs.os == 'windows' }} From 724e9b1313bda87199efdb05fe452d5b18def35e Mon Sep 17 00:00:00 2001 From: Ayaz Salikhov Date: Wed, 3 Sep 2025 11:24:07 +0100 Subject: [PATCH 151/244] chore: Use conan lockfile (#5751) * chore: Use conan lockfile * Add 
windows-specific dependencies as well * Add more info about lockfiles * Update lockfile to latest version * Update BUILD.md with conan install note --- .pre-commit-config.yaml | 3 ++- BUILD.md | 22 +++++++++++++++- conan.lock | 56 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 79 insertions(+), 2 deletions(-) create mode 100644 conan.lock diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3bd60b76d0..223c324a8c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -60,5 +60,6 @@ repos: exclude: | (?x)^( external/.*| - .github/scripts/levelization/results/.*\.txt + .github/scripts/levelization/results/.*\.txt| + conan\.lock )$ diff --git a/BUILD.md b/BUILD.md index c8ec31f826..6b1594bb5e 100644 --- a/BUILD.md +++ b/BUILD.md @@ -158,6 +158,10 @@ updated dependencies with the newer version. However, if we switch to a newer version that no longer requires a patch, no action is required on your part, as the new recipe will be automatically pulled from the official Conan Center. +> [!NOTE] +> You might need to add `--lockfile=""` to your `conan install` command +> to avoid automatic use of the existing `conan.lock` file when you run `conan export` manually on your machine + ### Conan profile tweaks #### Missing compiler version @@ -466,6 +470,21 @@ tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS'] The location of `rippled` binary in your build directory depends on your CMake generator. Pass `--help` to see the rest of the command line options. +#### Conan lockfile + +To achieve reproducible dependencies, we use [Conan lockfile](https://docs.conan.io/2/tutorial/versioning/lockfiles.html). + +The `conan.lock` file in the repository contains a "snapshot" of the current dependencies. +It is implicitly used when running `conan` commands, you don't need to specify it. + +You have to update this file every time you add a new dependency or change a revision or version of an existing dependency. + +To do that, run the following command in the repository root: + +```bash +conan lock create . -o '&:jemalloc=True' -o '&:rocksdb=True' +``` + ## Coverage report The coverage report is intended for developers using compilers GCC @@ -564,7 +583,8 @@ After any updates or changes to dependencies, you may need to do the following: ``` 3. Re-run [conan export](#patched-recipes) if needed. -4. Re-run [conan install](#build-and-test). +4. [Regenerate lockfile](#conan-lockfile). +5. Re-run [conan install](#build-and-test). 
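> Illustration only, not part of this diff: the steps above (re-export a patched recipe if one changed, regenerate the lockfile, re-install) can be chained into a single refresh pass. The `conan install` flags below are assumptions for a default setup, not the canonical invocation; use whatever output folder, profile, and options your build already uses.

```bash
# Hypothetical full refresh after a dependency change: re-lock, then re-install.
# Install flags are illustrative; adjust output folder, profile, and build_type locally.
conan lock create . -o '&:jemalloc=True' -o '&:rocksdb=True'
conan install . --output-folder build --build missing --settings build_type=Release
```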
### `protobuf/port_def.inc` file not found diff --git a/conan.lock b/conan.lock new file mode 100644 index 0000000000..0f11f086b4 --- /dev/null +++ b/conan.lock @@ -0,0 +1,56 @@ +{ + "version": "0.5", + "requires": [ + "zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1756234269.497", + "xxhash/0.8.3#681d36a0a6111fc56e5e45ea182c19cc%1756234289.683", + "sqlite3/3.49.1#8631739a4c9b93bd3d6b753bac548a63%1756234266.869", + "soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1756234262.318", + "snappy/1.1.10#968fef506ff261592ec30c574d4a7809%1756234314.246", + "rocksdb/10.0.1#85537f46e538974d67da0c3977de48ac%1756234304.347", + "re2/20230301#dfd6e2bf050eb90ddd8729cfb4c844a4%1756234257.976", + "protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1756234251.614", + "openssl/3.5.2#0c5a5e15ae569f45dff57adcf1770cf7%1756234259.61", + "nudb/2.0.9#c62cfd501e57055a7e0d8ee3d5e5427d%1756234237.107", + "lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1756234228.999", + "libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1756223727.64", + "libbacktrace/cci.20210118#a7691bfccd8caaf66309df196790a5a1%1756230911.03", + "libarchive/3.8.1#5cf685686322e906cb42706ab7e099a8%1756234256.696", + "jemalloc/5.3.0#e951da9cf599e956cebc117880d2d9f8%1729241615.244", + "grpc/1.50.1#02291451d1e17200293a409410d1c4e1%1756234248.958", + "doctest/2.4.11#a4211dfc329a16ba9f280f9574025659%1756234220.819", + "date/3.0.4#f74bbba5a08fa388256688743136cb6f%1756234217.493", + "c-ares/1.34.5#b78b91e7cfb1f11ce777a285bbf169c6%1756234217.915", + "bzip2/1.0.8#00b4a4658791c1f06914e087f0e792f5%1756234261.716", + "boost/1.88.0#8852c0b72ce8271fb8ff7c53456d4983%1756223752.326", + "abseil/20230802.1#f0f91485b111dc9837a68972cb19ca7b%1756234220.907" + ], + "build_requires": [ + "zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1756234269.497", + "strawberryperl/5.32.1.1#707032463aa0620fa17ec0d887f5fe41%1756234281.733", + "protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1756234251.614", + "nasm/2.16.01#31e26f2ee3c4346ecd347911bd126904%1756234232.901", + "msys2/cci.latest#5b73b10144f73cc5bfe0572ed9be39e1%1751977009.857", + "m4/1.4.19#b38ced39a01e31fef5435bc634461fd2%1700758725.451", + "cmake/3.31.8#dde3bde00bb843687e55aea5afa0e220%1756234232.89", + "b2/5.3.3#107c15377719889654eb9a162a673975%1756234226.28", + "automake/1.16.5#b91b7c384c3deaa9d535be02da14d04f%1755524470.56", + "autoconf/2.71#51077f068e61700d65bb05541ea1e4b0%1731054366.86" + ], + "python_requires": [], + "overrides": { + "protobuf/3.21.12": [ + null, + "protobuf/3.21.12" + ], + "lz4/1.9.4": [ + "lz4/1.10.0" + ], + "boost/1.83.0": [ + "boost/1.88.0" + ], + "sqlite3/3.44.2": [ + "sqlite3/3.49.1" + ] + }, + "config_requires": [] +} \ No newline at end of file From 32043463a8b59c12387268bd1a2b4165d70a96de Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Thu, 24 Jul 2025 19:08:46 -0400 Subject: [PATCH 152/244] Fix: Don't flag consensus as stalled prematurely (#5658) Fix stalled consensus detection to prevent false positives in situations where there are no disputed transactions. Stalled consensus detection was added to 2.5.0 in response to a network consensus halt that caused a round to run for over an hour. However, it has a flaw that makes it very easy to have false positives. Those false positives are usually mitigated by other checks that prevent them from having an effect, but there have been several instances of validators "running ahead" because there are circumstances where the other checks are "successful", allowing the stall state to be checked. 
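[Editorial note, not part of the patch] The false positives trace back to `std::ranges::all_of` being vacuously true over an empty range: with no disputed transactions, the per-dispute stall check "passed" for all zero disputes, so a round could be reported as stalled. The diff below guards the check with `!result_->disputes.empty()`. A minimal standalone sketch of that behaviour (not rippled code; a plain `std::map` stands in for the dispute set):

```cpp
#include <algorithm>
#include <cassert>
#include <map>

int main()
{
    std::map<int, bool> disputes;  // stand-in for result_->disputes; empty this round

    // all_of over an empty range is vacuously true, so an unguarded check
    // would report "stalled" even though nothing is actually disputed.
    bool const everyDisputeStalled = std::ranges::all_of(
        disputes, [](auto const& d) { return d.second; });
    assert(everyDisputeStalled);

    // The fix: only consider the round stalled when at least one dispute exists.
    bool const stalled = !disputes.empty() && everyDisputeStalled;
    assert(!stalled);
}
```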
--- src/test/consensus/Consensus_test.cpp | 183 ++++++++++++++++----- src/xrpld/app/consensus/RCLValidations.cpp | 2 +- src/xrpld/consensus/Consensus.cpp | 10 +- src/xrpld/consensus/Consensus.h | 24 ++- src/xrpld/consensus/DisputedTx.h | 27 ++- 5 files changed, 192 insertions(+), 54 deletions(-) diff --git a/src/test/consensus/Consensus_test.cpp b/src/test/consensus/Consensus_test.cpp index db56ab58c6..7899336a6f 100644 --- a/src/test/consensus/Consensus_test.cpp +++ b/src/test/consensus/Consensus_test.cpp @@ -1136,6 +1136,10 @@ public: ConsensusParms p; std::size_t peersUnchanged = 0; + auto logs = std::make_unique(beast::severities::kError); + auto j = logs->journal("Test"); + auto clog = std::make_unique(); + // Three cases: // 1 proposing, initial vote yes // 2 proposing, initial vote no @@ -1172,10 +1176,15 @@ public: BEAST_EXPECT(proposingFalse.getOurVote() == false); BEAST_EXPECT(followingTrue.getOurVote() == true); BEAST_EXPECT(followingFalse.getOurVote() == false); - BEAST_EXPECT(!proposingTrue.stalled(p, true, peersUnchanged)); - BEAST_EXPECT(!proposingFalse.stalled(p, true, peersUnchanged)); - BEAST_EXPECT(!followingTrue.stalled(p, false, peersUnchanged)); - BEAST_EXPECT(!followingFalse.stalled(p, false, peersUnchanged)); + BEAST_EXPECT( + !proposingTrue.stalled(p, true, peersUnchanged, j, clog)); + BEAST_EXPECT( + !proposingFalse.stalled(p, true, peersUnchanged, j, clog)); + BEAST_EXPECT( + !followingTrue.stalled(p, false, peersUnchanged, j, clog)); + BEAST_EXPECT( + !followingFalse.stalled(p, false, peersUnchanged, j, clog)); + BEAST_EXPECT(clog->str() == ""); // I'm in the majority, my vote should not change BEAST_EXPECT(!proposingTrue.updateVote(5, true, p)); @@ -1189,10 +1198,15 @@ public: BEAST_EXPECT(!followingFalse.updateVote(10, false, p)); peersUnchanged = 2; - BEAST_EXPECT(!proposingTrue.stalled(p, true, peersUnchanged)); - BEAST_EXPECT(!proposingFalse.stalled(p, true, peersUnchanged)); - BEAST_EXPECT(!followingTrue.stalled(p, false, peersUnchanged)); - BEAST_EXPECT(!followingFalse.stalled(p, false, peersUnchanged)); + BEAST_EXPECT( + !proposingTrue.stalled(p, true, peersUnchanged, j, clog)); + BEAST_EXPECT( + !proposingFalse.stalled(p, true, peersUnchanged, j, clog)); + BEAST_EXPECT( + !followingTrue.stalled(p, false, peersUnchanged, j, clog)); + BEAST_EXPECT( + !followingFalse.stalled(p, false, peersUnchanged, j, clog)); + BEAST_EXPECT(clog->str() == ""); // Right now, the vote is 51%. 
The requirement is about to jump to // 65% @@ -1282,10 +1296,15 @@ public: BEAST_EXPECT(followingFalse.getOurVote() == false); peersUnchanged = 3; - BEAST_EXPECT(!proposingTrue.stalled(p, true, peersUnchanged)); - BEAST_EXPECT(!proposingFalse.stalled(p, true, peersUnchanged)); - BEAST_EXPECT(!followingTrue.stalled(p, false, peersUnchanged)); - BEAST_EXPECT(!followingFalse.stalled(p, false, peersUnchanged)); + BEAST_EXPECT( + !proposingTrue.stalled(p, true, peersUnchanged, j, clog)); + BEAST_EXPECT( + !proposingFalse.stalled(p, true, peersUnchanged, j, clog)); + BEAST_EXPECT( + !followingTrue.stalled(p, false, peersUnchanged, j, clog)); + BEAST_EXPECT( + !followingFalse.stalled(p, false, peersUnchanged, j, clog)); + BEAST_EXPECT(clog->str() == ""); // Threshold jumps to 95% BEAST_EXPECT(proposingTrue.updateVote(220, true, p)); @@ -1322,12 +1341,60 @@ public: for (peersUnchanged = 0; peersUnchanged < 6; ++peersUnchanged) { - BEAST_EXPECT(!proposingTrue.stalled(p, true, peersUnchanged)); - BEAST_EXPECT(!proposingFalse.stalled(p, true, peersUnchanged)); - BEAST_EXPECT(!followingTrue.stalled(p, false, peersUnchanged)); - BEAST_EXPECT(!followingFalse.stalled(p, false, peersUnchanged)); + BEAST_EXPECT( + !proposingTrue.stalled(p, true, peersUnchanged, j, clog)); + BEAST_EXPECT( + !proposingFalse.stalled(p, true, peersUnchanged, j, clog)); + BEAST_EXPECT( + !followingTrue.stalled(p, false, peersUnchanged, j, clog)); + BEAST_EXPECT( + !followingFalse.stalled(p, false, peersUnchanged, j, clog)); + BEAST_EXPECT(clog->str() == ""); } + auto expectStalled = [this, &clog]( + int txid, + bool ourVote, + int ourTime, + int peerTime, + int support, + std::uint32_t line) { + using namespace std::string_literals; + + auto const s = clog->str(); + expect(s.find("stalled"), s, __FILE__, line); + expect( + s.starts_with("Transaction "s + std::to_string(txid)), + s, + __FILE__, + line); + expect( + s.find("voting "s + (ourVote ? "YES" : "NO")) != s.npos, + s, + __FILE__, + line); + expect( + s.find("for "s + std::to_string(ourTime) + " rounds."s) != + s.npos, + s, + __FILE__, + line); + expect( + s.find( + "votes in "s + std::to_string(peerTime) + " rounds.") != + s.npos, + s, + __FILE__, + line); + expect( + s.ends_with( + "has "s + std::to_string(support) + "% support. 
"s), + s, + __FILE__, + line); + clog = std::make_unique(); + }; + for (int i = 0; i < 1; ++i) { BEAST_EXPECT(!proposingTrue.updateVote(250 + 10 * i, true, p)); @@ -1342,22 +1409,34 @@ public: BEAST_EXPECT(followingFalse.getOurVote() == false); // true vote has changed recently, so not stalled - BEAST_EXPECT(!proposingTrue.stalled(p, true, 0)); + BEAST_EXPECT(!proposingTrue.stalled(p, true, 0, j, clog)); + BEAST_EXPECT(clog->str() == ""); // remaining votes have been unchanged in so long that we only // need to hit the second round at 95% to be stalled, regardless // of peers - BEAST_EXPECT(proposingFalse.stalled(p, true, 0)); - BEAST_EXPECT(followingTrue.stalled(p, false, 0)); - BEAST_EXPECT(followingFalse.stalled(p, false, 0)); + BEAST_EXPECT(proposingFalse.stalled(p, true, 0, j, clog)); + expectStalled(98, false, 11, 0, 2, __LINE__); + BEAST_EXPECT(followingTrue.stalled(p, false, 0, j, clog)); + expectStalled(97, true, 11, 0, 97, __LINE__); + BEAST_EXPECT(followingFalse.stalled(p, false, 0, j, clog)); + expectStalled(96, false, 11, 0, 3, __LINE__); // true vote has changed recently, so not stalled - BEAST_EXPECT(!proposingTrue.stalled(p, true, peersUnchanged)); + BEAST_EXPECT( + !proposingTrue.stalled(p, true, peersUnchanged, j, clog)); + BEAST_EXPECTS(clog->str() == "", clog->str()); // remaining votes have been unchanged in so long that we only // need to hit the second round at 95% to be stalled, regardless // of peers - BEAST_EXPECT(proposingFalse.stalled(p, true, peersUnchanged)); - BEAST_EXPECT(followingTrue.stalled(p, false, peersUnchanged)); - BEAST_EXPECT(followingFalse.stalled(p, false, peersUnchanged)); + BEAST_EXPECT( + proposingFalse.stalled(p, true, peersUnchanged, j, clog)); + expectStalled(98, false, 11, 6, 2, __LINE__); + BEAST_EXPECT( + followingTrue.stalled(p, false, peersUnchanged, j, clog)); + expectStalled(97, true, 11, 6, 97, __LINE__); + BEAST_EXPECT( + followingFalse.stalled(p, false, peersUnchanged, j, clog)); + expectStalled(96, false, 11, 6, 3, __LINE__); } for (int i = 1; i < 3; ++i) { @@ -1374,19 +1453,31 @@ public: // true vote changed 2 rounds ago, and peers are changing, so // not stalled - BEAST_EXPECT(!proposingTrue.stalled(p, true, 0)); + BEAST_EXPECT(!proposingTrue.stalled(p, true, 0, j, clog)); + BEAST_EXPECTS(clog->str() == "", clog->str()); // still stalled - BEAST_EXPECT(proposingFalse.stalled(p, true, 0)); - BEAST_EXPECT(followingTrue.stalled(p, false, 0)); - BEAST_EXPECT(followingFalse.stalled(p, false, 0)); + BEAST_EXPECT(proposingFalse.stalled(p, true, 0, j, clog)); + expectStalled(98, false, 11 + i, 0, 2, __LINE__); + BEAST_EXPECT(followingTrue.stalled(p, false, 0, j, clog)); + expectStalled(97, true, 11 + i, 0, 97, __LINE__); + BEAST_EXPECT(followingFalse.stalled(p, false, 0, j, clog)); + expectStalled(96, false, 11 + i, 0, 3, __LINE__); // true vote changed 2 rounds ago, and peers are NOT changing, // so stalled - BEAST_EXPECT(proposingTrue.stalled(p, true, peersUnchanged)); + BEAST_EXPECT( + proposingTrue.stalled(p, true, peersUnchanged, j, clog)); + expectStalled(99, true, 1 + i, 6, 97, __LINE__); // still stalled - BEAST_EXPECT(proposingFalse.stalled(p, true, peersUnchanged)); - BEAST_EXPECT(followingTrue.stalled(p, false, peersUnchanged)); - BEAST_EXPECT(followingFalse.stalled(p, false, peersUnchanged)); + BEAST_EXPECT( + proposingFalse.stalled(p, true, peersUnchanged, j, clog)); + expectStalled(98, false, 11 + i, 6, 2, __LINE__); + BEAST_EXPECT( + followingTrue.stalled(p, false, peersUnchanged, j, clog)); + expectStalled(97, true, 11 
+ i, 6, 97, __LINE__); + BEAST_EXPECT( + followingFalse.stalled(p, false, peersUnchanged, j, clog)); + expectStalled(96, false, 11 + i, 6, 3, __LINE__); } for (int i = 3; i < 5; ++i) { @@ -1401,15 +1492,27 @@ public: BEAST_EXPECT(followingTrue.getOurVote() == true); BEAST_EXPECT(followingFalse.getOurVote() == false); - BEAST_EXPECT(proposingTrue.stalled(p, true, 0)); - BEAST_EXPECT(proposingFalse.stalled(p, true, 0)); - BEAST_EXPECT(followingTrue.stalled(p, false, 0)); - BEAST_EXPECT(followingFalse.stalled(p, false, 0)); + BEAST_EXPECT(proposingTrue.stalled(p, true, 0, j, clog)); + expectStalled(99, true, 1 + i, 0, 97, __LINE__); + BEAST_EXPECT(proposingFalse.stalled(p, true, 0, j, clog)); + expectStalled(98, false, 11 + i, 0, 2, __LINE__); + BEAST_EXPECT(followingTrue.stalled(p, false, 0, j, clog)); + expectStalled(97, true, 11 + i, 0, 97, __LINE__); + BEAST_EXPECT(followingFalse.stalled(p, false, 0, j, clog)); + expectStalled(96, false, 11 + i, 0, 3, __LINE__); - BEAST_EXPECT(proposingTrue.stalled(p, true, peersUnchanged)); - BEAST_EXPECT(proposingFalse.stalled(p, true, peersUnchanged)); - BEAST_EXPECT(followingTrue.stalled(p, false, peersUnchanged)); - BEAST_EXPECT(followingFalse.stalled(p, false, peersUnchanged)); + BEAST_EXPECT( + proposingTrue.stalled(p, true, peersUnchanged, j, clog)); + expectStalled(99, true, 1 + i, 6, 97, __LINE__); + BEAST_EXPECT( + proposingFalse.stalled(p, true, peersUnchanged, j, clog)); + expectStalled(98, false, 11 + i, 6, 2, __LINE__); + BEAST_EXPECT( + followingTrue.stalled(p, false, peersUnchanged, j, clog)); + expectStalled(97, true, 11 + i, 6, 97, __LINE__); + BEAST_EXPECT( + followingFalse.stalled(p, false, peersUnchanged, j, clog)); + expectStalled(96, false, 11 + i, 6, 3, __LINE__); } } } diff --git a/src/xrpld/app/consensus/RCLValidations.cpp b/src/xrpld/app/consensus/RCLValidations.cpp index a04047c78a..5305c95357 100644 --- a/src/xrpld/app/consensus/RCLValidations.cpp +++ b/src/xrpld/app/consensus/RCLValidations.cpp @@ -136,7 +136,7 @@ RCLValidationsAdaptor::acquire(LedgerHash const& hash) if (!ledger) { - JLOG(j_.debug()) + JLOG(j_.warn()) << "Need validated ledger for preferred ledger analysis " << hash; Application* pApp = &app_; diff --git a/src/xrpld/consensus/Consensus.cpp b/src/xrpld/consensus/Consensus.cpp index fb57687df0..d4edb1445c 100644 --- a/src/xrpld/consensus/Consensus.cpp +++ b/src/xrpld/consensus/Consensus.cpp @@ -139,11 +139,11 @@ checkConsensusReached( return false; } - // We only get stalled when every disputed transaction unequivocally has 80% - // (minConsensusPct) agreement, either for or against. That is: either under - // 20% or over 80% consensus (repectively "nay" or "yay"). This prevents - // manipulation by a minority of byzantine peers of which transactions make - // the cut to get into the ledger. + // We only get stalled when there are disputed transactions and all of them + // unequivocally have 80% (minConsensusPct) agreement, either for or + // against. That is: either under 20% or over 80% consensus (repectively + // "nay" or "yay"). This prevents manipulation by a minority of byzantine + // peers of which transactions make the cut to get into the ledger. if (stalled) { CLOG(clog) << "consensus stalled. 
"; diff --git a/src/xrpld/consensus/Consensus.h b/src/xrpld/consensus/Consensus.h index f3265cf381..df6cedccff 100644 --- a/src/xrpld/consensus/Consensus.h +++ b/src/xrpld/consensus/Consensus.h @@ -84,8 +84,8 @@ shouldCloseLedger( agree @param stalled the network appears to be stalled, where neither we nor our peers have changed their vote on any disputes in a - while. This is undesirable, and will cause us to end consensus - without 80% agreement. + while. This is undesirable, and should be rare, and will cause us to + end consensus without 80% agreement. @param parms Consensus constant parameters @param proposing whether we should count ourselves @param j journal for logging @@ -1712,15 +1712,29 @@ Consensus::haveConsensus( << ", disagree=" << disagree; ConsensusParms const& parms = adaptor_.parms(); - // Stalling is BAD + // Stalling is BAD. It means that we have a consensus on the close time, so + // peers are talking, but we have disputed transactions that peers are + // unable or unwilling to come to agreement on one way or the other. bool const stalled = haveCloseTimeConsensus_ && + !result_->disputes.empty() && std::ranges::all_of(result_->disputes, - [this, &parms](auto const& dispute) { + [this, &parms, &clog](auto const& dispute) { return dispute.second.stalled( parms, mode_.get() == ConsensusMode::proposing, - peerUnchangedCounter_); + peerUnchangedCounter_, + j_, + clog); }); + if (stalled) + { + std::stringstream ss; + ss << "Consensus detects as stalled with " << (agree + disagree) << "/" + << prevProposers_ << " proposers, and " << result_->disputes.size() + << " stalled disputed transactions."; + JLOG(j_.error()) << ss.str(); + CLOG(clog) << ss.str(); + } // Determine if we actually have consensus or not result_->state = checkConsensus( diff --git a/src/xrpld/consensus/DisputedTx.h b/src/xrpld/consensus/DisputedTx.h index 4ed31b77ca..e774c8366c 100644 --- a/src/xrpld/consensus/DisputedTx.h +++ b/src/xrpld/consensus/DisputedTx.h @@ -85,7 +85,12 @@ public: //! Are we and our peers "stalled" where we probably won't change //! our vote? bool - stalled(ConsensusParms const& p, bool proposing, int peersUnchanged) const + stalled( + ConsensusParms const& p, + bool proposing, + int peersUnchanged, + beast::Journal j, + std::unique_ptr const& clog) const { // at() can throw, but the map is built by hand to ensure all valid // values are available. @@ -123,8 +128,24 @@ public: int const weight = support / total; // Returns true if the tx has more than minCONSENSUS_PCT (80) percent // agreement. Either voting for _or_ voting against the tx. - return weight > p.minCONSENSUS_PCT || - weight < (100 - p.minCONSENSUS_PCT); + bool const stalled = + weight > p.minCONSENSUS_PCT || weight < (100 - p.minCONSENSUS_PCT); + + if (stalled) + { + // stalling is an error condition for even a single + // transaction. + std::stringstream s; + s << "Transaction " << ID() << " is stalled. We have been voting " + << (getOurVote() ? "YES" : "NO") << " for " << currentVoteCounter_ + << " rounds. Peers have not changed their votes in " + << peersUnchanged << " rounds. The transaction has " << weight + << "% support. "; + JLOG(j_.error()) << s.str(); + CLOG(clog) << s.str(); + } + + return stalled; } //! The disputed transaction. 
From 16c2ff97ccc5e78fe450bd92c431322de75aa94e Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Wed, 3 Sep 2025 10:19:38 -0400 Subject: [PATCH 153/244] Set version to 2.5.1 --- src/libxrpl/protocol/BuildInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libxrpl/protocol/BuildInfo.cpp b/src/libxrpl/protocol/BuildInfo.cpp index 4cb6fbfd36..c996e1e2e9 100644 --- a/src/libxrpl/protocol/BuildInfo.cpp +++ b/src/libxrpl/protocol/BuildInfo.cpp @@ -36,7 +36,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "2.5.0" +char const* const versionString = "2.5.1" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From c38f2a3f2e44c00885c703fba035fb606c7a61c5 Mon Sep 17 00:00:00 2001 From: Jingchen Date: Wed, 3 Sep 2025 17:08:02 +0100 Subject: [PATCH 154/244] Fix coverage parameter (#5760) --- cmake/CodeCoverage.cmake | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmake/CodeCoverage.cmake b/cmake/CodeCoverage.cmake index 09ec3b9569..ec601de453 100644 --- a/cmake/CodeCoverage.cmake +++ b/cmake/CodeCoverage.cmake @@ -218,12 +218,12 @@ if(CMAKE_CXX_COMPILER_ID MATCHES "(GNU|Clang)") set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-abs-path") endif() - check_cxx_compiler_flag(-fprofile-update HAVE_cxx_fprofile_update) + check_cxx_compiler_flag(-fprofile-update=atomic HAVE_cxx_fprofile_update) if(HAVE_cxx_fprofile_update) set(COVERAGE_CXX_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-update=atomic") endif() - check_c_compiler_flag(-fprofile-update HAVE_c_fprofile_update) + check_c_compiler_flag(-fprofile-update=atomic HAVE_c_fprofile_update) if(HAVE_c_fprofile_update) set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-update=atomic") endif() From cf5f65b68eadf86be7b0f571dc0437830ddb4234 Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Thu, 4 Sep 2025 09:54:24 +0100 Subject: [PATCH 155/244] Add `Scale` to SingleAssetVault (#5652) * Add and Scale to VaultCreate * Add round-trip calculation to VaultDeposit VaultWithdraw and VaultClawback * Implement Number::truncate() for VaultClawback * Add rounding to DepositWithdraw * Disallow zero shares withdraw or deposit with tecPRECISION_LOSS * Return tecPATH_DRY on overflow when converting shares/assets * Remove empty shares MPToken in clawback or withdraw (except for vault owner) * Implicitly create shares MPToken for vault owner in VaultCreate * Review feedback: defensive checks in shares/assets calculations --------- Co-authored-by: Ed Hennis --- include/xrpl/basics/Number.h | 18 + include/xrpl/protocol/Protocol.h | 7 + .../xrpl/protocol/detail/ledger_entries.macro | 1 + .../xrpl/protocol/detail/transactions.macro | 1 + src/test/app/Vault_test.cpp | 1427 +++++++++++++++-- src/test/basics/Number_test.cpp | 25 + src/test/jtx/Account.h | 4 + src/test/jtx/impl/Account.cpp | 8 + src/xrpld/app/tx/detail/InvariantCheck.cpp | 6 + src/xrpld/app/tx/detail/VaultClawback.cpp | 173 +- src/xrpld/app/tx/detail/VaultCreate.cpp | 55 +- src/xrpld/app/tx/detail/VaultDelete.cpp | 22 +- src/xrpld/app/tx/detail/VaultDeposit.cpp | 85 +- src/xrpld/app/tx/detail/VaultSet.cpp | 6 +- src/xrpld/app/tx/detail/VaultWithdraw.cpp | 134 +- src/xrpld/ledger/View.h | 39 +- src/xrpld/ledger/detail/View.cpp | 79 +- 17 files changed, 1869 insertions(+), 221 deletions(-) diff --git a/include/xrpl/basics/Number.h 
b/include/xrpl/basics/Number.h index 9ee05bfb45..41c60d30a1 100644 --- a/include/xrpl/basics/Number.h +++ b/include/xrpl/basics/Number.h @@ -150,6 +150,24 @@ public: return (mantissa_ < 0) ? -1 : (mantissa_ ? 1 : 0); } + Number + truncate() const noexcept + { + if (exponent_ >= 0 || mantissa_ == 0) + return *this; + + Number ret = *this; + while (ret.exponent_ < 0 && ret.mantissa_ != 0) + { + ret.exponent_ += 1; + ret.mantissa_ /= rep(10); + } + // We are guaranteed that normalize() will never throw an exception + // because exponent is either negative or zero at this point. + ret.normalize(); + return ret; + } + friend constexpr bool operator>(Number const& x, Number const& y) noexcept { diff --git a/include/xrpl/protocol/Protocol.h b/include/xrpl/protocol/Protocol.h index 898fd06fbd..a0fcfee34c 100644 --- a/include/xrpl/protocol/Protocol.h +++ b/include/xrpl/protocol/Protocol.h @@ -122,6 +122,13 @@ std::size_t constexpr maxDataPayloadLength = 256; /** Vault withdrawal policies */ std::uint8_t constexpr vaultStrategyFirstComeFirstServe = 1; +/** Default IOU scale factor for a Vault */ +std::uint8_t constexpr vaultDefaultIOUScale = 6; +/** Maximum scale factor for a Vault. The number is chosen to ensure that +1 IOU can be always converted to shares. +10^19 > maxMPTokenAmount (2^64-1) > 10^18 */ +std::uint8_t constexpr vaultMaximumIOUScale = 18; + /** Maximum recursion depth for vault shares being put as an asset inside * another vault; counted from 0 */ std::uint8_t constexpr maxAssetCheckDepth = 5; diff --git a/include/xrpl/protocol/detail/ledger_entries.macro b/include/xrpl/protocol/detail/ledger_entries.macro index 967fb37b94..ac9ebc6069 100644 --- a/include/xrpl/protocol/detail/ledger_entries.macro +++ b/include/xrpl/protocol/detail/ledger_entries.macro @@ -499,6 +499,7 @@ LEDGER_ENTRY(ltVAULT, 0x0084, Vault, vault, ({ {sfLossUnrealized, soeREQUIRED}, {sfShareMPTID, soeREQUIRED}, {sfWithdrawalPolicy, soeREQUIRED}, + {sfScale, soeDEFAULT}, // no SharesTotal ever (use MPTIssuance.sfOutstandingAmount) // no PermissionedDomainID ever (use MPTIssuance.sfDomainID) })) diff --git a/include/xrpl/protocol/detail/transactions.macro b/include/xrpl/protocol/detail/transactions.macro index 89e9a16df5..1131e24f61 100644 --- a/include/xrpl/protocol/detail/transactions.macro +++ b/include/xrpl/protocol/detail/transactions.macro @@ -483,6 +483,7 @@ TRANSACTION(ttVAULT_CREATE, 65, VaultCreate, Delegation::delegatable, ({ {sfDomainID, soeOPTIONAL}, {sfWithdrawalPolicy, soeOPTIONAL}, {sfData, soeOPTIONAL}, + {sfScale, soeOPTIONAL}, })) /** This transaction updates a single asset vault. */ diff --git a/src/test/app/Vault_test.cpp b/src/test/app/Vault_test.cpp index 7add8b3eda..2216ff6421 100644 --- a/src/test/app/Vault_test.cpp +++ b/src/test/app/Vault_test.cpp @@ -19,11 +19,14 @@ #include #include +#include +#include #include #include #include +#include #include #include #include @@ -73,12 +76,32 @@ class Vault_test : public beast::unit_test::suite env(tx); env.close(); BEAST_EXPECT(env.le(keylet)); + std::uint64_t const scale = asset.raw().holds() ? 
1 : 1e6; - auto const share = [&env, keylet = keylet, this]() -> PrettyAsset { + auto const [share, vaultAccount] = + [&env, + keylet = keylet, + asset, + this]() -> std::tuple { auto const vault = env.le(keylet); BEAST_EXPECT(vault != nullptr); - return MPTIssue(vault->at(sfShareMPTID)); + if (asset.raw().holds() && !asset.raw().native()) + BEAST_EXPECT(vault->at(sfScale) == 6); + else + BEAST_EXPECT(vault->at(sfScale) == 0); + auto const shares = + env.le(keylet::mptIssuance(vault->at(sfShareMPTID))); + BEAST_EXPECT(shares != nullptr); + if (asset.raw().holds() && !asset.raw().native()) + BEAST_EXPECT(shares->at(sfAssetScale) == 6); + else + BEAST_EXPECT(shares->at(sfAssetScale) == 0); + return { + MPTIssue(vault->at(sfShareMPTID)), + Account("vault", vault->at(sfAccount))}; }(); + auto const shares = share.raw().get(); + env.memoize(vaultAccount); // Several 3rd party accounts which cannot receive funds Account alice{"alice"}; @@ -96,6 +119,7 @@ class Vault_test : public beast::unit_test::suite .id = keylet.key, .amount = asset(10000)}); env(tx, ter(tecINSUFFICIENT_FUNDS)); + env.close(); } { @@ -105,6 +129,9 @@ class Vault_test : public beast::unit_test::suite .id = keylet.key, .amount = asset(50)}); env(tx); + env.close(); + BEAST_EXPECT( + env.balance(depositor, shares) == share(50 * scale)); } { @@ -114,12 +141,16 @@ class Vault_test : public beast::unit_test::suite .id = keylet.key, .amount = asset(50)}); env(tx); + env.close(); + BEAST_EXPECT( + env.balance(depositor, shares) == share(100 * scale)); } { testcase(prefix + " fail to delete non-empty vault"); auto tx = vault.del({.owner = owner, .id = keylet.key}); env(tx, ter(tecHAS_OBLIGATIONS)); + env.close(); } { @@ -127,6 +158,7 @@ class Vault_test : public beast::unit_test::suite auto tx = vault.set({.owner = issuer, .id = keylet.key}); tx[sfAssetsMaximum] = asset(50).number(); env(tx, ter(tecNO_PERMISSION)); + env.close(); } { @@ -135,6 +167,7 @@ class Vault_test : public beast::unit_test::suite auto tx = vault.set({.owner = owner, .id = keylet.key}); tx[sfAssetsMaximum] = asset(50).number(); env(tx, ter(tecLIMIT_EXCEEDED)); + env.close(); } { @@ -142,6 +175,7 @@ class Vault_test : public beast::unit_test::suite auto tx = vault.set({.owner = owner, .id = keylet.key}); tx[sfAssetsMaximum] = asset(150).number(); env(tx); + env.close(); } { @@ -149,6 +183,7 @@ class Vault_test : public beast::unit_test::suite auto tx = vault.set({.owner = owner, .id = keylet.key}); tx[sfData] = "0"; env(tx); + env.close(); } { @@ -156,6 +191,7 @@ class Vault_test : public beast::unit_test::suite auto tx = vault.set({.owner = owner, .id = keylet.key}); tx[sfDomainID] = to_string(base_uint<256>(42ul)); env(tx, ter{tecNO_PERMISSION}); + env.close(); } { @@ -165,6 +201,7 @@ class Vault_test : public beast::unit_test::suite .id = keylet.key, .amount = asset(100)}); env(tx, ter(tecLIMIT_EXCEEDED)); + env.close(); } { @@ -172,6 +209,7 @@ class Vault_test : public beast::unit_test::suite auto tx = vault.set({.owner = owner, .id = keylet.key}); tx[sfAssetsMaximum] = asset(0).number(); env(tx); + env.close(); } { @@ -190,6 +228,9 @@ class Vault_test : public beast::unit_test::suite .id = keylet.key, .amount = asset(100)}); env(tx); + env.close(); + BEAST_EXPECT( + env.balance(depositor, shares) == share(200 * scale)); } { @@ -202,6 +243,12 @@ class Vault_test : public beast::unit_test::suite .holder = depositor, .amount = asset(10)}); env(tx, code); + env.close(); + if (!asset.raw().native()) + { + BEAST_EXPECT( + env.balance(depositor, shares) == 
share(190 * scale)); + } } { @@ -211,6 +258,30 @@ class Vault_test : public beast::unit_test::suite auto tx = vault.clawback( {.issuer = issuer, .id = keylet.key, .holder = depositor}); env(tx, code); + env.close(); + if (!asset.raw().native()) + { + BEAST_EXPECT(env.balance(depositor, shares) == share(0)); + + { + auto tx = vault.clawback( + {.issuer = issuer, + .id = keylet.key, + .holder = depositor, + .amount = asset(10)}); + env(tx, ter{tecPRECISION_LOSS}); + env.close(); + } + + { + auto tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(10)}); + env(tx, ter{tecPRECISION_LOSS}); + env.close(); + } + } } if (!asset.raw().native()) @@ -221,6 +292,9 @@ class Vault_test : public beast::unit_test::suite .id = keylet.key, .amount = asset(200)}); env(tx); + env.close(); + BEAST_EXPECT( + env.balance(depositor, shares) == share(200 * scale)); } { @@ -232,6 +306,7 @@ class Vault_test : public beast::unit_test::suite .amount = asset(100)}); tx[sfDestination] = alice.human(); env(tx, ter{tecNO_PERMISSION}); + env.close(); } { @@ -242,6 +317,7 @@ class Vault_test : public beast::unit_test::suite .amount = asset(1000)}); tx[sfDestination] = "0"; env(tx, ter(temMALFORMED)); + env.close(); } { @@ -254,6 +330,7 @@ class Vault_test : public beast::unit_test::suite .amount = asset(1000)}); tx[sfDestinationTag] = "0"; env(tx, ter(temMALFORMED)); + env.close(); } if (!asset.raw().native()) @@ -267,6 +344,77 @@ class Vault_test : public beast::unit_test::suite tx[sfDestination] = erin.human(); env(tx, ter{asset.raw().holds() ? tecNO_LINE : tecNO_AUTH}); + env.close(); + } + + { + testcase( + prefix + + " fail to withdraw to 3rd party lsfRequireDestTag"); + auto tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(100)}); + tx[sfDestination] = dave.human(); + env(tx, ter{tecDST_TAG_NEEDED}); + env.close(); + } + + { + testcase(prefix + " withdraw to authorized 3rd party"); + auto tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(100)}); + tx[sfDestination] = charlie.human(); + env(tx); + env.close(); + BEAST_EXPECT( + env.balance(depositor, shares) == share(100 * scale)); + } + + { + testcase(prefix + " withdraw to issuer"); + auto tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(50)}); + tx[sfDestination] = issuer.human(); + env(tx); + env.close(); + BEAST_EXPECT( + env.balance(depositor, shares) == share(50 * scale)); + } + + { + testcase(prefix + " withdraw remaining assets"); + auto tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(50)}); + env(tx); + env.close(); + BEAST_EXPECT(env.balance(depositor, shares) == share(0)); + + if (!asset.raw().native()) + { + auto tx = vault.clawback( + {.issuer = issuer, + .id = keylet.key, + .holder = depositor, + .amount = asset(0)}); + env(tx, ter{tecPRECISION_LOSS}); + env.close(); + } + + { + auto tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = share(10)}); + env(tx, ter{tecINSUFFICIENT_FUNDS}); + env.close(); + } } if (!asset.raw().native() && asset.raw().holds()) @@ -280,10 +428,27 @@ class Vault_test : public beast::unit_test::suite auto tx = vault.deposit( {.depositor = erin, .id = keylet.key, .amount = asset(10)}); env(tx); - env(pay(erin, depositor, share(10))); + env.close(); + { + auto tx = pay(erin, depositor, share(10 * scale)); + + // depositor no longer has MPToken for shares + env(tx, ter{tecNO_AUTH}); + env.close(); + + // depositor will gain 
MPToken for shares again + env(vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(1)})); + env.close(); + + env(tx); + env.close(); + } testcase(prefix + " withdraw to authorized 3rd party"); - // Depositor withdraws shares, destined to Erin + // Depositor withdraws assets, destined to Erin tx = vault.withdraw( {.depositor = depositor, .id = keylet.key, @@ -292,52 +457,23 @@ class Vault_test : public beast::unit_test::suite env(tx); // Erin returns assets to issuer env(pay(erin, issuer, asset(10))); + env.close(); testcase(prefix + " fail to pay to unauthorized 3rd party"); env(trust(erin, asset(0))); + env.close(); + // Erin has MPToken but is no longer authorized to hold assets env(pay(depositor, erin, share(1)), ter{tecNO_LINE}); - } + env.close(); - { - testcase( - prefix + - " fail to withdraw to 3rd party lsfRequireDestTag"); - auto tx = vault.withdraw( + // Depositor withdraws remaining single asset + tx = vault.withdraw( {.depositor = depositor, .id = keylet.key, - .amount = asset(100)}); - tx[sfDestination] = dave.human(); - env(tx, ter{tecDST_TAG_NEEDED}); - } - - { - testcase(prefix + " withdraw to authorized 3rd party"); - auto tx = vault.withdraw( - {.depositor = depositor, - .id = keylet.key, - .amount = asset(100)}); - tx[sfDestination] = charlie.human(); - env(tx); - } - - { - testcase(prefix + " withdraw to issuer"); - auto tx = vault.withdraw( - {.depositor = depositor, - .id = keylet.key, - .amount = asset(50)}); - tx[sfDestination] = issuer.human(); - env(tx); - } - - { - testcase(prefix + " withdraw remaining assets"); - auto tx = vault.withdraw( - {.depositor = depositor, - .id = keylet.key, - .amount = asset(50)}); + .amount = asset(1)}); env(tx); + env.close(); } { @@ -740,6 +876,61 @@ class Vault_test : public beast::unit_test::suite } }); + testCase([&](Env& env, + Account const&, + Account const& owner, + Asset const& asset, + Vault& vault) { + testcase("create with Scale"); + + { + auto [tx, keylet] = + vault.create({.owner = owner, .asset = asset}); + tx[sfScale] = 255; + env(tx, ter(temMALFORMED)); + } + + { + auto [tx, keylet] = + vault.create({.owner = owner, .asset = asset}); + tx[sfScale] = 19; + env(tx, ter(temMALFORMED)); + } + + // accepted range from 0 to 18 + { + auto [tx, keylet] = + vault.create({.owner = owner, .asset = asset}); + tx[sfScale] = 18; + env(tx); + env.close(); + auto const sleVault = env.le(keylet); + BEAST_EXPECT(sleVault); + BEAST_EXPECT((*sleVault)[sfScale] == 18); + } + + { + auto [tx, keylet] = + vault.create({.owner = owner, .asset = asset}); + tx[sfScale] = 0; + env(tx); + env.close(); + auto const sleVault = env.le(keylet); + BEAST_EXPECT(sleVault); + BEAST_EXPECT((*sleVault)[sfScale] == 0); + } + + { + auto [tx, keylet] = + vault.create({.owner = owner, .asset = asset}); + env(tx); + env.close(); + auto const sleVault = env.le(keylet); + BEAST_EXPECT(sleVault); + BEAST_EXPECT((*sleVault)[sfScale] == 6); + } + }); + testCase([&](Env& env, Account const&, Account const& owner, @@ -1105,6 +1296,32 @@ class Vault_test : public beast::unit_test::suite testcase("non-existing domain"); env(tx, ter{tecOBJECT_NOT_FOUND}); }); + + testCase([this]( + Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + Asset const& asset, + Vault& vault) { + testcase("cannot set Scale=0"); + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + tx[sfScale] = 0; + env(tx, ter{temMALFORMED}); + }); + + testCase([this]( + Env& env, + Account const& issuer, + Account const& 
owner, + Account const& depositor, + Asset const& asset, + Vault& vault) { + testcase("cannot set Scale=1"); + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + tx[sfScale] = 1; + env(tx, ter{temMALFORMED}); + }); } void @@ -1221,21 +1438,65 @@ class Vault_test : public beast::unit_test::suite { using namespace test::jtx; - Env env{*this, testable_amendments() | featureSingleAssetVault}; - Account issuer{"issuer"}; - Account owner{"owner"}; - Account depositor{"depositor"}; - env.fund(XRP(1000), issuer, owner, depositor); - env.close(); - Vault vault{env}; + auto testCase = [this](std::function test) { + Env env{*this, testable_amendments() | featureSingleAssetVault}; + Account issuer{"issuer"}; + Account owner{"owner"}; + Account depositor{"depositor"}; + env.fund(XRP(1000), issuer, owner, depositor); + env.close(); + Vault vault{env}; + MPTTester mptt{env, issuer, mptInitNoFund}; + // Locked because that is the default flag. + mptt.create(); + Asset asset = mptt.issuanceID(); - MPTTester mptt{env, issuer, mptInitNoFund}; + test(env, issuer, owner, depositor, asset, vault); + }; - // Locked because that is the default flag. - mptt.create(); - Asset asset = mptt.issuanceID(); - auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); - env(tx, ter(tecNO_AUTH)); + testCase([this]( + Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + Asset const& asset, + Vault& vault) { + testcase("MPT no authorization"); + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + env(tx, ter(tecNO_AUTH)); + }); + + testCase([this]( + Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + Asset const& asset, + Vault& vault) { + testcase("MPT cannot set Scale=0"); + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + tx[sfScale] = 0; + env(tx, ter{temMALFORMED}); + }); + + testCase([this]( + Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + Asset const& asset, + Vault& vault) { + testcase("MPT cannot set Scale=1"); + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + tx[sfScale] = 1; + env(tx, ter{temMALFORMED}); + }); } void @@ -1285,7 +1546,7 @@ class Vault_test : public beast::unit_test::suite jvVault[jss::result][jss::vault][sfAssetsTotal] == "100"); BEAST_EXPECT( jvVault[jss::result][jss::vault][jss::shares] - [sfOutstandingAmount] == "100"); + [sfOutstandingAmount] == "100000000"); // Vault pseudo-account return parseBase58( @@ -1324,7 +1585,7 @@ class Vault_test : public beast::unit_test::suite jvVault[jss::result][jss::vault][sfAssetsTotal] == "50"); BEAST_EXPECT( jvVault[jss::result][jss::vault][jss::shares] - [sfOutstandingAmount] == "50"); + [sfOutstandingAmount] == "50000000"); } { @@ -1508,6 +1769,10 @@ class Vault_test : public beast::unit_test::suite env(tx); env.close(); + // Clawback removed shares MPToken + auto const mptSle = env.le(keylet::mptoken(share, depositor.id())); + BEAST_EXPECT(mptSle == nullptr); + // Can delete empty vault, even if global lock tx = vault.del({.owner = owner, .id = keylet.key}); env(tx); @@ -1597,11 +1862,14 @@ class Vault_test : public beast::unit_test::suite vault.create({.owner = owner, .asset = asset}); env(tx); env.close(); + auto v = env.le(keylet); + BEAST_EXPECT(v); + MPTID share = (*v)[sfShareMPTID]; tx = vault.deposit( {.depositor = depositor, .id = keylet.key, - .amount = asset(1000)}); + .amount = asset(1000)}); // all assets held by depositor env(tx); 
env.close(); @@ -1629,8 +1897,14 @@ class Vault_test : public beast::unit_test::suite tx = vault.withdraw( {.depositor = depositor, .id = keylet.key, - .amount = asset(100)}); + .amount = asset(1000)}); env(tx); + env.close(); + + // Withdraw removed shares MPToken + auto const mptSle = + env.le(keylet::mptoken(share, depositor.id())); + BEAST_EXPECT(mptSle == nullptr); } }, {.requireAuth = false}); @@ -1702,6 +1976,96 @@ class Vault_test : public beast::unit_test::suite env(vault.del({.owner = owner, .id = keylet.key})); }); + testCase([this]( + Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + PrettyAsset const& asset, + Vault& vault, + MPTTester& mptt) { + testcase("MPT vault owner can receive shares unless unauthorized"); + + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + env(tx); + env.close(); + + tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(1000)}); + env(tx); + env.close(); + + auto const issuanceId = [&env](ripple::Keylet keylet) -> MPTID { + auto const vault = env.le(keylet); + return vault->at(sfShareMPTID); + }(keylet); + PrettyAsset shares = MPTIssue(issuanceId); + + { + // owner has MPToken for shares they did not explicitly create + env(pay(depositor, owner, shares(1))); + env.close(); + + tx = vault.withdraw( + {.depositor = owner, + .id = keylet.key, + .amount = shares(1)}); + env(tx); + env.close(); + + // owner's MPToken for vault shares not destroyed by withdraw + env(pay(depositor, owner, shares(1))); + env.close(); + + tx = vault.clawback( + {.issuer = issuer, + .id = keylet.key, + .holder = owner, + .amount = asset(0)}); + env(tx); + env.close(); + + // owner's MPToken for vault shares not destroyed by clawback + env(pay(depositor, owner, shares(1))); + env.close(); + + // pay back, so we can destroy owner's MPToken now + env(pay(owner, depositor, shares(1))); + env.close(); + + { + // explicitly destroy vault owners MPToken with zero balance + Json::Value jv; + jv[sfAccount] = owner.human(); + jv[sfMPTokenIssuanceID] = to_string(issuanceId); + jv[sfFlags] = tfMPTUnauthorize; + jv[sfTransactionType] = jss::MPTokenAuthorize; + env(jv); + env.close(); + } + + // owner no longer has MPToken for vault shares + tx = pay(depositor, owner, shares(1)); + env(tx, ter{tecNO_AUTH}); + env.close(); + + // destroy all remaining shares, so we can delete vault + tx = vault.clawback( + {.issuer = issuer, + .id = keylet.key, + .holder = depositor, + .amount = asset(0)}); + env(tx); + env.close(); + + // will soft fail destroying MPToken for vault owner + env(vault.del({.owner = owner, .id = keylet.key})); + env.close(); + } + }); + testCase( [this]( Env& env, @@ -1771,6 +2135,8 @@ class Vault_test : public beast::unit_test::suite // Withdrawal to other (authorized) accounts works tx[sfDestination] = issuer.human(); env(tx); + env.close(); + tx[sfDestination] = owner.human(); env(tx); env.close(); @@ -1792,6 +2158,7 @@ class Vault_test : public beast::unit_test::suite .holder = depositor, .amount = asset(800)}); env(tx); + env.close(); env(vault.del({.owner = owner, .id = keylet.key})); }); @@ -1901,18 +2268,16 @@ class Vault_test : public beast::unit_test::suite using namespace test::jtx; auto testCase = - [&, this]( - std::function vaultAccount, - Vault& vault, - PrettyAsset const& asset, - std::function issuanceId, - std::function vaultBalance)> - test) { + [&, + this](std::function vaultAccount, + Vault& vault, + PrettyAsset const& asset, + std::function issuanceId)> test) { Env 
env{*this, testable_amendments() | featureSingleAssetVault}; Account const owner{"owner"}; Account const issuer{"issuer"}; @@ -1929,33 +2294,13 @@ class Vault_test : public beast::unit_test::suite env(rate(issuer, 1.25)); env.close(); - auto const [tx, keylet] = - vault.create({.owner = owner, .asset = asset}); - env(tx); - env.close(); - auto const vaultAccount = - [&env](ripple::Keylet keylet) -> AccountID { - return env.le(keylet)->at(sfAccount); + [&env](ripple::Keylet keylet) -> Account { + return Account("vault", env.le(keylet)->at(sfAccount)); }; auto const issuanceId = [&env](ripple::Keylet keylet) -> MPTID { return env.le(keylet)->at(sfShareMPTID); }; - auto const vaultBalance = // - [&env, &vaultAccount, issue = asset.raw().get()]( - ripple::Keylet keylet) -> PrettyAmount { - auto const account = vaultAccount(keylet); - auto const sle = env.le(keylet::line(account, issue)); - if (sle == nullptr) - return { - STAmount(issue, 0), - env.lookup(issue.account).name()}; - auto amount = sle->getFieldAmount(sfBalance); - amount.setIssuer(issue.account); - if (account > issue.account) - amount.negate(); - return {amount, env.lookup(issue.account).name()}; - }; test( env, @@ -1965,8 +2310,7 @@ class Vault_test : public beast::unit_test::suite vaultAccount, vault, asset, - issuanceId, - vaultBalance); + issuanceId); }; testCase([&, this]( @@ -2029,8 +2373,7 @@ class Vault_test : public beast::unit_test::suite auto vaultAccount, Vault& vault, PrettyAsset const& asset, - auto issuanceId, - auto) { + auto issuanceId) { testcase("IOU frozen trust line to vault account"); auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); @@ -2101,7 +2444,9 @@ class Vault_test : public beast::unit_test::suite env.close(); env(vault.withdraw( - {.depositor = owner, .id = keylet.key, .amount = share(50)})); + {.depositor = owner, + .id = keylet.key, + .amount = share(50'000'000)})); env(vault.del({.owner = owner, .id = keylet.key})); env.close(); @@ -2112,11 +2457,10 @@ class Vault_test : public beast::unit_test::suite Account const& owner, Account const& issuer, Account const& charlie, - auto, + auto vaultAccount, Vault& vault, PrettyAsset const& asset, - auto issuanceId, - auto vaultBalance) { + auto issuanceId) { testcase("IOU transfer fees not applied"); auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); @@ -2132,7 +2476,8 @@ class Vault_test : public beast::unit_test::suite // transfer fees ignored on deposit BEAST_EXPECT(env.balance(owner, issue) == asset(100)); - BEAST_EXPECT(vaultBalance(keylet) == asset(100)); + BEAST_EXPECT( + env.balance(vaultAccount(keylet), issue) == asset(100)); { auto tx = vault.clawback( @@ -2146,20 +2491,22 @@ class Vault_test : public beast::unit_test::suite // transfer fees ignored on clawback BEAST_EXPECT(env.balance(owner, issue) == asset(100)); - BEAST_EXPECT(vaultBalance(keylet) == asset(50)); + BEAST_EXPECT(env.balance(vaultAccount(keylet), issue) == asset(50)); env(vault.withdraw( - {.depositor = owner, .id = keylet.key, .amount = share(20)})); + {.depositor = owner, + .id = keylet.key, + .amount = share(20'000'000)})); // transfer fees ignored on withdraw BEAST_EXPECT(env.balance(owner, issue) == asset(120)); - BEAST_EXPECT(vaultBalance(keylet) == asset(30)); + BEAST_EXPECT(env.balance(vaultAccount(keylet), issue) == asset(30)); { auto tx = vault.withdraw( {.depositor = owner, .id = keylet.key, - .amount = share(30)}); + .amount = share(30'000'000)}); tx[sfDestination] = charlie.human(); env(tx); } @@ -2167,7 +2514,7 @@ class Vault_test : 
public beast::unit_test::suite // transfer fees ignored on withdraw to 3rd party BEAST_EXPECT(env.balance(owner, issue) == asset(120)); BEAST_EXPECT(env.balance(charlie, issue) == asset(30)); - BEAST_EXPECT(vaultBalance(keylet) == asset(0)); + BEAST_EXPECT(env.balance(vaultAccount(keylet), issue) == asset(0)); env(vault.del({.owner = owner, .id = keylet.key})); env.close(); @@ -2843,6 +3190,870 @@ class Vault_test : public beast::unit_test::suite env(tx, ter{terADDRESS_COLLISION}); } + void + testScaleIOU() + { + using namespace test::jtx; + + struct Data + { + Account const& owner; + Account const& issuer; + Account const& depositor; + Account const& vaultAccount; + MPTIssue shares; + PrettyAsset const& share; + Vault& vault; + ripple::Keylet keylet; + Issue assets; + PrettyAsset const& asset; + std::function)> peek; + }; + + auto testCase = [&, this]( + std::uint8_t scale, + std::function test) { + Env env{*this, testable_amendments() | featureSingleAssetVault}; + Account const owner{"owner"}; + Account const issuer{"issuer"}; + Account const depositor{"depositor"}; + Vault vault{env}; + env.fund(XRP(1000), issuer, owner, depositor); + env(fset(issuer, asfAllowTrustLineClawback)); + env.close(); + + PrettyAsset const asset = issuer["IOU"]; + env.trust(asset(1000), owner); + env.trust(asset(1000), depositor); + env(pay(issuer, owner, asset(200))); + env(pay(issuer, depositor, asset(200))); + env.close(); + + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + tx[sfScale] = scale; + env(tx); + + auto const [vaultAccount, issuanceId] = + [&env](ripple::Keylet keylet) -> std::tuple { + auto const vault = env.le(keylet); + return { + Account("vault", vault->at(sfAccount)), + vault->at(sfShareMPTID)}; + }(keylet); + MPTIssue shares(issuanceId); + env.memoize(vaultAccount); + + auto const peek = + [=, &env, this](std::function fn) -> bool { + return env.app().openLedger().modify( + [&](OpenView& view, beast::Journal j) -> bool { + Sandbox sb(&view, tapNONE); + auto vault = sb.peek(keylet::vault(keylet.key)); + if (!BEAST_EXPECT(vault != nullptr)) + return false; + auto shares = sb.peek( + keylet::mptIssuance(vault->at(sfShareMPTID))); + if (!BEAST_EXPECT(shares != nullptr)) + return false; + if (fn(*vault, *shares)) + { + sb.update(vault); + sb.update(shares); + sb.apply(view); + return true; + } + return false; + }); + }; + + test( + env, + {.owner = owner, + .issuer = issuer, + .depositor = depositor, + .vaultAccount = vaultAccount, + .shares = shares, + .share = PrettyAsset(shares), + .vault = vault, + .keylet = keylet, + .assets = asset.raw().get(), + .asset = asset, + .peek = peek}); + }; + + testCase(18, [&, this](Env& env, Data d) { + testcase("Scale deposit overflow on first deposit"); + auto tx = d.vault.deposit( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = d.asset(10)}); + env(tx, ter{tecPATH_DRY}); + env.close(); + }); + + testCase(18, [&, this](Env& env, Data d) { + testcase("Scale deposit overflow on second deposit"); + + { + auto tx = d.vault.deposit( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = d.asset(5)}); + env(tx); + env.close(); + } + + { + auto tx = d.vault.deposit( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = d.asset(10)}); + env(tx, ter{tecPATH_DRY}); + env.close(); + } + }); + + testCase(18, [&, this](Env& env, Data d) { + testcase("Scale deposit overflow on total shares"); + + { + auto tx = d.vault.deposit( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = d.asset(5)}); + env(tx); + 
env.close(); + } + + { + auto tx = d.vault.deposit( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = d.asset(5)}); + env(tx, ter{tecPATH_DRY}); + env.close(); + } + }); + + testCase(1, [&, this](Env& env, Data d) { + testcase("Scale deposit exact"); + + auto const start = env.balance(d.depositor, d.assets).number(); + auto tx = d.vault.deposit( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = d.asset(1)}); + env(tx); + env.close(); + BEAST_EXPECT(env.balance(d.depositor, d.shares) == d.share(10)); + BEAST_EXPECT( + env.balance(d.depositor, d.assets) == + STAmount(d.asset, start - 1)); + }); + + testCase(1, [&, this](Env& env, Data d) { + testcase("Scale deposit insignificant amount"); + + auto tx = d.vault.deposit( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = STAmount(d.asset, Number(9, -2))}); + env(tx, ter{tecPRECISION_LOSS}); + }); + + testCase(1, [&, this](Env& env, Data d) { + testcase("Scale deposit exact, using full precision"); + + auto const start = env.balance(d.depositor, d.assets).number(); + auto tx = d.vault.deposit( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = STAmount(d.asset, Number(15, -1))}); + env(tx); + env.close(); + BEAST_EXPECT(env.balance(d.depositor, d.shares) == d.share(15)); + BEAST_EXPECT( + env.balance(d.depositor, d.assets) == + STAmount(d.asset, start - Number(15, -1))); + }); + + testCase(1, [&, this](Env& env, Data d) { + testcase("Scale deposit exact, truncating from .5"); + + auto const start = env.balance(d.depositor, d.assets).number(); + // Each of the cases below will transfer exactly 1.2 IOU to the + // vault and receive 12 shares in exchange + { + auto tx = d.vault.deposit( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = STAmount(d.asset, Number(125, -2))}); + env(tx); + env.close(); + BEAST_EXPECT(env.balance(d.depositor, d.shares) == d.share(12)); + BEAST_EXPECT( + env.balance(d.depositor, d.assets) == + STAmount(d.asset, start - Number(12, -1))); + } + + { + auto tx = d.vault.deposit( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = STAmount(d.asset, Number(1201, -3))}); + env(tx); + env.close(); + BEAST_EXPECT(env.balance(d.depositor, d.shares) == d.share(24)); + BEAST_EXPECT( + env.balance(d.depositor, d.assets) == + STAmount(d.asset, start - Number(24, -1))); + } + + { + auto tx = d.vault.deposit( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = STAmount(d.asset, Number(1299, -3))}); + env(tx); + env.close(); + BEAST_EXPECT(env.balance(d.depositor, d.shares) == d.share(36)); + BEAST_EXPECT( + env.balance(d.depositor, d.assets) == + STAmount(d.asset, start - Number(36, -1))); + } + }); + + testCase(1, [&, this](Env& env, Data d) { + testcase("Scale deposit exact, truncating from .01"); + + auto const start = env.balance(d.depositor, d.assets).number(); + // round to 12 + auto tx = d.vault.deposit( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = STAmount(d.asset, Number(1201, -3))}); + env(tx); + env.close(); + BEAST_EXPECT(env.balance(d.depositor, d.shares) == d.share(12)); + BEAST_EXPECT( + env.balance(d.depositor, d.assets) == + STAmount(d.asset, start - Number(12, -1))); + + { + // round to 6 + auto tx = d.vault.deposit( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = STAmount(d.asset, Number(69, -2))}); + env(tx); + env.close(); + BEAST_EXPECT(env.balance(d.depositor, d.shares) == d.share(18)); + BEAST_EXPECT( + env.balance(d.depositor, d.assets) == + STAmount(d.asset, start - Number(18, -1))); + } + }); 
+ + testCase(1, [&, this](Env& env, Data d) { + testcase("Scale deposit exact, truncating from .99"); + + auto const start = env.balance(d.depositor, d.assets).number(); + // round to 12 + auto tx = d.vault.deposit( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = STAmount(d.asset, Number(1299, -3))}); + env(tx); + env.close(); + BEAST_EXPECT(env.balance(d.depositor, d.shares) == d.share(12)); + BEAST_EXPECT( + env.balance(d.depositor, d.assets) == + STAmount(d.asset, start - Number(12, -1))); + + { + // round to 6 + auto tx = d.vault.deposit( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = STAmount(d.asset, Number(62, -2))}); + env(tx); + env.close(); + BEAST_EXPECT(env.balance(d.depositor, d.shares) == d.share(18)); + BEAST_EXPECT( + env.balance(d.depositor, d.assets) == + STAmount(d.asset, start - Number(18, -1))); + } + }); + + testCase(1, [&, this](Env& env, Data d) { + // initial setup: deposit 100 IOU, receive 1000 shares + auto const start = env.balance(d.depositor, d.assets).number(); + auto tx = d.vault.deposit( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = STAmount(d.asset, Number(100, 0))}); + env(tx); + env.close(); + BEAST_EXPECT(env.balance(d.depositor, d.shares) == d.share(1000)); + BEAST_EXPECT( + env.balance(d.depositor, d.assets) == + STAmount(d.asset, start - Number(100, 0))); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.assets) == + STAmount(d.asset, Number(100, 0))); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.shares) == + STAmount(d.share, Number(1000, 0))); + + { + testcase("Scale redeem exact"); + // sharesToAssetsWithdraw: + // assets = assetsTotal * (shares / sharesTotal) + // assets = 100 * 100 / 1000 = 100 * 0.1 = 10 + + auto const start = env.balance(d.depositor, d.assets).number(); + auto tx = d.vault.withdraw( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = STAmount(d.share, Number(100, 0))}); + env(tx); + env.close(); + BEAST_EXPECT( + env.balance(d.depositor, d.shares) == d.share(900)); + BEAST_EXPECT( + env.balance(d.depositor, d.assets) == + STAmount(d.asset, start + Number(10, 0))); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.assets) == + STAmount(d.asset, Number(90, 0))); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.shares) == + STAmount(d.share, Number(900, 0))); + } + + { + testcase("Scale redeem with rounding"); + // sharesToAssetsWithdraw: + // assets = assetsTotal * (shares / sharesTotal) + // assets = 90 * 25 / 900 = 90 * 0.02777... = 2.5 + + auto const start = env.balance(d.depositor, d.assets).number(); + d.peek([](SLE& vault, auto&) -> bool { + vault[sfAssetsAvailable] = Number(1); + return true; + }); + + // Note, this transaction fails first (because of above change + // in the open ledger) but then succeeds when the ledger is + // closed (because a modification like above is not persistent), + // which is why the checks below are expected to pass. 
+ auto tx = d.vault.withdraw( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = STAmount(d.share, Number(25, 0))}); + env(tx, ter{tecINSUFFICIENT_FUNDS}); + env.close(); + BEAST_EXPECT( + env.balance(d.depositor, d.shares) == d.share(900 - 25)); + BEAST_EXPECT( + env.balance(d.depositor, d.assets) == + STAmount(d.asset, start + Number(25, -1))); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.assets) == + STAmount(d.asset, Number(900 - 25, -1))); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.shares) == + STAmount(d.share, Number(900 - 25, 0))); + } + + { + testcase("Scale redeem exact"); + // sharesToAssetsWithdraw: + // assets = assetsTotal * (shares / sharesTotal) + // assets = 87.5 * 21 / 875 = 87.5 * 0.024 = 2.1 + + auto const start = env.balance(d.depositor, d.assets).number(); + + tx = d.vault.withdraw( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = STAmount(d.share, Number(21, 0))}); + env(tx); + env.close(); + BEAST_EXPECT( + env.balance(d.depositor, d.shares) == d.share(875 - 21)); + BEAST_EXPECT( + env.balance(d.depositor, d.assets) == + STAmount(d.asset, start + Number(21, -1))); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.assets) == + STAmount(d.asset, Number(875 - 21, -1))); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.shares) == + STAmount(d.share, Number(875 - 21, 0))); + } + + { + testcase("Scale redeem rest"); + auto const rest = env.balance(d.depositor, d.shares).number(); + + tx = d.vault.withdraw( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = STAmount(d.share, rest)}); + env(tx); + env.close(); + BEAST_EXPECT(env.balance(d.depositor, d.shares).number() == 0); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.assets).number() == 0); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.shares).number() == 0); + } + }); + + testCase(18, [&, this](Env& env, Data d) { + testcase("Scale withdraw overflow"); + + { + auto tx = d.vault.deposit( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = d.asset(5)}); + env(tx); + env.close(); + } + + { + auto tx = d.vault.withdraw( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = STAmount(d.asset, Number(10, 0))}); + env(tx, ter{tecPATH_DRY}); + env.close(); + } + }); + + testCase(1, [&, this](Env& env, Data d) { + // initial setup: deposit 100 IOU, receive 1000 shares + auto const start = env.balance(d.depositor, d.assets).number(); + auto tx = d.vault.deposit( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = STAmount(d.asset, Number(100, 0))}); + env(tx); + env.close(); + BEAST_EXPECT(env.balance(d.depositor, d.shares) == d.share(1000)); + BEAST_EXPECT( + env.balance(d.depositor, d.assets) == + STAmount(d.asset, start - Number(100, 0))); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.assets) == + STAmount(d.asset, Number(100, 0))); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.shares) == + STAmount(d.share, Number(1000, 0))); + + { + testcase("Scale withdraw exact"); + // assetsToSharesWithdraw: + // shares = sharesTotal * (assets / assetsTotal) + // shares = 1000 * 10 / 100 = 1000 * 0.1 = 100 + // sharesToAssetsWithdraw: + // assets = assetsTotal * (shares / sharesTotal) + // assets = 100 * 100 / 1000 = 100 * 0.1 = 10 + + auto const start = env.balance(d.depositor, d.assets).number(); + auto tx = d.vault.withdraw( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = STAmount(d.asset, Number(10, 0))}); + env(tx); + env.close(); + BEAST_EXPECT( + env.balance(d.depositor, d.shares) == d.share(900)); + BEAST_EXPECT( + 
env.balance(d.depositor, d.assets) == + STAmount(d.asset, start + Number(10, 0))); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.assets) == + STAmount(d.asset, Number(90, 0))); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.shares) == + STAmount(d.share, Number(900, 0))); + } + + { + testcase("Scale withdraw insignificant amount"); + auto tx = d.vault.withdraw( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = STAmount(d.asset, Number(4, -2))}); + env(tx, ter{tecPRECISION_LOSS}); + } + + { + testcase("Scale withdraw with rounding assets"); + // assetsToSharesWithdraw: + // shares = sharesTotal * (assets / assetsTotal) + // shares = 900 * 2.5 / 90 = 900 * 0.02777... = 25 + // sharesToAssetsWithdraw: + // assets = assetsTotal * (shares / sharesTotal) + // assets = 90 * 25 / 900 = 90 * 0.02777... = 2.5 + + auto const start = env.balance(d.depositor, d.assets).number(); + d.peek([](SLE& vault, auto&) -> bool { + vault[sfAssetsAvailable] = Number(1); + return true; + }); + + // Note, this transaction fails first (because of above change + // in the open ledger) but then succeeds when the ledger is + // closed (because a modification like above is not persistent), + // which is why the checks below are expected to pass. + auto tx = d.vault.withdraw( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = STAmount(d.asset, Number(25, -1))}); + env(tx, ter{tecINSUFFICIENT_FUNDS}); + env.close(); + BEAST_EXPECT( + env.balance(d.depositor, d.shares) == d.share(900 - 25)); + BEAST_EXPECT( + env.balance(d.depositor, d.assets) == + STAmount(d.asset, start + Number(25, -1))); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.assets) == + STAmount(d.asset, Number(900 - 25, -1))); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.shares) == + STAmount(d.share, Number(900 - 25, 0))); + } + + { + testcase("Scale withdraw with rounding shares up"); + // assetsToSharesWithdraw: + // shares = sharesTotal * (assets / assetsTotal) + // shares = 875 * 3.75 / 87.5 = 875 * 0.042857... = 37.5 + // sharesToAssetsWithdraw: + // assets = assetsTotal * (shares / sharesTotal) + // assets = 87.5 * 38 / 875 = 87.5 * 0.043428... = 3.8 + + auto const start = env.balance(d.depositor, d.assets).number(); + auto tx = d.vault.withdraw( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = STAmount(d.asset, Number(375, -2))}); + env(tx); + env.close(); + BEAST_EXPECT( + env.balance(d.depositor, d.shares) == d.share(875 - 38)); + BEAST_EXPECT( + env.balance(d.depositor, d.assets) == + STAmount(d.asset, start + Number(38, -1))); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.assets) == + STAmount(d.asset, Number(875 - 38, -1))); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.shares) == + STAmount(d.share, Number(875 - 38, 0))); + } + + { + testcase("Scale withdraw with rounding shares down"); + // assetsToSharesWithdraw: + // shares = sharesTotal * (assets / assetsTotal) + // shares = 837 * 3.72 / 83.7 = 837 * 0.04444... = 37.2 + // sharesToAssetsWithdraw: + // assets = assetsTotal * (shares / sharesTotal) + // assets = 83.7 * 37 / 837 = 83.7 * 0.044205... 
= 3.7 + + auto const start = env.balance(d.depositor, d.assets).number(); + auto tx = d.vault.withdraw( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = STAmount(d.asset, Number(372, -2))}); + env(tx); + env.close(); + BEAST_EXPECT( + env.balance(d.depositor, d.shares) == d.share(837 - 37)); + BEAST_EXPECT( + env.balance(d.depositor, d.assets) == + STAmount(d.asset, start + Number(37, -1))); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.assets) == + STAmount(d.asset, Number(837 - 37, -1))); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.shares) == + STAmount(d.share, Number(837 - 37, 0))); + } + + { + testcase("Scale withdraw tiny amount"); + + auto const start = env.balance(d.depositor, d.assets).number(); + auto tx = d.vault.withdraw( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = STAmount(d.asset, Number(9, -2))}); + env(tx); + env.close(); + BEAST_EXPECT( + env.balance(d.depositor, d.shares) == d.share(800 - 1)); + BEAST_EXPECT( + env.balance(d.depositor, d.assets) == + STAmount(d.asset, start + Number(1, -1))); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.assets) == + STAmount(d.asset, Number(800 - 1, -1))); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.shares) == + STAmount(d.share, Number(800 - 1, 0))); + } + + { + testcase("Scale withdraw rest"); + auto const rest = + env.balance(d.vaultAccount, d.assets).number(); + + tx = d.vault.withdraw( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = STAmount(d.asset, rest)}); + env(tx); + env.close(); + BEAST_EXPECT(env.balance(d.depositor, d.shares).number() == 0); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.assets).number() == 0); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.shares).number() == 0); + } + }); + + testCase(18, [&, this](Env& env, Data d) { + testcase("Scale clawback overflow"); + + { + auto tx = d.vault.deposit( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = d.asset(5)}); + env(tx); + env.close(); + } + + { + auto tx = d.vault.clawback( + {.issuer = d.issuer, + .id = d.keylet.key, + .holder = d.depositor, + .amount = STAmount(d.asset, Number(10, 0))}); + env(tx, ter{tecPATH_DRY}); + env.close(); + } + }); + + testCase(1, [&, this](Env& env, Data d) { + // initial setup: deposit 100 IOU, receive 1000 shares + auto const start = env.balance(d.depositor, d.assets).number(); + auto tx = d.vault.deposit( + {.depositor = d.depositor, + .id = d.keylet.key, + .amount = STAmount(d.asset, Number(100, 0))}); + env(tx); + env.close(); + BEAST_EXPECT(env.balance(d.depositor, d.shares) == d.share(1000)); + BEAST_EXPECT( + env.balance(d.depositor, d.assets) == + STAmount(d.asset, start - Number(100, 0))); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.assets) == + STAmount(d.asset, Number(100, 0))); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.shares) == + STAmount(d.share, Number(1000, 0))); + { + testcase("Scale clawback exact"); + // assetsToSharesWithdraw: + // shares = sharesTotal * (assets / assetsTotal) + // shares = 1000 * 10 / 100 = 1000 * 0.1 = 100 + // sharesToAssetsWithdraw: + // assets = assetsTotal * (shares / sharesTotal) + // assets = 100 * 100 / 1000 = 100 * 0.1 = 10 + + auto const start = env.balance(d.depositor, d.assets).number(); + auto tx = d.vault.clawback( + {.issuer = d.issuer, + .id = d.keylet.key, + .holder = d.depositor, + .amount = STAmount(d.asset, Number(10, 0))}); + env(tx); + env.close(); + BEAST_EXPECT( + env.balance(d.depositor, d.shares) == d.share(900)); + BEAST_EXPECT( + env.balance(d.depositor, d.assets) == + 
STAmount(d.asset, start)); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.assets) == + STAmount(d.asset, Number(90, 0))); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.shares) == + STAmount(d.share, Number(900, 0))); + } + + { + testcase("Scale clawback insignificant amount"); + auto tx = d.vault.clawback( + {.issuer = d.issuer, + .id = d.keylet.key, + .holder = d.depositor, + .amount = STAmount(d.asset, Number(4, -2))}); + env(tx, ter{tecPRECISION_LOSS}); + } + + { + testcase("Scale clawback with rounding assets"); + // assetsToSharesWithdraw: + // shares = sharesTotal * (assets / assetsTotal) + // shares = 900 * 2.5 / 90 = 900 * 0.02777... = 25 + // sharesToAssetsWithdraw: + // assets = assetsTotal * (shares / sharesTotal) + // assets = 90 * 25 / 900 = 90 * 0.02777... = 2.5 + + auto const start = env.balance(d.depositor, d.assets).number(); + auto tx = d.vault.clawback( + {.issuer = d.issuer, + .id = d.keylet.key, + .holder = d.depositor, + .amount = STAmount(d.asset, Number(25, -1))}); + env(tx); + env.close(); + BEAST_EXPECT( + env.balance(d.depositor, d.shares) == d.share(900 - 25)); + BEAST_EXPECT( + env.balance(d.depositor, d.assets) == + STAmount(d.asset, start)); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.assets) == + STAmount(d.asset, Number(900 - 25, -1))); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.shares) == + STAmount(d.share, Number(900 - 25, 0))); + } + + { + testcase("Scale clawback with rounding shares up"); + // assetsToSharesWithdraw: + // shares = sharesTotal * (assets / assetsTotal) + // shares = 875 * 3.75 / 87.5 = 875 * 0.042857... = 37.5 + // sharesToAssetsWithdraw: + // assets = assetsTotal * (shares / sharesTotal) + // assets = 87.5 * 38 / 875 = 87.5 * 0.043428... = 3.8 + + auto const start = env.balance(d.depositor, d.assets).number(); + auto tx = d.vault.clawback( + {.issuer = d.issuer, + .id = d.keylet.key, + .holder = d.depositor, + .amount = STAmount(d.asset, Number(375, -2))}); + env(tx); + env.close(); + BEAST_EXPECT( + env.balance(d.depositor, d.shares) == d.share(875 - 38)); + BEAST_EXPECT( + env.balance(d.depositor, d.assets) == + STAmount(d.asset, start)); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.assets) == + STAmount(d.asset, Number(875 - 38, -1))); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.shares) == + STAmount(d.share, Number(875 - 38, 0))); + } + + { + testcase("Scale clawback with rounding shares down"); + // assetsToSharesWithdraw: + // shares = sharesTotal * (assets / assetsTotal) + // shares = 837 * 3.72 / 83.7 = 837 * 0.04444... = 37.2 + // sharesToAssetsWithdraw: + // assets = assetsTotal * (shares / sharesTotal) + // assets = 83.7 * 37 / 837 = 83.7 * 0.044205... 
= 3.7 + + auto const start = env.balance(d.depositor, d.assets).number(); + auto tx = d.vault.clawback( + {.issuer = d.issuer, + .id = d.keylet.key, + .holder = d.depositor, + .amount = STAmount(d.asset, Number(372, -2))}); + env(tx); + env.close(); + BEAST_EXPECT( + env.balance(d.depositor, d.shares) == d.share(837 - 37)); + BEAST_EXPECT( + env.balance(d.depositor, d.assets) == + STAmount(d.asset, start)); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.assets) == + STAmount(d.asset, Number(837 - 37, -1))); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.shares) == + STAmount(d.share, Number(837 - 37, 0))); + } + + { + testcase("Scale clawback tiny amount"); + + auto const start = env.balance(d.depositor, d.assets).number(); + auto tx = d.vault.clawback( + {.issuer = d.issuer, + .id = d.keylet.key, + .holder = d.depositor, + .amount = STAmount(d.asset, Number(9, -2))}); + env(tx); + env.close(); + BEAST_EXPECT( + env.balance(d.depositor, d.shares) == d.share(800 - 1)); + BEAST_EXPECT( + env.balance(d.depositor, d.assets) == + STAmount(d.asset, start)); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.assets) == + STAmount(d.asset, Number(800 - 1, -1))); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.shares) == + STAmount(d.share, Number(800 - 1, 0))); + } + + { + testcase("Scale clawback rest"); + auto const rest = + env.balance(d.vaultAccount, d.assets).number(); + d.peek([](SLE& vault, auto&) -> bool { + vault[sfAssetsAvailable] = Number(5); + return true; + }); + + // Note, this transaction yields two different results: + // * in the open ledger, with AssetsAvailable = 5 + // * when the ledger is closed with unmodified AssetsAvailable + // because a modification like above is not persistent. + tx = d.vault.clawback( + {.issuer = d.issuer, + .id = d.keylet.key, + .holder = d.depositor, + .amount = STAmount(d.asset, rest)}); + env(tx); + env.close(); + BEAST_EXPECT(env.balance(d.depositor, d.shares).number() == 0); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.assets).number() == 0); + BEAST_EXPECT( + env.balance(d.vaultAccount, d.shares).number() == 0); + } + }); + } + void testRPC() { @@ -2943,7 +4154,8 @@ class Vault_test : public beast::unit_test::suite issuance, sfFlags, int(lsfMPTCanEscrow | lsfMPTCanTrade | lsfMPTCanTransfer))); - BEAST_EXPECT(checkString(issuance, sfOutstandingAmount, "50")); + BEAST_EXPECT( + checkString(issuance, sfOutstandingAmount, "50000000")); } }; @@ -3326,6 +4538,7 @@ public: testWithDomainCheckXRP(); testNonTransferableShares(); testFailedPseudoAccount(); + testScaleIOU(); testRPC(); } }; diff --git a/src/test/basics/Number_test.cpp b/src/test/basics/Number_test.cpp index 964cfe9614..f24c0b35e1 100644 --- a/src/test/basics/Number_test.cpp +++ b/src/test/basics/Number_test.cpp @@ -720,6 +720,30 @@ public: BEAST_EXPECT(res2 == STAmount{7518784}); } + void + test_truncate() + { + BEAST_EXPECT(Number(25, +1).truncate() == Number(250, 0)); + BEAST_EXPECT(Number(25, 0).truncate() == Number(25, 0)); + BEAST_EXPECT(Number(25, -1).truncate() == Number(2, 0)); + BEAST_EXPECT(Number(25, -2).truncate() == Number(0, 0)); + BEAST_EXPECT(Number(99, -2).truncate() == Number(0, 0)); + + BEAST_EXPECT(Number(-25, +1).truncate() == Number(-250, 0)); + BEAST_EXPECT(Number(-25, 0).truncate() == Number(-25, 0)); + BEAST_EXPECT(Number(-25, -1).truncate() == Number(-2, 0)); + BEAST_EXPECT(Number(-25, -2).truncate() == Number(0, 0)); + BEAST_EXPECT(Number(-99, -2).truncate() == Number(0, 0)); + + BEAST_EXPECT(Number(0, 0).truncate() == Number(0, 0)); + BEAST_EXPECT(Number(0, 
30000).truncate() == Number(0, 0)); + BEAST_EXPECT(Number(0, -30000).truncate() == Number(0, 0)); + BEAST_EXPECT(Number(100, -30000).truncate() == Number(0, 0)); + BEAST_EXPECT(Number(100, -30000).truncate() == Number(0, 0)); + BEAST_EXPECT(Number(-100, -30000).truncate() == Number(0, 0)); + BEAST_EXPECT(Number(-100, -30000).truncate() == Number(0, 0)); + } + void run() override { @@ -740,6 +764,7 @@ public: test_stream(); test_inc_dec(); test_toSTAmount(); + test_truncate(); } }; diff --git a/src/test/jtx/Account.h b/src/test/jtx/Account.h index d91bb4a383..940960051a 100644 --- a/src/test/jtx/Account.h +++ b/src/test/jtx/Account.h @@ -74,6 +74,10 @@ public: /** @} */ + /** Create an Account from an account ID. Should only be used when the + * secret key is unavailable, such as for pseudo-accounts. */ + explicit Account(std::string name, AccountID const& id); + enum AcctStringType { base58Seed, other }; /** Create an account from a base58 seed string. Throws on invalid seed. */ Account(AcctStringType stringType, std::string base58SeedStr); diff --git a/src/test/jtx/impl/Account.cpp b/src/test/jtx/impl/Account.cpp index b61048e66f..fe901848f8 100644 --- a/src/test/jtx/impl/Account.cpp +++ b/src/test/jtx/impl/Account.cpp @@ -86,6 +86,14 @@ Account::Account(AcctStringType stringType, std::string base58SeedStr) { } +Account::Account(std::string name, AccountID const& id) + : Account(name, randomKeyPair(KeyType::secp256k1), privateCtorTag{}) +{ + // override the randomly generated values + id_ = id; + human_ = toBase58(id_); +} + IOU Account::operator[](std::string const& s) const { diff --git a/src/xrpld/app/tx/detail/InvariantCheck.cpp b/src/xrpld/app/tx/detail/InvariantCheck.cpp index d93378d3cd..da0dfc117f 100644 --- a/src/xrpld/app/tx/detail/InvariantCheck.cpp +++ b/src/xrpld/app/tx/detail/InvariantCheck.cpp @@ -1510,6 +1510,12 @@ ValidMPTIssuance::finalize( if (tx.getTxnType() == ttESCROW_FINISH) return true; + + if ((tx.getTxnType() == ttVAULT_CLAWBACK || + tx.getTxnType() == ttVAULT_WITHDRAW) && + mptokensDeleted_ == 1 && mptokensCreated_ == 0 && + mptIssuancesCreated_ == 0 && mptIssuancesDeleted_ == 0) + return true; } if (mptIssuancesCreated_ != 0) diff --git a/src/xrpld/app/tx/detail/VaultClawback.cpp b/src/xrpld/app/tx/detail/VaultClawback.cpp index f9bd0c7629..87740da179 100644 --- a/src/xrpld/app/tx/detail/VaultClawback.cpp +++ b/src/xrpld/app/tx/detail/VaultClawback.cpp @@ -21,8 +21,10 @@ #include #include +#include #include #include +#include #include #include #include @@ -151,7 +153,7 @@ VaultClawback::doApply() if (!vault) return tefINTERNAL; // LCOV_EXCL_LINE - auto const mptIssuanceID = (*vault)[sfShareMPTID]; + auto const mptIssuanceID = *((*vault)[sfShareMPTID]); auto const sleIssuance = view().read(keylet::mptIssuance(mptIssuanceID)); if (!sleIssuance) { @@ -161,68 +163,169 @@ VaultClawback::doApply() // LCOV_EXCL_STOP } - Asset const asset = vault->at(sfAsset); + Asset const vaultAsset = vault->at(sfAsset); STAmount const amount = [&]() -> STAmount { auto const maybeAmount = tx[~sfAmount]; if (maybeAmount) return *maybeAmount; - return {sfAmount, asset, 0}; + return {sfAmount, vaultAsset, 0}; }(); XRPL_ASSERT( - amount.asset() == asset, + amount.asset() == vaultAsset, "ripple::VaultClawback::doApply : matching asset"); + auto assetsAvailable = vault->at(sfAssetsAvailable); + auto assetsTotal = vault->at(sfAssetsTotal); + [[maybe_unused]] auto const lossUnrealized = vault->at(sfLossUnrealized); + XRPL_ASSERT( + lossUnrealized <= (assetsTotal - assetsAvailable), + 
"ripple::VaultClawback::doApply : loss and assets do balance"); + AccountID holder = tx[sfHolder]; - STAmount assets, shares; - if (amount == beast::zero) + MPTIssue const share{mptIssuanceID}; + STAmount sharesDestroyed = {share}; + STAmount assetsRecovered; + try { - Asset share = *(*vault)[sfShareMPTID]; - shares = accountHolds( - view(), - holder, - share, - FreezeHandling::fhIGNORE_FREEZE, - AuthHandling::ahIGNORE_AUTH, - j_); - assets = sharesToAssetsWithdraw(vault, sleIssuance, shares); + if (amount == beast::zero) + { + sharesDestroyed = accountHolds( + view(), + holder, + share, + FreezeHandling::fhIGNORE_FREEZE, + AuthHandling::ahIGNORE_AUTH, + j_); + + auto const maybeAssets = + sharesToAssetsWithdraw(vault, sleIssuance, sharesDestroyed); + if (!maybeAssets) + return tecINTERNAL; // LCOV_EXCL_LINE + assetsRecovered = *maybeAssets; + } + else + { + assetsRecovered = amount; + { + auto const maybeShares = + assetsToSharesWithdraw(vault, sleIssuance, assetsRecovered); + if (!maybeShares) + return tecINTERNAL; // LCOV_EXCL_LINE + sharesDestroyed = *maybeShares; + } + + auto const maybeAssets = + sharesToAssetsWithdraw(vault, sleIssuance, sharesDestroyed); + if (!maybeAssets) + return tecINTERNAL; // LCOV_EXCL_LINE + assetsRecovered = *maybeAssets; + } + + // Clamp to maximum. + if (assetsRecovered > *assetsAvailable) + { + assetsRecovered = *assetsAvailable; + // Note, it is important to truncate the number of shares, otherwise + // the corresponding assets might breach the AssetsAvailable + { + auto const maybeShares = assetsToSharesWithdraw( + vault, sleIssuance, assetsRecovered, TruncateShares::yes); + if (!maybeShares) + return tecINTERNAL; // LCOV_EXCL_LINE + sharesDestroyed = *maybeShares; + } + + auto const maybeAssets = + sharesToAssetsWithdraw(vault, sleIssuance, sharesDestroyed); + if (!maybeAssets) + return tecINTERNAL; // LCOV_EXCL_LINE + assetsRecovered = *maybeAssets; + if (assetsRecovered > *assetsAvailable) + { + // LCOV_EXCL_START + JLOG(j_.error()) + << "VaultClawback: invalid rounding of shares."; + return tecINTERNAL; + // LCOV_EXCL_STOP + } + } } - else + catch (std::overflow_error const&) { - assets = amount; - shares = assetsToSharesWithdraw(vault, sleIssuance, assets); + // It's easy to hit this exception from Number with large enough Scale + // so we avoid spamming the log and only use debug here. + JLOG(j_.debug()) // + << "VaultClawback: overflow error with" + << " scale=" << (int)vault->at(sfScale).value() // + << ", assetsTotal=" << vault->at(sfAssetsTotal).value() + << ", sharesTotal=" << sleIssuance->at(sfOutstandingAmount) + << ", amount=" << amount.value(); + return tecPATH_DRY; } - // Clamp to maximum. - Number maxAssets = *vault->at(sfAssetsAvailable); - if (assets > maxAssets) - { - assets = maxAssets; - shares = assetsToSharesWithdraw(vault, sleIssuance, assets); - } + if (sharesDestroyed == beast::zero) + return tecPRECISION_LOSS; - if (shares == beast::zero) - return tecINSUFFICIENT_FUNDS; - - vault->at(sfAssetsTotal) -= assets; - vault->at(sfAssetsAvailable) -= assets; + assetsTotal -= assetsRecovered; + assetsAvailable -= assetsRecovered; view().update(vault); auto const& vaultAccount = vault->at(sfAccount); // Transfer shares from holder to vault. 
- if (auto ter = accountSend( - view(), holder, vaultAccount, shares, j_, WaiveTransferFee::Yes)) + if (auto const ter = accountSend( + view(), + holder, + vaultAccount, + sharesDestroyed, + j_, + WaiveTransferFee::Yes); + !isTesSuccess(ter)) return ter; + // Try to remove MPToken for shares, if the holder balance is zero. Vault + // pseudo-account will never set lsfMPTAuthorized, so we ignore flags. + // Keep MPToken if holder is the vault owner. + if (holder != vault->at(sfOwner)) + { + if (auto const ter = + removeEmptyHolding(view(), holder, sharesDestroyed.asset(), j_); + isTesSuccess(ter)) + { + JLOG(j_.debug()) // + << "VaultClawback: removed empty MPToken for vault shares" + << " MPTID=" << to_string(mptIssuanceID) // + << " account=" << toBase58(holder); + } + else if (ter != tecHAS_OBLIGATIONS) + { + // LCOV_EXCL_START + JLOG(j_.error()) // + << "VaultClawback: failed to remove MPToken for vault shares" + << " MPTID=" << to_string(mptIssuanceID) // + << " account=" << toBase58(holder) // + << " with result: " << transToken(ter); + return ter; + // LCOV_EXCL_STOP + } + // else quietly ignore, holder balance is not zero + } + // Transfer assets from vault to issuer. - if (auto ter = accountSend( - view(), vaultAccount, account_, assets, j_, WaiveTransferFee::Yes)) + if (auto const ter = accountSend( + view(), + vaultAccount, + account_, + assetsRecovered, + j_, + WaiveTransferFee::Yes); + !isTesSuccess(ter)) return ter; // Sanity check if (accountHolds( view(), vaultAccount, - assets.asset(), + assetsRecovered.asset(), FreezeHandling::fhIGNORE_FREEZE, AuthHandling::ahIGNORE_AUTH, j_) < beast::zero) diff --git a/src/xrpld/app/tx/detail/VaultCreate.cpp b/src/xrpld/app/tx/detail/VaultCreate.cpp index cb6a994e7e..0b5cdd4fc0 100644 --- a/src/xrpld/app/tx/detail/VaultCreate.cpp +++ b/src/xrpld/app/tx/detail/VaultCreate.cpp @@ -25,8 +25,10 @@ #include #include #include +#include #include #include +#include #include #include #include @@ -84,6 +86,16 @@ VaultCreate::preflight(PreflightContext const& ctx) return temMALFORMED; } + if (auto const scale = ctx.tx[~sfScale]) + { + auto const vaultAsset = ctx.tx[sfAsset]; + if (vaultAsset.holds() || vaultAsset.native()) + return temMALFORMED; + + if (scale > vaultMaximumIOUScale) + return temMALFORMED; + } + return preflight2(ctx); } @@ -97,8 +109,8 @@ VaultCreate::calculateBaseFee(ReadView const& view, STTx const& tx) TER VaultCreate::preclaim(PreclaimContext const& ctx) { - auto vaultAsset = ctx.tx[sfAsset]; - auto account = ctx.tx[sfAccount]; + auto const vaultAsset = ctx.tx[sfAsset]; + auto const account = ctx.tx[sfAccount]; if (vaultAsset.native()) ; // No special checks for XRP @@ -148,7 +160,7 @@ VaultCreate::preclaim(PreclaimContext const& ctx) return tecOBJECT_NOT_FOUND; } - auto sequence = ctx.tx.getSeqValue(); + auto const sequence = ctx.tx.getSeqValue(); if (auto const accountId = pseudoAccountAddress( ctx.view, keylet::vault(account, sequence).key); accountId == beast::zero) @@ -165,8 +177,8 @@ VaultCreate::doApply() // we can consider downgrading them to `tef` or `tem`. auto const& tx = ctx_.tx; - auto sequence = tx.getSeqValue(); - auto owner = view().peek(keylet::account(account_)); + auto const sequence = tx.getSeqValue(); + auto const owner = view().peek(keylet::account(account_)); if (owner == nullptr) return tefINTERNAL; // LCOV_EXCL_LINE @@ -190,6 +202,10 @@ VaultCreate::doApply() !isTesSuccess(ter)) return ter; + std::uint8_t const scale = (asset.holds() || asset.native()) + ? 
0 + : ctx_.tx[~sfScale].value_or(vaultDefaultIOUScale); + auto txFlags = tx.getFlags(); std::uint32_t mptFlags = 0; if ((txFlags & tfVaultShareNonTransferable) == 0) @@ -209,12 +225,13 @@ VaultCreate::doApply() .account = pseudoId->value(), .sequence = 1, .flags = mptFlags, + .assetScale = scale, .metadata = tx[~sfMPTokenMetadata], .domainId = tx[~sfDomainID], }); if (!maybeShare) return maybeShare.error(); // LCOV_EXCL_LINE - auto& share = *maybeShare; + auto const& mptIssuanceID = *maybeShare; vault->setFieldIssue(sfAsset, STIssue{sfAsset, asset}); vault->at(sfFlags) = txFlags & tfVaultPrivate; @@ -227,7 +244,7 @@ VaultCreate::doApply() // Leave default values for AssetTotal and AssetAvailable, both zero. if (auto value = tx[~sfAssetsMaximum]) vault->at(sfAssetsMaximum) = *value; - vault->at(sfShareMPTID) = share; + vault->at(sfShareMPTID) = mptIssuanceID; if (auto value = tx[~sfData]) vault->at(sfData) = *value; // Required field, default to vaultStrategyFirstComeFirstServe @@ -235,9 +252,31 @@ VaultCreate::doApply() vault->at(sfWithdrawalPolicy) = *value; else vault->at(sfWithdrawalPolicy) = vaultStrategyFirstComeFirstServe; - // No `LossUnrealized`. + if (scale) + vault->at(sfScale) = scale; view().insert(vault); + // Explicitly create MPToken for the vault owner + if (auto const err = authorizeMPToken( + view(), mPriorBalance, mptIssuanceID, account_, ctx_.journal); + !isTesSuccess(err)) + return err; + + // If the vault is private, set the authorized flag for the vault owner + if (txFlags & tfVaultPrivate) + { + if (auto const err = authorizeMPToken( + view(), + mPriorBalance, + mptIssuanceID, + pseudoId, + ctx_.journal, + {}, + account_); + !isTesSuccess(err)) + return err; + } + return tesSUCCESS; } diff --git a/src/xrpld/app/tx/detail/VaultDelete.cpp b/src/xrpld/app/tx/detail/VaultDelete.cpp index 7861e9e9b6..d4b74ae1d5 100644 --- a/src/xrpld/app/tx/detail/VaultDelete.cpp +++ b/src/xrpld/app/tx/detail/VaultDelete.cpp @@ -21,6 +21,7 @@ #include #include +#include #include #include #include @@ -128,7 +129,8 @@ VaultDelete::doApply() // Destroy the share issuance. Do not use MPTokenIssuanceDestroy for this, // no special logic needed. First run few checks, duplicated from preclaim. - auto const mpt = view().peek(keylet::mptIssuance(vault->at(sfShareMPTID))); + auto const shareMPTID = *vault->at(sfShareMPTID); + auto const mpt = view().peek(keylet::mptIssuance(shareMPTID)); if (!mpt) { // LCOV_EXCL_START @@ -137,6 +139,24 @@ VaultDelete::doApply() // LCOV_EXCL_STOP } + // Try to remove MPToken for vault shares for the vault owner if it exists. 
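// ----------------------------------------------------------------------------
// Illustrative sketch, not part of the patch: why the clamped clawback in
// VaultClawback above truncates the share count (TruncateShares::yes) before
// converting it back to assets. Plain doubles stand in for the Number/STAmount
// arithmetic of assetsToSharesWithdraw / sharesToAssetsWithdraw, and the vault
// totals below are assumed values chosen only to make the rounding visible.

#include <cassert>
#include <cmath>

int main()
{
    double const assetTotal = 3.0;       // assumed AssetsTotal
    double const shareTotal = 10.0;      // assumed outstanding shares
    double const assetsAvailable = 2.0;  // assumed AssetsAvailable

    // Clamp the requested clawback to what the vault has available.
    double assetsRecovered = assetsAvailable;

    // TruncateShares::yes: trunc(10 * (2 / 3)) = 6 shares, not 7.
    double const sharesDestroyed =
        std::trunc(shareTotal * (assetsRecovered / assetTotal));

    // Converting the integral share count back: 3 * (6 / 10) = 1.8 assets.
    assetsRecovered = assetTotal * (sharesDestroyed / shareTotal);

    // Rounding to nearest (7 shares) would map back to 2.1 assets and breach
    // AssetsAvailable; truncation keeps the recovered amount within bounds.
    assert(assetsRecovered <= assetsAvailable);
    return 0;
}
// ----------------------------------------------------------------------------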
+ if (auto const mptoken = view().peek(keylet::mptoken(shareMPTID, account_))) + { + if (auto const ter = + removeEmptyHolding(view(), account_, MPTIssue(shareMPTID), j_); + !isTesSuccess(ter)) + { + // LCOV_EXCL_START + JLOG(j_.error()) // + << "VaultDelete: failed to remove vault owner's MPToken" + << " MPTID=" << to_string(shareMPTID) // + << " account=" << toBase58(account_) // + << " with result: " << transToken(ter); + return ter; + // LCOV_EXCL_STOP + } + } + if (!view().dirRemove( keylet::ownerDir(pseudoID), (*mpt)[sfOwnerNode], mpt->key(), false)) { diff --git a/src/xrpld/app/tx/detail/VaultDeposit.cpp b/src/xrpld/app/tx/detail/VaultDeposit.cpp index db1fc3bbfe..5cdcb43e20 100644 --- a/src/xrpld/app/tx/detail/VaultDeposit.cpp +++ b/src/xrpld/app/tx/detail/VaultDeposit.cpp @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -138,7 +139,7 @@ VaultDeposit::preclaim(PreclaimContext const& ctx) if (isFrozen(ctx.view, account, vaultShare)) return tecLOCKED; - if (vault->isFlag(tfVaultPrivate) && account != vault->at(sfOwner)) + if (vault->isFlag(lsfVaultPrivate) && account != vault->at(sfOwner)) { auto const maybeDomainID = sleIssuance->at(~sfDomainID); // Since this is a private vault and the account is not its owner, we @@ -183,7 +184,7 @@ VaultDeposit::doApply() if (!vault) return tefINTERNAL; // LCOV_EXCL_LINE - auto const assets = ctx_.tx[sfAmount]; + auto const amount = ctx_.tx[sfAmount]; // Make sure the depositor can hold shares. auto const mptIssuanceID = (*vault)[sfShareMPTID]; auto const sleIssuance = view().read(keylet::mptIssuance(mptIssuanceID)); @@ -197,14 +198,14 @@ VaultDeposit::doApply() auto const& vaultAccount = vault->at(sfAccount); // Note, vault owner is always authorized - if ((vault->getFlags() & tfVaultPrivate) && account_ != vault->at(sfOwner)) + if (vault->isFlag(lsfVaultPrivate) && account_ != vault->at(sfOwner)) { if (auto const err = enforceMPTokenAuthorization( ctx_.view(), mptIssuanceID, account_, mPriorBalance, j_); !isTesSuccess(err)) return err; } - else + else // !vault->isFlag(lsfVaultPrivate) || account_ == vault->at(sfOwner) { // No authorization needed, but must ensure there is MPToken auto sleMpt = view().read(keylet::mptoken(mptIssuanceID, account_)); @@ -221,8 +222,12 @@ VaultDeposit::doApply() } // If the vault is private, set the authorized flag for the vault owner - if (vault->isFlag(tfVaultPrivate)) + if (vault->isFlag(lsfVaultPrivate)) { + // This follows from the reverse of the outer enclosing if condition + XRPL_ASSERT( + account_ == vault->at(sfOwner), + "ripple::VaultDeposit::doApply : account is owner"); if (auto const err = authorizeMPToken( view(), mPriorBalance, // priorBalance @@ -237,14 +242,52 @@ VaultDeposit::doApply() } } - // Compute exchange before transferring any amounts. - auto const shares = assetsToSharesDeposit(vault, sleIssuance, assets); + STAmount sharesCreated = {vault->at(sfShareMPTID)}, assetsDeposited; + try + { + // Compute exchange before transferring any amounts. 
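// ----------------------------------------------------------------------------
// Illustrative sketch, not part of the patch: what the deposit exchange in the
// next hunk computes. The share count is truncated to an integer, the deposit
// is then re-priced from that integral count, and only the re-priced amount is
// moved, so the vault can never take more than was offered. The totals below
// are assumed values; plain doubles stand in for Number/STAmount.

#include <cassert>
#include <cmath>

int main()
{
    double const assetTotal = 1000.0;  // assumed AssetsTotal
    double const shareTotal = 10.0;    // assumed outstanding shares

    // A 50-asset deposit truncates to zero shares -> tecPRECISION_LOSS.
    assert(std::trunc(shareTotal * (50.0 / assetTotal)) == 0.0);

    // A 250-asset deposit creates trunc(10 * 0.25) = 2 shares ...
    double const sharesCreated = std::trunc(shareTotal * (250.0 / assetTotal));
    assert(sharesCreated == 2.0);

    // ... which are worth 1000 * (2 / 10) = 200 assets, so only 200 of the
    // 250 offered assets are actually transferred into the vault.
    double const assetsDeposited = assetTotal * (sharesCreated / shareTotal);
    assert(assetsDeposited <= 250.0);
    return 0;
}
// ----------------------------------------------------------------------------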
+ { + auto const maybeShares = + assetsToSharesDeposit(vault, sleIssuance, amount); + if (!maybeShares) + return tecINTERNAL; // LCOV_EXCL_LINE + sharesCreated = *maybeShares; + } + if (sharesCreated == beast::zero) + return tecPRECISION_LOSS; + + auto const maybeAssets = + sharesToAssetsDeposit(vault, sleIssuance, sharesCreated); + if (!maybeAssets) + return tecINTERNAL; // LCOV_EXCL_LINE + else if (*maybeAssets > amount) + { + // LCOV_EXCL_START + JLOG(j_.error()) << "VaultDeposit: would take more than offered."; + return tecINTERNAL; + // LCOV_EXCL_STOP + } + assetsDeposited = *maybeAssets; + } + catch (std::overflow_error const&) + { + // It's easy to hit this exception from Number with large enough Scale + // so we avoid spamming the log and only use debug here. + JLOG(j_.debug()) // + << "VaultDeposit: overflow error with" + << " scale=" << (int)vault->at(sfScale).value() // + << ", assetsTotal=" << vault->at(sfAssetsTotal).value() + << ", sharesTotal=" << sleIssuance->at(sfOutstandingAmount) + << ", amount=" << amount; + return tecPATH_DRY; + } + XRPL_ASSERT( - shares.asset() != assets.asset(), + sharesCreated.asset() != assetsDeposited.asset(), "ripple::VaultDeposit::doApply : assets are not shares"); - vault->at(sfAssetsTotal) += assets; - vault->at(sfAssetsAvailable) += assets; + vault->at(sfAssetsTotal) += assetsDeposited; + vault->at(sfAssetsAvailable) += assetsDeposited; view().update(vault); // A deposit must not push the vault over its limit. @@ -253,15 +296,21 @@ VaultDeposit::doApply() return tecLIMIT_EXCEEDED; // Transfer assets from depositor to vault. - if (auto ter = accountSend( - view(), account_, vaultAccount, assets, j_, WaiveTransferFee::Yes)) + if (auto const ter = accountSend( + view(), + account_, + vaultAccount, + assetsDeposited, + j_, + WaiveTransferFee::Yes); + !isTesSuccess(ter)) return ter; // Sanity check if (accountHolds( view(), account_, - assets.asset(), + assetsDeposited.asset(), FreezeHandling::fhIGNORE_FREEZE, AuthHandling::ahIGNORE_AUTH, j_) < beast::zero) @@ -273,8 +322,14 @@ VaultDeposit::doApply() } // Transfer shares from vault to depositor. - if (auto ter = accountSend( - view(), vaultAccount, account_, shares, j_, WaiveTransferFee::Yes)) + if (auto const ter = accountSend( + view(), + vaultAccount, + account_, + sharesCreated, + j_, + WaiveTransferFee::Yes); + !isTesSuccess(ter)) return ter; return tesSUCCESS; diff --git a/src/xrpld/app/tx/detail/VaultSet.cpp b/src/xrpld/app/tx/detail/VaultSet.cpp index a13ce6d10e..4750f89be2 100644 --- a/src/xrpld/app/tx/detail/VaultSet.cpp +++ b/src/xrpld/app/tx/detail/VaultSet.cpp @@ -108,7 +108,7 @@ VaultSet::preclaim(PreclaimContext const& ctx) if (auto const domain = ctx.tx[~sfDomainID]) { // We can only set domain if private flag was originally set - if ((vault->getFlags() & tfVaultPrivate) == 0) + if (!vault->isFlag(lsfVaultPrivate)) { JLOG(ctx.j.debug()) << "VaultSet: vault is not private"; return tecNO_PERMISSION; @@ -175,9 +175,9 @@ VaultSet::doApply() { if (*domainId != beast::zero) { - // In VaultSet::preclaim we enforce that tfVaultPrivate must have + // In VaultSet::preclaim we enforce that lsfVaultPrivate must have // been set in the vault. We currently do not support making such a - // vault public (i.e. removal of tfVaultPrivate flag). The + // vault public (i.e. removal of lsfVaultPrivate flag). The // sfDomainID flag must be set in the MPTokenIssuance object and can // be freely updated. 
sleIssuance->setFieldH256(sfDomainID, *domainId); diff --git a/src/xrpld/app/tx/detail/VaultWithdraw.cpp b/src/xrpld/app/tx/detail/VaultWithdraw.cpp index 09a9fd14e1..0ceaabbfde 100644 --- a/src/xrpld/app/tx/detail/VaultWithdraw.cpp +++ b/src/xrpld/app/tx/detail/VaultWithdraw.cpp @@ -177,7 +177,7 @@ VaultWithdraw::doApply() if (!vault) return tefINTERNAL; // LCOV_EXCL_LINE - auto const mptIssuanceID = (*vault)[sfShareMPTID]; + auto const mptIssuanceID = *((*vault)[sfShareMPTID]); auto const sleIssuance = view().read(keylet::mptIssuance(mptIssuanceID)); if (!sleIssuance) { @@ -192,24 +192,57 @@ VaultWithdraw::doApply() // to deposit into it, and this means you are also indefinitely authorized // to withdraw from it. - auto amount = ctx_.tx[sfAmount]; - auto const asset = vault->at(sfAsset); - auto const share = MPTIssue(mptIssuanceID); - STAmount shares, assets; - if (amount.asset() == asset) + auto const amount = ctx_.tx[sfAmount]; + Asset const vaultAsset = vault->at(sfAsset); + MPTIssue const share{mptIssuanceID}; + STAmount sharesRedeemed = {share}; + STAmount assetsWithdrawn; + try { - // Fixed assets, variable shares. - assets = amount; - shares = assetsToSharesWithdraw(vault, sleIssuance, assets); + if (amount.asset() == vaultAsset) + { + // Fixed assets, variable shares. + { + auto const maybeShares = + assetsToSharesWithdraw(vault, sleIssuance, amount); + if (!maybeShares) + return tecINTERNAL; // LCOV_EXCL_LINE + sharesRedeemed = *maybeShares; + } + + if (sharesRedeemed == beast::zero) + return tecPRECISION_LOSS; + auto const maybeAssets = + sharesToAssetsWithdraw(vault, sleIssuance, sharesRedeemed); + if (!maybeAssets) + return tecINTERNAL; // LCOV_EXCL_LINE + assetsWithdrawn = *maybeAssets; + } + else if (amount.asset() == share) + { + // Fixed shares, variable assets. + sharesRedeemed = amount; + auto const maybeAssets = + sharesToAssetsWithdraw(vault, sleIssuance, sharesRedeemed); + if (!maybeAssets) + return tecINTERNAL; // LCOV_EXCL_LINE + assetsWithdrawn = *maybeAssets; + } + else + return tefINTERNAL; // LCOV_EXCL_LINE } - else if (amount.asset() == share) + catch (std::overflow_error const&) { - // Fixed shares, variable assets. - shares = amount; - assets = sharesToAssetsWithdraw(vault, sleIssuance, shares); + // It's easy to hit this exception from Number with large enough Scale + // so we avoid spamming the log and only use debug here. + JLOG(j_.debug()) // + << "VaultWithdraw: overflow error with" + << " scale=" << (int)vault->at(sfScale).value() // + << ", assetsTotal=" << vault->at(sfAssetsTotal).value() + << ", sharesTotal=" << sleIssuance->at(sfOutstandingAmount) + << ", amount=" << amount.value(); + return tecPATH_DRY; } - else - return tefINTERNAL; // LCOV_EXCL_LINE if (accountHolds( view(), @@ -217,31 +250,72 @@ VaultWithdraw::doApply() share, FreezeHandling::fhZERO_IF_FROZEN, AuthHandling::ahIGNORE_AUTH, - j_) < shares) + j_) < sharesRedeemed) { JLOG(j_.debug()) << "VaultWithdraw: account doesn't hold enough shares"; return tecINSUFFICIENT_FUNDS; } - // The vault must have enough assets on hand. The vault may hold assets that - // it has already pledged. That is why we look at AssetAvailable instead of - // the pseudo-account balance. 
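// ----------------------------------------------------------------------------
// Illustrative sketch, not part of the patch: how the fixed-share withdraw
// path above is priced. The withdraw helpers in View.cpp value shares against
// AssetsTotal minus LossUnrealized, so an unrealized loss reduces what a share
// redeems for. All numbers below are assumed; doubles stand in for Number.

#include <cassert>
#include <cmath>

int main()
{
    double const assetsTotal = 1000.0;    // assumed AssetsTotal
    double const lossUnrealized = 100.0;  // assumed LossUnrealized
    double const shareTotal = 10.0;       // assumed outstanding shares

    // Fixed shares, variable assets: redeeming 3 shares yields
    // (1000 - 100) * (3 / 10) = 270 assets rather than 300.
    double const assetsWithdrawn =
        (assetsTotal - lossUnrealized) * (3.0 / shareTotal);
    assert(std::abs(assetsWithdrawn - 270.0) < 1e-9);
    return 0;
}
// ----------------------------------------------------------------------------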
- if (*vault->at(sfAssetsAvailable) < assets) + auto assetsAvailable = vault->at(sfAssetsAvailable); + auto assetsTotal = vault->at(sfAssetsTotal); + [[maybe_unused]] auto const lossUnrealized = vault->at(sfLossUnrealized); + XRPL_ASSERT( + lossUnrealized <= (assetsTotal - assetsAvailable), + "ripple::VaultWithdraw::doApply : loss and assets do balance"); + + // The vault must have enough assets on hand. The vault may hold assets + // that it has already pledged. That is why we look at AssetAvailable + // instead of the pseudo-account balance. + if (*assetsAvailable < assetsWithdrawn) { JLOG(j_.debug()) << "VaultWithdraw: vault doesn't hold enough assets"; return tecINSUFFICIENT_FUNDS; } - vault->at(sfAssetsTotal) -= assets; - vault->at(sfAssetsAvailable) -= assets; + assetsTotal -= assetsWithdrawn; + assetsAvailable -= assetsWithdrawn; view().update(vault); auto const& vaultAccount = vault->at(sfAccount); // Transfer shares from depositor to vault. - if (auto ter = accountSend( - view(), account_, vaultAccount, shares, j_, WaiveTransferFee::Yes)) + if (auto const ter = accountSend( + view(), + account_, + vaultAccount, + sharesRedeemed, + j_, + WaiveTransferFee::Yes); + !isTesSuccess(ter)) return ter; + // Try to remove MPToken for shares, if the account balance is zero. Vault + // pseudo-account will never set lsfMPTAuthorized, so we ignore flags. + // Keep MPToken if holder is the vault owner. + if (account_ != vault->at(sfOwner)) + { + if (auto const ter = removeEmptyHolding( + view(), account_, sharesRedeemed.asset(), j_); + isTesSuccess(ter)) + { + JLOG(j_.debug()) // + << "VaultWithdraw: removed empty MPToken for vault shares" + << " MPTID=" << to_string(mptIssuanceID) // + << " account=" << toBase58(account_); + } + else if (ter != tecHAS_OBLIGATIONS) + { + // LCOV_EXCL_START + JLOG(j_.error()) // + << "VaultWithdraw: failed to remove MPToken for vault shares" + << " MPTID=" << to_string(mptIssuanceID) // + << " account=" << toBase58(account_) // + << " with result: " << transToken(ter); + return ter; + // LCOV_EXCL_STOP + } + // else quietly ignore, account balance is not zero + } + auto const dstAcct = [&]() -> AccountID { if (ctx_.tx.isFieldPresent(sfDestination)) return ctx_.tx.getAccountID(sfDestination); @@ -249,15 +323,21 @@ VaultWithdraw::doApply() }(); // Transfer assets from vault to depositor or destination account. - if (auto ter = accountSend( - view(), vaultAccount, dstAcct, assets, j_, WaiveTransferFee::Yes)) + if (auto const ter = accountSend( + view(), + vaultAccount, + dstAcct, + assetsWithdrawn, + j_, + WaiveTransferFee::Yes); + !isTesSuccess(ter)) return ter; // Sanity check if (accountHolds( view(), vaultAccount, - assets.asset(), + assetsWithdrawn.asset(), FreezeHandling::fhIGNORE_FREEZE, AuthHandling::ahIGNORE_AUTH, j_) < beast::zero) diff --git a/src/xrpld/ledger/View.h b/src/xrpld/ledger/View.h index 07f6945dd4..cfd3599f78 100644 --- a/src/xrpld/ledger/View.h +++ b/src/xrpld/ledger/View.h @@ -912,28 +912,41 @@ deleteAMMTrustLine( std::optional const& ammAccountID, beast::Journal j); -// From the perspective of a vault, -// return the number of shares to give the depositor -// when they deposit a fixed amount of assets. -[[nodiscard]] STAmount +// From the perspective of a vault, return the number of shares to give the +// depositor when they deposit a fixed amount of assets. Since shares are MPT +// this number is integral and always truncated in this calculation. 
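// ----------------------------------------------------------------------------
// Illustrative sketch, not part of the patch: the very first deposit into an
// empty vault, where the implementation that follows in View.cpp derives the
// share count from the vault's Scale (shares = assets * 10^Scale, truncated).
// The 1.5-unit IOU amount and the Scale of 6 are assumed values.

#include <cassert>

int main()
{
    long long const mantissa = 15;  // 1.5 units of an IOU: mantissa 15 ...
    int const exponent = -1;        // ... with exponent -1
    int const scale = 6;            // assumed vault Scale

    // shares = mantissa * 10^(exponent + scale) = 15 * 10^5, truncated.
    // (A non-negative exponent + scale is assumed for this small example.)
    long long pow10 = 1;
    for (int i = 0; i < exponent + scale; ++i)
        pow10 *= 10;
    long long const shares = mantissa * pow10;
    assert(shares == 1500000);

    // sharesToAssetsDeposit maps that integral count straight back to
    // shares * 10^-scale = 1.5 units of the asset.
    return 0;
}
// ----------------------------------------------------------------------------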
+[[nodiscard]] std::optional assetsToSharesDeposit( std::shared_ptr const& vault, std::shared_ptr const& issuance, STAmount const& assets); -// From the perspective of a vault, -// return the number of shares to demand from the depositor -// when they ask to withdraw a fixed amount of assets. -[[nodiscard]] STAmount +// From the perspective of a vault, return the number of assets to take from +// depositor when they receive a fixed amount of shares. Note, since shares are +// MPT, they are always an integral number. +[[nodiscard]] std::optional +sharesToAssetsDeposit( + std::shared_ptr const& vault, + std::shared_ptr const& issuance, + STAmount const& shares); + +enum class TruncateShares : bool { no = false, yes = true }; + +// From the perspective of a vault, return the number of shares to demand from +// the depositor when they ask to withdraw a fixed amount of assets. Since +// shares are MPT this number is integral, and it will be rounded to nearest +// unless explicitly requested to be truncated instead. +[[nodiscard]] std::optional assetsToSharesWithdraw( std::shared_ptr const& vault, std::shared_ptr const& issuance, - STAmount const& assets); + STAmount const& assets, + TruncateShares truncate = TruncateShares::no); -// From the perspective of a vault, -// return the number of assets to give the depositor -// when they redeem a fixed amount of shares. -[[nodiscard]] STAmount +// From the perspective of a vault, return the number of assets to give the +// depositor when they redeem a fixed amount of shares. Note, since shares are +// MPT, they are always an integral number. +[[nodiscard]] std::optional sharesToAssetsWithdraw( std::shared_ptr const& vault, std::shared_ptr const& issuance, diff --git a/src/xrpld/ledger/detail/View.cpp b/src/xrpld/ledger/detail/View.cpp index 4f8a29d15c..708d5b29f7 100644 --- a/src/xrpld/ledger/detail/View.cpp +++ b/src/xrpld/ledger/detail/View.cpp @@ -2793,58 +2793,113 @@ rippleCredit( saAmount.asset().value()); } -[[nodiscard]] STAmount +[[nodiscard]] std::optional assetsToSharesDeposit( std::shared_ptr const& vault, std::shared_ptr const& issuance, STAmount const& assets) { + XRPL_ASSERT( + !assets.negative(), + "ripple::assetsToSharesDeposit : non-negative assets"); XRPL_ASSERT( assets.asset() == vault->at(sfAsset), "ripple::assetsToSharesDeposit : assets and vault match"); - Number assetTotal = vault->at(sfAssetsTotal); - STAmount shares{vault->at(sfShareMPTID), static_cast(assets)}; + if (assets.negative() || assets.asset() != vault->at(sfAsset)) + return std::nullopt; // LCOV_EXCL_LINE + + Number const assetTotal = vault->at(sfAssetsTotal); + STAmount shares{vault->at(sfShareMPTID)}; if (assetTotal == 0) - return shares; - Number shareTotal = issuance->at(sfOutstandingAmount); - shares = shareTotal * (assets / assetTotal); + return STAmount{ + shares.asset(), + Number(assets.mantissa(), assets.exponent() + vault->at(sfScale)) + .truncate()}; + + Number const shareTotal = issuance->at(sfOutstandingAmount); + shares = (shareTotal * (assets / assetTotal)).truncate(); return shares; } -[[nodiscard]] STAmount +[[nodiscard]] std::optional +sharesToAssetsDeposit( + std::shared_ptr const& vault, + std::shared_ptr const& issuance, + STAmount const& shares) +{ + XRPL_ASSERT( + !shares.negative(), + "ripple::sharesToAssetsDeposit : non-negative shares"); + XRPL_ASSERT( + shares.asset() == vault->at(sfShareMPTID), + "ripple::sharesToAssetsDeposit : shares and vault match"); + if (shares.negative() || shares.asset() != vault->at(sfShareMPTID)) + return 
std::nullopt; // LCOV_EXCL_LINE + + Number const assetTotal = vault->at(sfAssetsTotal); + STAmount assets{vault->at(sfAsset)}; + if (assetTotal == 0) + return STAmount{ + assets.asset(), + shares.mantissa(), + shares.exponent() - vault->at(sfScale), + false}; + + Number const shareTotal = issuance->at(sfOutstandingAmount); + assets = assetTotal * (shares / shareTotal); + return assets; +} + +[[nodiscard]] std::optional assetsToSharesWithdraw( std::shared_ptr const& vault, std::shared_ptr const& issuance, - STAmount const& assets) + STAmount const& assets, + TruncateShares truncate) { + XRPL_ASSERT( + !assets.negative(), + "ripple::assetsToSharesDeposit : non-negative assets"); XRPL_ASSERT( assets.asset() == vault->at(sfAsset), "ripple::assetsToSharesWithdraw : assets and vault match"); + if (assets.negative() || assets.asset() != vault->at(sfAsset)) + return std::nullopt; // LCOV_EXCL_LINE + Number assetTotal = vault->at(sfAssetsTotal); assetTotal -= vault->at(sfLossUnrealized); STAmount shares{vault->at(sfShareMPTID)}; if (assetTotal == 0) return shares; - Number shareTotal = issuance->at(sfOutstandingAmount); - shares = shareTotal * (assets / assetTotal); + Number const shareTotal = issuance->at(sfOutstandingAmount); + Number result = shareTotal * (assets / assetTotal); + if (truncate == TruncateShares::yes) + result = result.truncate(); + shares = result; return shares; } -[[nodiscard]] STAmount +[[nodiscard]] std::optional sharesToAssetsWithdraw( std::shared_ptr const& vault, std::shared_ptr const& issuance, STAmount const& shares) { + XRPL_ASSERT( + !shares.negative(), + "ripple::sharesToAssetsDeposit : non-negative shares"); XRPL_ASSERT( shares.asset() == vault->at(sfShareMPTID), "ripple::sharesToAssetsWithdraw : shares and vault match"); + if (shares.negative() || shares.asset() != vault->at(sfShareMPTID)) + return std::nullopt; // LCOV_EXCL_LINE + Number assetTotal = vault->at(sfAssetsTotal); assetTotal -= vault->at(sfLossUnrealized); STAmount assets{vault->at(sfAsset)}; if (assetTotal == 0) return assets; - Number shareTotal = issuance->at(sfOutstandingAmount); + Number const shareTotal = issuance->at(sfOutstandingAmount); assets = assetTotal * (shares / shareTotal); return assets; } From 811c98082144fb19ca21d321f2318e51c54af4c8 Mon Sep 17 00:00:00 2001 From: Ayaz Salikhov Date: Thu, 4 Sep 2025 16:27:30 +0100 Subject: [PATCH 156/244] ci: Use cleanup-workspace action (#5763) * ci: Use cleanup-workspace action * Use latest version --- .github/workflows/build-test.yml | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml index 90e1d9853c..40399539b8 100644 --- a/.github/workflows/build-test.yml +++ b/.github/workflows/build-test.yml @@ -102,16 +102,9 @@ jobs: echo 'CMake target: ${{ matrix.cmake_target }}' echo 'Config name: ${{ matrix.config_name }}' - - name: Clean workspace (MacOS) - if: ${{ inputs.os == 'macos' }} - run: | - WORKSPACE=${{ github.workspace }} - echo "Cleaning workspace '${WORKSPACE}'." - if [ -z "${WORKSPACE}" ] || [ "${WORKSPACE}" = "/" ]; then - echo "Invalid working directory '${WORKSPACE}'." 
- exit 1 - fi - find "${WORKSPACE}" -depth 1 | xargs rm -rfv + - name: Cleanup workspace + if: ${{ runner.os == 'macOS' }} + uses: XRPLF/actions/.github/actions/cleanup-workspace@3f044c7478548e3c32ff68980eeb36ece02b364e - name: Checkout repository uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 From 3865dde0b89a778b3bd7d0ebf6c368bbd83ce6c6 Mon Sep 17 00:00:00 2001 From: Ayaz Salikhov Date: Thu, 4 Sep 2025 20:26:57 +0100 Subject: [PATCH 157/244] fix: Add missing info to notify-clio workflow (#5761) * Add missing info to notify-clio workflow, as conan_ref --- .github/workflows/notify-clio.yml | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/.github/workflows/notify-clio.yml b/.github/workflows/notify-clio.yml index f7e10de7af..b0e5f35787 100644 --- a/.github/workflows/notify-clio.yml +++ b/.github/workflows/notify-clio.yml @@ -50,6 +50,10 @@ jobs: echo "channel=pr_${{ github.event.pull_request.number }}" >> "${GITHUB_OUTPUT}" echo 'Extracting version.' echo "version=$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" | awk -F '"' '{print $2}')" >> "${GITHUB_OUTPUT}" + - name: Calculate conan reference + id: conan_ref + run: | + echo "conan_ref=${{ steps.generate.outputs.version }}@${{ steps.generate.outputs.user }}/@${{ steps.generate.outputs.channel }}" >> "${GITHUB_OUTPUT}" - name: Add Conan remote run: | echo "Adding Conan remote '${{ inputs.conan_remote_name }}' at ${{ inputs.conan_remote_url }}." @@ -61,10 +65,9 @@ jobs: - name: Upload package run: | conan export --user=${{ steps.generate.outputs.user }} --channel=${{ steps.generate.outputs.channel }} . - conan upload --confirm --check --remote=${{ inputs.conan_remote_name }} xrpl/${{ steps.generate.outputs.version }}@${{ steps.generate.outputs.user }}/${{ steps.generate.outputs.channel }} + conan upload --confirm --check --remote=${{ inputs.conan_remote_name }} xrpl/${{ steps.conan_ref.outputs.conan_ref }} outputs: - channel: ${{ steps.generate.outputs.channel }} - version: ${{ steps.generate.outputs.version }} + conan_ref: ${{ steps.conan_ref.outputs.conan_ref }} notify: needs: upload @@ -76,5 +79,5 @@ jobs: run: | gh api --method POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \ /repos/xrplf/clio/dispatches -f "event_type=check_libxrpl" \ - -F "client_payload[version]=${{ needs.upload.outputs.version }}@${{ needs.upload.outputs.user }}/${{ needs.upload.outputs.channel }}" \ - -F "client_payload[pr]=${{ github.event.pull_request.number }}" + -F "client_payload[conan_ref]=${{ needs.upload.outputs.conan_ref }}" \ + -F "client_payload[pr_url]=${{ github.event.pull_request.html_url }}" From b0f4174e4790a96de39bd05c19807627b39d2cd8 Mon Sep 17 00:00:00 2001 From: Ayaz Salikhov Date: Thu, 4 Sep 2025 21:30:54 +0100 Subject: [PATCH 158/244] chore: Use tooling provided by pre-commit (#5753) --- .github/workflows/check-format.yml | 35 +---------------- .pre-commit-config.yaml | 60 +++++++++--------------------- 2 files changed, 19 insertions(+), 76 deletions(-) diff --git a/.github/workflows/check-format.yml b/.github/workflows/check-format.yml index 359e3e634b..c63589017d 100644 --- a/.github/workflows/check-format.yml +++ b/.github/workflows/check-format.yml @@ -17,41 +17,10 @@ jobs: runs-on: ubuntu-latest container: ghcr.io/xrplf/ci/tools-rippled-pre-commit steps: - # The $GITHUB_WORKSPACE and ${{ github.workspace }} might not point to the - # same directory for jobs running in containers. 
The actions/checkout step - # is *supposed* to checkout into $GITHUB_WORKSPACE and then add it to - # safe.directory (see instructions at https://github.com/actions/checkout) - # but that is apparently not happening for some container images. We - # therefore preemptively add both directories to safe.directory. See also - # https://github.com/actions/runner/issues/2058 for more details. - - name: Configure git safe.directory - run: | - git config --global --add safe.directory $GITHUB_WORKSPACE - git config --global --add safe.directory ${{ github.workspace }} - name: Checkout repository uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 - - name: Check configuration - run: | - echo 'Checking path.' - echo ${PATH} | tr ':' '\n' - - echo 'Checking environment variables.' - env | sort - - echo 'Checking pre-commit version.' - pre-commit --version - - echo 'Checking clang-format version.' - clang-format --version - - echo 'Checking NPM version.' - npm --version - - echo 'Checking Node.js version.' - node --version - - echo 'Checking prettier version.' - prettier --version + - name: Prepare runner + uses: XRPLF/actions/.github/actions/prepare-runner@638e0dc11ea230f91bd26622fb542116bb5254d5 - name: Format code run: pre-commit run --show-diff-on-failure --color=always --all-files - name: Check for differences diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 223c324a8c..85568a8b2e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,18 +1,5 @@ # To run pre-commit hooks, first install pre-commit: # - `pip install pre-commit==${PRE_COMMIT_VERSION}` -# - `pip install pre-commit-hooks==${PRE_COMMIT_HOOKS_VERSION}` -# -# Depending on your system, you can use `brew install` or `apt install` as well -# for installing the pre-commit package, but `pip` is needed to install the -# hooks; you can also use `pipx` if you prefer. -# Next, install the required formatters: -# - `pip install clang-format==${CLANG_VERSION}` -# - `npm install prettier@${PRETTIER_VERSION}` -# -# See https://github.com/XRPLF/ci/blob/main/.github/workflows/tools-rippled.yml -# for the versions used in the CI pipeline. You will need to have the exact same -# versions of the tools installed on your system to produce the same results as -# the pipeline. 
# # Then, run the following command to install the git hook scripts: # - `pre-commit install` @@ -20,42 +7,29 @@ # - `pre-commit run --all-files` # To manually run a specific hook, use: # - `pre-commit run --all-files` -# To run the hooks against only the files changed in the current commit, use: +# To run the hooks against only the staged files, use: # - `pre-commit run` repos: - - repo: local + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: 3e8a8703264a2f4a69428a0aa4dcb512790b2c8c # frozen: v6.0.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: mixed-line-ending + - id: check-merge-conflict + args: [--assume-in-merge] + + - repo: https://github.com/pre-commit/mirrors-clang-format + rev: 7d85583be209cb547946c82fbe51f4bc5dd1d017 # frozen: v18.1.8 hooks: - id: clang-format - name: clang-format - language: system - entry: clang-format -i - files: '\.(cpp|hpp|h|ipp|proto)$' - - id: trailing-whitespace - name: trailing-whitespace - entry: trailing-whitespace-fixer - language: system - types: [text] - - id: end-of-file - name: end-of-file - entry: end-of-file-fixer - language: system - types: [text] - - id: mixed-line-ending - name: mixed-line-ending - entry: mixed-line-ending - language: system - types: [text] - - id: check-merge-conflict - name: check-merge-conflict - entry: check-merge-conflict --assume-in-merge - language: system - types: [text] - - repo: local + args: [--style=file] + "types_or": [c++, c, proto] + + - repo: https://github.com/rbubley/mirrors-prettier + rev: 5ba47274f9b181bce26a5150a725577f3c336011 # frozen: v3.6.2 hooks: - id: prettier - name: prettier - language: system - entry: prettier --ignore-unknown --write exclude: | (?x)^( From 6bf83380387a0f8a779598ee629e6f49b66fec73 Mon Sep 17 00:00:00 2001 From: Mayukha Vadari Date: Thu, 4 Sep 2025 18:32:23 -0400 Subject: [PATCH 159/244] chore: Add `conan.lock` to workflow file checks (#5769) * Add conan.lock to workflow file checks * Add conan.lock to on-trigger.yml --- .github/workflows/on-pr.yml | 1 + .github/workflows/on-trigger.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/.github/workflows/on-pr.yml b/.github/workflows/on-pr.yml index a4bbfd0997..8008aebcbb 100644 --- a/.github/workflows/on-pr.yml +++ b/.github/workflows/on-pr.yml @@ -75,6 +75,7 @@ jobs: tests/** CMakeLists.txt conanfile.py + conan.lock - name: Check whether to run # This step determines whether the rest of the workflow should # run. The rest of the workflow will run if this job runs AND at diff --git a/.github/workflows/on-trigger.yml b/.github/workflows/on-trigger.yml index 7732b814ad..dcd592a1f7 100644 --- a/.github/workflows/on-trigger.yml +++ b/.github/workflows/on-trigger.yml @@ -32,6 +32,7 @@ on: - "tests/**" - "CMakeLists.txt" - "conanfile.py" + - "conan.lock" # Run at 06:32 UTC on every day of the week from Monday through Friday. 
This # will force all dependencies to be rebuilt, which is useful to verify that From cb52c9af001316a906665a10283f6f5dd271ebb3 Mon Sep 17 00:00:00 2001 From: Ayaz Salikhov Date: Fri, 5 Sep 2025 14:08:17 +0100 Subject: [PATCH 160/244] fix: Remove extra @ in notify-clio.yml (#5771) --- .github/workflows/notify-clio.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/notify-clio.yml b/.github/workflows/notify-clio.yml index b0e5f35787..0c85bc9302 100644 --- a/.github/workflows/notify-clio.yml +++ b/.github/workflows/notify-clio.yml @@ -53,7 +53,7 @@ jobs: - name: Calculate conan reference id: conan_ref run: | - echo "conan_ref=${{ steps.generate.outputs.version }}@${{ steps.generate.outputs.user }}/@${{ steps.generate.outputs.channel }}" >> "${GITHUB_OUTPUT}" + echo "conan_ref=${{ steps.generate.outputs.version }}@${{ steps.generate.outputs.user }}/${{ steps.generate.outputs.channel }}" >> "${GITHUB_OUTPUT}" - name: Add Conan remote run: | echo "Adding Conan remote '${{ inputs.conan_remote_name }}' at ${{ inputs.conan_remote_url }}." From 9fe0a154f15df0e6314317b1b5952353470c0765 Mon Sep 17 00:00:00 2001 From: tzchenxixi Date: Mon, 8 Sep 2025 21:13:32 +0800 Subject: [PATCH 161/244] chore: remove redundant word in comment (#5752) --- src/xrpld/app/paths/Pathfinder.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/xrpld/app/paths/Pathfinder.h b/src/xrpld/app/paths/Pathfinder.h index ea3928dff4..b6c8bb8b2d 100644 --- a/src/xrpld/app/paths/Pathfinder.h +++ b/src/xrpld/app/paths/Pathfinder.h @@ -166,7 +166,7 @@ private: int addFlags, std::function const& continueCallback); - // Compute the liquidity for a path. Return tesSUCCESS if it has has enough + // Compute the liquidity for a path. Return tesSUCCESS if it has enough // liquidity to be worth keeping, otherwise an error. 
TER getPathLiquidity( From 6d40b882a4684d6926d0b23ad0e68c847e19e1ef Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Mon, 8 Sep 2025 14:54:50 +0100 Subject: [PATCH 162/244] Switch on-trigger to minimal build (#5773) --- .github/workflows/on-trigger.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/on-trigger.yml b/.github/workflows/on-trigger.yml index dcd592a1f7..d005d43d33 100644 --- a/.github/workflows/on-trigger.yml +++ b/.github/workflows/on-trigger.yml @@ -112,7 +112,7 @@ jobs: dependencies_force_build: ${{ needs.generate-outputs.outputs.dependencies_force_build == 'true' }} dependencies_force_upload: ${{ needs.generate-outputs.outputs.dependencies_force_upload == 'true' }} os: ${{ matrix.os }} - strategy_matrix: "all" + strategy_matrix: "minimal" secrets: codecov_token: ${{ secrets.CODECOV_TOKEN }} conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }} From 9ebeb413e45b64b738c9c2ee1367b1e7674859be Mon Sep 17 00:00:00 2001 From: Ayaz Salikhov Date: Mon, 8 Sep 2025 16:15:59 +0100 Subject: [PATCH 163/244] feat: Implement separate upload workflow (#5762) * feat: Implement separate upload workflow * Use cleanup-workspace * Name some workflows reusable * Add dependencies --- .github/actions/build-deps/action.yml | 31 +------- .github/actions/build-test/action.yml | 1 + .github/actions/setup-conan/action.yml | 43 ++++++++++ .github/scripts/strategy-matrix/generate.py | 0 .github/workflows/build-test.yml | 62 ++------------- .github/workflows/notify-clio.yml | 19 +++-- .github/workflows/on-pr.yml | 26 +------ .github/workflows/on-trigger.yml | 36 --------- .../workflows/reusable-strategy-matrix.yml | 38 +++++++++ .../reusable-upload-conan-deps-os.yml | 78 +++++++++++++++++++ .github/workflows/upload-conan-deps.yml | 62 +++++++++++++++ 11 files changed, 243 insertions(+), 153 deletions(-) create mode 100644 .github/actions/setup-conan/action.yml mode change 100644 => 100755 .github/scripts/strategy-matrix/generate.py create mode 100644 .github/workflows/reusable-strategy-matrix.yml create mode 100644 .github/workflows/reusable-upload-conan-deps-os.yml create mode 100644 .github/workflows/upload-conan-deps.yml diff --git a/.github/actions/build-deps/action.yml b/.github/actions/build-deps/action.yml index ba4f4e9e2f..c3b405e70f 100644 --- a/.github/actions/build-deps/action.yml +++ b/.github/actions/build-deps/action.yml @@ -1,7 +1,5 @@ -# This action installs and optionally uploads Conan dependencies to a remote -# repository. The dependencies will only be uploaded if the credentials are -# provided. name: Build Conan dependencies +description: "Install Conan dependencies, optionally forcing a rebuild of all dependencies." # Note that actions do not support 'type' and all inputs are strings, see # https://docs.github.com/en/actions/reference/workflows-and-actions/metadata-syntax#inputs. @@ -12,28 +10,10 @@ inputs: build_type: description: 'The build type to use ("Debug", "Release").' required: true - conan_remote_name: - description: "The name of the Conan remote to use." - required: true - conan_remote_url: - description: "The URL of the Conan endpoint to use." - required: true - conan_remote_username: - description: "The username for logging into the Conan remote. If not provided, the dependencies will not be uploaded." - required: false - default: "" - conan_remote_password: - description: "The password for logging into the Conan remote. If not provided, the dependencies will not be uploaded." 
- required: false - default: "" force_build: description: 'Force building of all dependencies ("true", "false").' required: false default: "false" - force_upload: - description: 'Force uploading of all dependencies ("true", "false").' - required: false - default: "false" runs: using: composite @@ -51,12 +31,3 @@ runs: --options:host '&:xrpld=True' \ --settings:all build_type=${{ inputs.build_type }} \ --format=json .. - - name: Upload Conan dependencies - if: ${{ inputs.conan_remote_username != '' && inputs.conan_remote_password != '' }} - shell: bash - working-directory: ${{ inputs.build_dir }} - run: | - echo "Logging into Conan remote '${{ inputs.conan_remote_name }}' at ${{ inputs.conan_remote_url }}." - conan remote login ${{ inputs.conan_remote_name }} "${{ inputs.conan_remote_username }}" --password "${{ inputs.conan_remote_password }}" - echo 'Uploading dependencies.' - conan upload '*' --confirm --check ${{ inputs.force_upload == 'true' && '--force' || '' }} --remote=${{ inputs.conan_remote_name }} diff --git a/.github/actions/build-test/action.yml b/.github/actions/build-test/action.yml index ee945dcf38..cf1bac16f7 100644 --- a/.github/actions/build-test/action.yml +++ b/.github/actions/build-test/action.yml @@ -1,6 +1,7 @@ # This action build and tests the binary. The Conan dependencies must have # already been installed (see the build-deps action). name: Build and Test +description: "Build and test the binary." # Note that actions do not support 'type' and all inputs are strings, see # https://docs.github.com/en/actions/reference/workflows-and-actions/metadata-syntax#inputs. diff --git a/.github/actions/setup-conan/action.yml b/.github/actions/setup-conan/action.yml new file mode 100644 index 0000000000..d31809dc94 --- /dev/null +++ b/.github/actions/setup-conan/action.yml @@ -0,0 +1,43 @@ +name: Setup Conan +description: "Set up Conan configuration, profile, and remote." + +inputs: + conan_remote_name: + description: "The name of the Conan remote to use." + required: false + default: xrplf + conan_remote_url: + description: "The URL of the Conan endpoint to use." + required: false + default: https://conan.ripplex.io + +runs: + using: composite + + steps: + - name: Set up Conan configuration + shell: bash + run: | + echo 'Installing configuration.' + cat conan/global.conf ${{ runner.os == 'Linux' && '>>' || '>' }} $(conan config home)/global.conf + + echo 'Conan configuration:' + conan config show '*' + + - name: Set up Conan profile + shell: bash + run: | + echo 'Installing profile.' + conan config install conan/profiles/default -tf $(conan config home)/profiles/ + + echo 'Conan profile:' + conan profile show + + - name: Set up Conan remote + shell: bash + run: | + echo "Adding Conan remote '${{ inputs.conan_remote_name }}' at ${{ inputs.conan_remote_url }}." + conan remote add --index 0 --force ${{ inputs.conan_remote_name }} ${{ inputs.conan_remote_url }} + + echo 'Listing Conan remotes.' + conan remote list diff --git a/.github/scripts/strategy-matrix/generate.py b/.github/scripts/strategy-matrix/generate.py old mode 100644 new mode 100755 diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml index 40399539b8..69ff986f98 100644 --- a/.github/workflows/build-test.yml +++ b/.github/workflows/build-test.yml @@ -13,14 +13,6 @@ on: required: false type: string default: ".build" - conan_remote_name: - description: "The name of the Conan remote to use." 
- required: true - type: string - conan_remote_url: - description: "The URL of the Conan endpoint to use." - required: true - type: string dependencies_force_build: description: "Force building of all dependencies." required: false @@ -45,12 +37,6 @@ on: codecov_token: description: "The Codecov token to use for uploading coverage reports." required: false - conan_remote_username: - description: "The username for logging into the Conan remote. If not provided, the dependencies will not be uploaded." - required: false - conan_remote_password: - description: "The password for logging into the Conan remote. If not provided, the dependencies will not be uploaded." - required: false concurrency: group: ${{ github.workflow }}-${{ github.ref }}-${{ inputs.os }} @@ -63,20 +49,10 @@ defaults: jobs: # Generate the strategy matrix to be used by the following job. generate-matrix: - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 - - name: Set up Python - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 - with: - python-version: 3.13 - - name: Generate strategy matrix - working-directory: .github/scripts/strategy-matrix - id: generate - run: python generate.py ${{ inputs.strategy_matrix == 'all' && '--all' || '' }} --config=${{ inputs.os }}.json >> "${GITHUB_OUTPUT}" - outputs: - matrix: ${{ steps.generate.outputs.matrix }} + uses: ./.github/workflows/reusable-strategy-matrix.yml + with: + os: ${{ inputs.os }} + strategy_matrix: ${{ inputs.strategy_matrix }} # Build and test the binary. build-test: @@ -148,40 +124,16 @@ jobs: echo 'Checking nproc version.' nproc --version - - name: Set up Conan configuration - run: | - echo 'Installing configuration.' - cat conan/global.conf ${{ inputs.os == 'linux' && '>>' || '>' }} $(conan config home)/global.conf - - echo 'Conan configuration:' - conan config show '*' - - name: Set up Conan profile - run: | - echo 'Installing profile.' - conan config install conan/profiles/default -tf $(conan config home)/profiles/ - - echo 'Conan profile:' - conan profile show - - name: Set up Conan remote - shell: bash - run: | - echo "Adding Conan remote '${{ inputs.conan_remote_name }}' at ${{ inputs.conan_remote_url }}." - conan remote add --index 0 --force ${{ inputs.conan_remote_name }} ${{ inputs.conan_remote_url }} - - echo 'Listing Conan remotes.' - conan remote list + - name: Setup Conan + uses: ./.github/actions/setup-conan - name: Build dependencies uses: ./.github/actions/build-deps with: build_dir: ${{ inputs.build_dir }} build_type: ${{ matrix.build_type }} - conan_remote_name: ${{ inputs.conan_remote_name }} - conan_remote_url: ${{ inputs.conan_remote_url }} - conan_remote_username: ${{ secrets.conan_remote_username }} - conan_remote_password: ${{ secrets.conan_remote_password }} force_build: ${{ inputs.dependencies_force_build }} - force_upload: ${{ inputs.dependencies_force_upload }} + - name: Build and test binary uses: ./.github/actions/build-test with: diff --git a/.github/workflows/notify-clio.yml b/.github/workflows/notify-clio.yml index 0c85bc9302..692904ff12 100644 --- a/.github/workflows/notify-clio.yml +++ b/.github/workflows/notify-clio.yml @@ -9,12 +9,14 @@ on: inputs: conan_remote_name: description: "The name of the Conan remote to use." - required: true + required: false type: string + default: xrplf conan_remote_url: description: "The URL of the Conan endpoint to use." 
- required: true + required: false type: string + default: https://conan.ripplex.io secrets: clio_notify_token: description: "The GitHub token to notify Clio about new versions." @@ -54,12 +56,13 @@ jobs: id: conan_ref run: | echo "conan_ref=${{ steps.generate.outputs.version }}@${{ steps.generate.outputs.user }}/${{ steps.generate.outputs.channel }}" >> "${GITHUB_OUTPUT}" - - name: Add Conan remote - run: | - echo "Adding Conan remote '${{ inputs.conan_remote_name }}' at ${{ inputs.conan_remote_url }}." - conan remote add --index 0 --force ${{ inputs.conan_remote_name }} ${{ inputs.conan_remote_url }} - echo 'Listing Conan remotes.' - conan remote list + + - name: Set up Conan + uses: ./.github/actions/setup-conan + with: + conan_remote_name: ${{ inputs.conan_remote_name }} + conan_remote_url: ${{ inputs.conan_remote_url }} + - name: Log into Conan remote run: conan remote login ${{ inputs.conan_remote_name }} "${{ secrets.conan_remote_username }}" --password "${{ secrets.conan_remote_password }}" - name: Upload package diff --git a/.github/workflows/on-pr.yml b/.github/workflows/on-pr.yml index 8008aebcbb..f72b8a9121 100644 --- a/.github/workflows/on-pr.yml +++ b/.github/workflows/on-pr.yml @@ -23,10 +23,6 @@ defaults: run: shell: bash -env: - CONAN_REMOTE_NAME: xrplf - CONAN_REMOTE_URL: https://conan.ripplex.io - jobs: # This job determines whether the rest of the workflow should run. It runs # when the PR is not a draft (which should also cover merge-group) or @@ -105,40 +101,22 @@ jobs: if: needs.should-run.outputs.go == 'true' uses: ./.github/workflows/check-levelization.yml - # This job works around the limitation that GitHub Actions does not support - # using environment variables as inputs for reusable workflows. - generate-outputs: - needs: should-run - if: needs.should-run.outputs.go == 'true' - runs-on: ubuntu-latest - steps: - - name: No-op - run: true - outputs: - conan_remote_name: ${{ env.CONAN_REMOTE_NAME }} - conan_remote_url: ${{ env.CONAN_REMOTE_URL }} - build-test: - needs: generate-outputs + needs: should-run uses: ./.github/workflows/build-test.yml strategy: matrix: os: [linux, macos, windows] with: - conan_remote_name: ${{ needs.generate-outputs.outputs.conan_remote_name }} - conan_remote_url: ${{ needs.generate-outputs.outputs.conan_remote_url }} os: ${{ matrix.os }} secrets: codecov_token: ${{ secrets.CODECOV_TOKEN }} notify-clio: needs: - - generate-outputs + - should-run - build-test uses: ./.github/workflows/notify-clio.yml - with: - conan_remote_name: ${{ needs.generate-outputs.outputs.conan_remote_name }} - conan_remote_url: ${{ needs.generate-outputs.outputs.conan_remote_url }} secrets: clio_notify_token: ${{ secrets.CLIO_NOTIFY_TOKEN }} conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }} diff --git a/.github/workflows/on-trigger.yml b/.github/workflows/on-trigger.yml index d005d43d33..14884391ef 100644 --- a/.github/workflows/on-trigger.yml +++ b/.github/workflows/on-trigger.yml @@ -66,54 +66,18 @@ defaults: run: shell: bash -env: - CONAN_REMOTE_NAME: xrplf - CONAN_REMOTE_URL: https://conan.ripplex.io - jobs: check-missing-commits: if: ${{ github.event_name == 'push' && github.ref_type == 'branch' && contains(fromJSON('["develop", "release"]'), github.ref_name) }} uses: ./.github/workflows/check-missing-commits.yml - # This job works around the limitation that GitHub Actions does not support - # using environment variables as inputs for reusable workflows. It also sets - # outputs that depend on the event that triggered the workflow. 
- generate-outputs: - runs-on: ubuntu-latest - steps: - - name: Check inputs and set outputs - id: generate - run: | - if [[ '${{ github.event_name }}' == 'push' ]]; then - echo 'dependencies_force_build=false' >> "${GITHUB_OUTPUT}" - echo 'dependencies_force_upload=false' >> "${GITHUB_OUTPUT}" - elif [[ '${{ github.event_name }}' == 'schedule' ]]; then - echo 'dependencies_force_build=true' >> "${GITHUB_OUTPUT}" - echo 'dependencies_force_upload=false' >> "${GITHUB_OUTPUT}" - else - echo 'dependencies_force_build=${{ inputs.dependencies_force_build }}' >> "${GITHUB_OUTPUT}" - echo 'dependencies_force_upload=${{ inputs.dependencies_force_upload }}' >> "${GITHUB_OUTPUT}" - fi - outputs: - conan_remote_name: ${{ env.CONAN_REMOTE_NAME }} - conan_remote_url: ${{ env.CONAN_REMOTE_URL }} - dependencies_force_build: ${{ steps.generate.outputs.dependencies_force_build }} - dependencies_force_upload: ${{ steps.generate.outputs.dependencies_force_upload }} - build-test: - needs: generate-outputs uses: ./.github/workflows/build-test.yml strategy: matrix: os: [linux, macos, windows] with: - conan_remote_name: ${{ needs.generate-outputs.outputs.conan_remote_name }} - conan_remote_url: ${{ needs.generate-outputs.outputs.conan_remote_url }} - dependencies_force_build: ${{ needs.generate-outputs.outputs.dependencies_force_build == 'true' }} - dependencies_force_upload: ${{ needs.generate-outputs.outputs.dependencies_force_upload == 'true' }} os: ${{ matrix.os }} strategy_matrix: "minimal" secrets: codecov_token: ${{ secrets.CODECOV_TOKEN }} - conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }} - conan_remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }} diff --git a/.github/workflows/reusable-strategy-matrix.yml b/.github/workflows/reusable-strategy-matrix.yml new file mode 100644 index 0000000000..5c84b51de1 --- /dev/null +++ b/.github/workflows/reusable-strategy-matrix.yml @@ -0,0 +1,38 @@ +name: Generate strategy matrix + +on: + workflow_call: + inputs: + os: + description: 'The operating system to use for the build ("linux", "macos", "windows").' + required: true + type: string + strategy_matrix: + # TODO: Support additional strategies, e.g. "ubuntu" for generating all Ubuntu configurations. + description: 'The strategy matrix to use for generating the configurations ("minimal", "all").' + required: false + type: string + default: "minimal" + outputs: + matrix: + description: "The generated strategy matrix." + value: ${{ jobs.generate-matrix.outputs.matrix }} + +jobs: + generate-matrix: + runs-on: ubuntu-latest + outputs: + matrix: ${{ steps.generate.outputs.matrix }} + steps: + - name: Checkout repository + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 + + - name: Set up Python + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + with: + python-version: 3.13 + + - name: Generate strategy matrix + working-directory: .github/scripts/strategy-matrix + id: generate + run: ./generate.py ${{ inputs.strategy_matrix == 'all' && '--all' || '' }} --config=${{ inputs.os }}.json >> "${GITHUB_OUTPUT}" diff --git a/.github/workflows/reusable-upload-conan-deps-os.yml b/.github/workflows/reusable-upload-conan-deps-os.yml new file mode 100644 index 0000000000..787cf6a39e --- /dev/null +++ b/.github/workflows/reusable-upload-conan-deps-os.yml @@ -0,0 +1,78 @@ +name: Upload Conan Dependencies + +on: + workflow_call: + inputs: + build_dir: + description: "The directory where to build." 
+ required: false + type: string + default: ".build" + conan_remote_name: + description: "The name of the Conan remote to use." + required: false + type: string + default: xrplf + + os: + description: 'The operating system to use for the build ("linux", "macos", "windows").' + required: true + type: string + force_source_build: + description: "Force source build of all dependencies" + required: true + type: boolean + force_upload: + description: "Force upload of all dependencies" + required: true + type: boolean + secrets: + CONAN_USERNAME: + required: true + CONAN_PASSWORD: + required: true + +jobs: + generate-matrix: + uses: ./.github/workflows/reusable-strategy-matrix.yml + with: + os: ${{ inputs.os }} + strategy_matrix: all + + upload-conan-deps: + needs: + - generate-matrix + strategy: + fail-fast: false + matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }} + runs-on: ${{ matrix.architecture.runner }} + container: ${{ inputs.os == 'linux' && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version) || null }} + + steps: + - name: Cleanup workspace + if: ${{ runner.os == 'macOS' }} + uses: XRPLF/actions/.github/actions/cleanup-workspace@3f044c7478548e3c32ff68980eeb36ece02b364e + + - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 + - name: Prepare runner + uses: XRPLF/actions/.github/actions/prepare-runner@638e0dc11ea230f91bd26622fb542116bb5254d5 + with: + disable_ccache: false + + - name: Setup Conan + uses: ./.github/actions/setup-conan + + - name: Build dependencies + uses: ./.github/actions/build-deps + with: + build_dir: ${{ inputs.build_dir }} + build_type: ${{ matrix.build_type }} + force_build: ${{ inputs.force_source_build }} + + - name: Login to Conan + if: github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' + run: conan remote login -p ${{ secrets.CONAN_PASSWORD }} ${{ inputs.conan_remote_name }} ${{ secrets.CONAN_USERNAME }} + + - name: Upload Conan packages + if: github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' && github.event_name != 'schedule' + run: conan upload "*" -r=${{ inputs.conan_remote_name }} --confirm ${{ inputs.force_upload == 'true' && '--force' || '' }} diff --git a/.github/workflows/upload-conan-deps.yml b/.github/workflows/upload-conan-deps.yml new file mode 100644 index 0000000000..5a6adc99be --- /dev/null +++ b/.github/workflows/upload-conan-deps.yml @@ -0,0 +1,62 @@ +name: Upload Conan Dependencies + +on: + schedule: + - cron: "0 9 * * 1-5" + workflow_dispatch: + inputs: + force_source_build: + description: "Force source build of all dependencies" + required: false + default: false + type: boolean + force_upload: + description: "Force upload of all dependencies" + required: false + default: false + type: boolean + pull_request: + branches: [develop] + paths: + - .github/workflows/upload-conan-deps.yml + + - .github/workflows/reusable-strategy-matrix.yml + - .github/workflows/reusable-upload-conan-deps-os.yml + + - .github/actions/build-deps/action.yml + - ".github/scripts/strategy-matrix/**" + + - conanfile.py + - conan.lock + push: + branches: [develop] + paths: + - .github/workflows/upload-conan-deps.yml + + - .github/workflows/reusable-strategy-matrix.yml + - .github/workflows/reusable-upload-conan-deps-os.yml + + - .github/actions/build-deps/action.yml + - ".github/scripts/strategy-matrix/**" + + - conanfile.py + - conan.lock + +concurrency: + group: ${{ github.workflow }}-${{ 
github.ref }} + cancel-in-progress: true + +jobs: + run-upload-conan-deps: + strategy: + fail-fast: true + matrix: + os: ["linux", "macos", "windows"] + uses: ./.github/workflows/reusable-upload-conan-deps-os.yml + with: + force_source_build: ${{ github.event_name == 'schedule' || github.event.inputs.force_source_build == 'true' }} + force_upload: ${{ github.event.inputs.force_upload == 'true' }} + os: ${{ matrix.os }} + secrets: + CONAN_USERNAME: ${{ secrets.CONAN_REMOTE_USERNAME }} + CONAN_PASSWORD: ${{ secrets.CONAN_REMOTE_PASSWORD }} From bcde2790a412e647f15bb8e4df6c778c4050bd67 Mon Sep 17 00:00:00 2001 From: Wo Jake Date: Tue, 9 Sep 2025 02:03:20 +0800 Subject: [PATCH 164/244] Update old links & descriptions in README.md (#4701) --- README.md | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index fe7daa38bc..dbc5ab078e 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ The [XRP Ledger](https://xrpl.org/) is a decentralized cryptographic ledger powe ## XRP -[XRP](https://xrpl.org/xrp.html) is a public, counterparty-free asset native to the XRP Ledger, and is designed to bridge the many different currencies in use worldwide. XRP is traded on the open-market and is available for anyone to access. The XRP Ledger was created in 2012 with a finite supply of 100 billion units of XRP. +[XRP](https://xrpl.org/xrp.html) is a public, counterparty-free crypto-asset native to the XRP Ledger, and is designed as a gas token for network services and to bridge different currencies. XRP is traded on the open-market and is available for anyone to access. The XRP Ledger was created in 2012 with a finite supply of 100 billion units of XRP. ## rippled @@ -23,19 +23,19 @@ If you are interested in running an **API Server** (including a **Full History S - **[Censorship-Resistant Transaction Processing][]:** No single party decides which transactions succeed or fail, and no one can "roll back" a transaction after it completes. As long as those who choose to participate in the network keep it healthy, they can settle transactions in seconds. - **[Fast, Efficient Consensus Algorithm][]:** The XRP Ledger's consensus algorithm settles transactions in 4 to 5 seconds, processing at a throughput of up to 1500 transactions per second. These properties put XRP at least an order of magnitude ahead of other top digital assets. -- **[Finite XRP Supply][]:** When the XRP Ledger began, 100 billion XRP were created, and no more XRP will ever be created. The available supply of XRP decreases slowly over time as small amounts are destroyed to pay transaction costs. -- **[Responsible Software Governance][]:** A team of full-time, world-class developers at Ripple maintain and continually improve the XRP Ledger's underlying software with contributions from the open-source community. Ripple acts as a steward for the technology and an advocate for its interests, and builds constructive relationships with governments and financial institutions worldwide. +- **[Finite XRP Supply][]:** When the XRP Ledger began, 100 billion XRP were created, and no more XRP will ever be created. The available supply of XRP decreases slowly over time as small amounts are destroyed to pay transaction fees. +- **[Responsible Software Governance][]:** A team of full-time developers at Ripple & other organizations maintain and continually improve the XRP Ledger's underlying software with contributions from the open-source community. 
Ripple acts as a steward for the technology and an advocate for its interests. - **[Secure, Adaptable Cryptography][]:** The XRP Ledger relies on industry standard digital signature systems like ECDSA (the same scheme used by Bitcoin) but also supports modern, efficient algorithms like Ed25519. The extensible nature of the XRP Ledger's software makes it possible to add and disable algorithms as the state of the art in cryptography advances. -- **[Modern Features for Smart Contracts][]:** Features like Escrow, Checks, and Payment Channels support cutting-edge financial applications including the [Interledger Protocol](https://interledger.org/). This toolbox of advanced features comes with safety features like a process for amending the network and separate checks against invariant constraints. +- **[Modern Features][]:** Features like Escrow, Checks, and Payment Channels support financial applications atop of the XRP Ledger. This toolbox of advanced features comes with safety features like a process for amending the network and separate checks against invariant constraints. - **[On-Ledger Decentralized Exchange][]:** In addition to all the features that make XRP useful on its own, the XRP Ledger also has a fully-functional accounting system for tracking and trading obligations denominated in any way users want, and an exchange built into the protocol. The XRP Ledger can settle long, cross-currency payment paths and exchanges of multiple currencies in atomic transactions, bridging gaps of trust with XRP. -[Censorship-Resistant Transaction Processing]: https://xrpl.org/xrp-ledger-overview.html#censorship-resistant-transaction-processing -[Fast, Efficient Consensus Algorithm]: https://xrpl.org/xrp-ledger-overview.html#fast-efficient-consensus-algorithm -[Finite XRP Supply]: https://xrpl.org/xrp-ledger-overview.html#finite-xrp-supply -[Responsible Software Governance]: https://xrpl.org/xrp-ledger-overview.html#responsible-software-governance -[Secure, Adaptable Cryptography]: https://xrpl.org/xrp-ledger-overview.html#secure-adaptable-cryptography -[Modern Features for Smart Contracts]: https://xrpl.org/xrp-ledger-overview.html#modern-features-for-smart-contracts -[On-Ledger Decentralized Exchange]: https://xrpl.org/xrp-ledger-overview.html#on-ledger-decentralized-exchange +[Censorship-Resistant Transaction Processing]: https://xrpl.org/transaction-censorship-detection.html#transaction-censorship-detection +[Fast, Efficient Consensus Algorithm]: https://xrpl.org/consensus-research.html#consensus-research +[Finite XRP Supply]: https://xrpl.org/what-is-xrp.html +[Responsible Software Governance]: https://xrpl.org/contribute-code.html#contribute-code-to-the-xrp-ledger +[Secure, Adaptable Cryptography]: https://xrpl.org/cryptographic-keys.html#cryptographic-keys +[Modern Features]: https://xrpl.org/use-specialized-payment-types.html +[On-Ledger Decentralized Exchange]: https://xrpl.org/decentralized-exchange.html#decentralized-exchange ## Source Code From da4c8c9550157dd91950aa774d13659104609e2a Mon Sep 17 00:00:00 2001 From: Ayaz Salikhov Date: Tue, 9 Sep 2025 16:25:41 +0100 Subject: [PATCH 165/244] ci: Only run build-test/notify-clio if should-run indicates to (#5777) - Fixes an issue introduced by #5762 which removed the transitive `should-run` check from these two jobs. 
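As a rough, self-contained sketch of the `needs` + `if` gating pattern restored here (the job names and the trivial `go=true` step are placeholders; in `on-pr.yml` the output is computed from the changed paths):

```yaml
name: gating-sketch
on: [pull_request]

jobs:
  should-run:
    runs-on: ubuntu-latest
    outputs:
      # Exposed to dependent jobs as needs.should-run.outputs.go
      go: ${{ steps.check.outputs.go }}
    steps:
      - id: check
        run: echo "go=true" >> "${GITHUB_OUTPUT}"

  build-test:
    needs: should-run
    # The explicit check is required: `needs` alone does not inspect outputs.
    if: needs.should-run.outputs.go == 'true'
    runs-on: ubuntu-latest
    steps:
      - run: echo "running because should-run said go"
```

Without the explicit `if`, a dependent job runs whenever `should-run` succeeds, regardless of what its `go` output says; `needs` only propagates failures and skips, which is why the transitive check had to be added back.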
--- .github/workflows/on-pr.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/on-pr.yml b/.github/workflows/on-pr.yml index f72b8a9121..4aa9ca5869 100644 --- a/.github/workflows/on-pr.yml +++ b/.github/workflows/on-pr.yml @@ -103,6 +103,7 @@ jobs: build-test: needs: should-run + if: needs.should-run.outputs.go == 'true' uses: ./.github/workflows/build-test.yml strategy: matrix: @@ -116,6 +117,7 @@ jobs: needs: - should-run - build-test + if: needs.should-run.outputs.go == 'true' uses: ./.github/workflows/notify-clio.yml secrets: clio_notify_token: ${{ secrets.CLIO_NOTIFY_TOKEN }} From f1eaa6a2641238cf1c4098f4a006e5ab04a3afee Mon Sep 17 00:00:00 2001 From: yinyiqian1 Date: Tue, 9 Sep 2025 11:57:28 -0400 Subject: [PATCH 166/244] enable fixAMMClawbackRounding (#5750) --- include/xrpl/protocol/detail/features.macro | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/xrpl/protocol/detail/features.macro b/include/xrpl/protocol/detail/features.macro index e2725d1fc0..264fad7fc2 100644 --- a/include/xrpl/protocol/detail/features.macro +++ b/include/xrpl/protocol/detail/features.macro @@ -34,7 +34,7 @@ XRPL_FIX (PriceOracleOrder, Supported::no, VoteBehavior::DefaultNo) XRPL_FIX (MPTDeliveredAmount, Supported::no, VoteBehavior::DefaultNo) -XRPL_FIX (AMMClawbackRounding, Supported::no, VoteBehavior::DefaultNo) +XRPL_FIX (AMMClawbackRounding, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(TokenEscrow, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (EnforceNFTokenTrustlineV2, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (AMMv1_3, Supported::yes, VoteBehavior::DefaultNo) From 148f669a2562a1870503969946286885a5981835 Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Tue, 9 Sep 2025 14:07:04 -0400 Subject: [PATCH 167/244] chore: "passed" fails if any previous jobs fail or are cancelled (#5776) For the purposes of being able to merge a PR, Github Actions jobs count as passed if they ran and passed, or were skipped. With this change, if any of the jobs that "passed" depends on fail or are cancelled, then "passed" will fail. If they all succeed or are skipped, then "passed" is skipped, which does not prevent a merge. This saves spinning up a runner in the usual case where things work, and will simplify our branch protection rules, so that only "passed" will need to be checked. --- .github/workflows/on-pr.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/on-pr.yml b/.github/workflows/on-pr.yml index 4aa9ca5869..f194bd1e37 100644 --- a/.github/workflows/on-pr.yml +++ b/.github/workflows/on-pr.yml @@ -125,11 +125,12 @@ jobs: conan_remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }} passed: + if: failure() || cancelled() needs: - build-test - check-format - check-levelization runs-on: ubuntu-latest steps: - - name: No-op - run: true + - name: Fail + run: false From e67e0395df2d7e3ead98163920112b7772bed325 Mon Sep 17 00:00:00 2001 From: Ayaz Salikhov Date: Tue, 9 Sep 2025 20:47:06 +0100 Subject: [PATCH 168/244] ci: Limit number of parallel jobs in "upload-conan-deps" (#5781) - This should prevent Artifactory from being overloaded by too many requests at a time. - Uses "max-parallel" to limit the build job to 10 simultaneous instances. - Only run the minimal matrix on PRs. 
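For illustration, a minimal workflow sketch of the `max-parallel` throttle (the matrix values below are placeholders; the real matrix is produced by the generate-matrix job):

```yaml
name: max-parallel-sketch
on: [workflow_dispatch]

jobs:
  upload-conan-deps:
    strategy:
      fail-fast: false
      # Run at most 10 matrix configurations at a time; the rest wait in the queue.
      max-parallel: 10
      matrix:
        config: [a, b, c, d, e, f, g, h, i, j, k, l]
    runs-on: ubuntu-latest
    steps:
      - run: echo "building and uploading ${{ matrix.config }}"
```

GitHub Actions then schedules no more than 10 of the matrix configurations simultaneously and queues the remainder, which keeps the number of concurrent Conan uploads to Artifactory bounded.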
--- .github/scripts/strategy-matrix/generate.py | 43 +++++++--- .../workflows/reusable-strategy-matrix.yml | 4 +- .../reusable-upload-conan-deps-os.yml | 78 ------------------- .github/workflows/upload-conan-deps.yml | 56 +++++++++---- 4 files changed, 75 insertions(+), 106 deletions(-) delete mode 100644 .github/workflows/reusable-upload-conan-deps-os.yml diff --git a/.github/scripts/strategy-matrix/generate.py b/.github/scripts/strategy-matrix/generate.py index 9743d5a4e3..b6f6601291 100755 --- a/.github/scripts/strategy-matrix/generate.py +++ b/.github/scripts/strategy-matrix/generate.py @@ -2,7 +2,17 @@ import argparse import itertools import json -import re +from pathlib import Path +from dataclasses import dataclass + +THIS_DIR = Path(__file__).parent.resolve() + +@dataclass +class Config: + architecture: list[dict] + os: list[dict] + build_type: list[str] + cmake_args: list[str] ''' Generate a strategy matrix for GitHub Actions CI. @@ -18,9 +28,9 @@ We will further set additional CMake arguments as follows: - Certain Debian Bookworm configurations will change the reference fee, enable codecov, and enable voidstar in PRs. ''' -def generate_strategy_matrix(all: bool, architecture: list[dict], os: list[dict], build_type: list[str], cmake_args: list[str]) -> dict: +def generate_strategy_matrix(all: bool, config: Config) -> list: configurations = [] - for architecture, os, build_type, cmake_args in itertools.product(architecture, os, build_type, cmake_args): + for architecture, os, build_type, cmake_args in itertools.product(config.architecture, config.os, config.build_type, config.cmake_args): # The default CMake target is 'all' for Linux and MacOS and 'install' # for Windows, but it can get overridden for certain configurations. cmake_target = 'install' if os["distro_name"] == 'windows' else 'all' @@ -158,21 +168,30 @@ def generate_strategy_matrix(all: bool, architecture: list[dict], os: list[dict] 'architecture': architecture, }) - return {'include': configurations} + return configurations + + +def read_config(file: Path) -> Config: + config = json.loads(file.read_text()) + if config['architecture'] is None or config['os'] is None or config['build_type'] is None or config['cmake_args'] is None: + raise Exception('Invalid configuration file.') + + return Config(**config) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('-a', '--all', help='Set to generate all configurations (generally used when merging a PR) or leave unset to generate a subset of configurations (generally used when committing to a PR).', action="store_true") - parser.add_argument('-c', '--config', help='Path to the JSON file containing the strategy matrix configurations.', required=True, type=str) + parser.add_argument('-c', '--config', help='Path to the JSON file containing the strategy matrix configurations.', required=False, type=Path) args = parser.parse_args() - # Load the JSON configuration file. 
- config = None - with open(args.config, 'r') as f: - config = json.load(f) - if config['architecture'] is None or config['os'] is None or config['build_type'] is None or config['cmake_args'] is None: - raise Exception('Invalid configuration file.') + matrix = [] + if args.config is None or args.config == '': + matrix += generate_strategy_matrix(args.all, read_config(THIS_DIR / "linux.json")) + matrix += generate_strategy_matrix(args.all, read_config(THIS_DIR / "macos.json")) + matrix += generate_strategy_matrix(args.all, read_config(THIS_DIR / "windows.json")) + else: + matrix += generate_strategy_matrix(args.all, read_config(args.config)) # Generate the strategy matrix. - print(f'matrix={json.dumps(generate_strategy_matrix(args.all, config['architecture'], config['os'], config['build_type'], config['cmake_args']))}') + print(f'matrix={json.dumps({"include": matrix})}') diff --git a/.github/workflows/reusable-strategy-matrix.yml b/.github/workflows/reusable-strategy-matrix.yml index 5c84b51de1..20a90fc2e3 100644 --- a/.github/workflows/reusable-strategy-matrix.yml +++ b/.github/workflows/reusable-strategy-matrix.yml @@ -5,7 +5,7 @@ on: inputs: os: description: 'The operating system to use for the build ("linux", "macos", "windows").' - required: true + required: false type: string strategy_matrix: # TODO: Support additional strategies, e.g. "ubuntu" for generating all Ubuntu configurations. @@ -35,4 +35,4 @@ jobs: - name: Generate strategy matrix working-directory: .github/scripts/strategy-matrix id: generate - run: ./generate.py ${{ inputs.strategy_matrix == 'all' && '--all' || '' }} --config=${{ inputs.os }}.json >> "${GITHUB_OUTPUT}" + run: ./generate.py ${{ inputs.strategy_matrix == 'all' && '--all' || '' }} ${{ inputs.os != '' && format('--config={0}.json', inputs.os) || '' }} >> "${GITHUB_OUTPUT}" diff --git a/.github/workflows/reusable-upload-conan-deps-os.yml b/.github/workflows/reusable-upload-conan-deps-os.yml deleted file mode 100644 index 787cf6a39e..0000000000 --- a/.github/workflows/reusable-upload-conan-deps-os.yml +++ /dev/null @@ -1,78 +0,0 @@ -name: Upload Conan Dependencies - -on: - workflow_call: - inputs: - build_dir: - description: "The directory where to build." - required: false - type: string - default: ".build" - conan_remote_name: - description: "The name of the Conan remote to use." - required: false - type: string - default: xrplf - - os: - description: 'The operating system to use for the build ("linux", "macos", "windows").' 
- required: true - type: string - force_source_build: - description: "Force source build of all dependencies" - required: true - type: boolean - force_upload: - description: "Force upload of all dependencies" - required: true - type: boolean - secrets: - CONAN_USERNAME: - required: true - CONAN_PASSWORD: - required: true - -jobs: - generate-matrix: - uses: ./.github/workflows/reusable-strategy-matrix.yml - with: - os: ${{ inputs.os }} - strategy_matrix: all - - upload-conan-deps: - needs: - - generate-matrix - strategy: - fail-fast: false - matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }} - runs-on: ${{ matrix.architecture.runner }} - container: ${{ inputs.os == 'linux' && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version) || null }} - - steps: - - name: Cleanup workspace - if: ${{ runner.os == 'macOS' }} - uses: XRPLF/actions/.github/actions/cleanup-workspace@3f044c7478548e3c32ff68980eeb36ece02b364e - - - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 - - name: Prepare runner - uses: XRPLF/actions/.github/actions/prepare-runner@638e0dc11ea230f91bd26622fb542116bb5254d5 - with: - disable_ccache: false - - - name: Setup Conan - uses: ./.github/actions/setup-conan - - - name: Build dependencies - uses: ./.github/actions/build-deps - with: - build_dir: ${{ inputs.build_dir }} - build_type: ${{ matrix.build_type }} - force_build: ${{ inputs.force_source_build }} - - - name: Login to Conan - if: github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' - run: conan remote login -p ${{ secrets.CONAN_PASSWORD }} ${{ inputs.conan_remote_name }} ${{ secrets.CONAN_USERNAME }} - - - name: Upload Conan packages - if: github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' && github.event_name != 'schedule' - run: conan upload "*" -r=${{ inputs.conan_remote_name }} --confirm ${{ inputs.force_upload == 'true' && '--force' || '' }} diff --git a/.github/workflows/upload-conan-deps.yml b/.github/workflows/upload-conan-deps.yml index 5a6adc99be..f63482761d 100644 --- a/.github/workflows/upload-conan-deps.yml +++ b/.github/workflows/upload-conan-deps.yml @@ -21,7 +21,6 @@ on: - .github/workflows/upload-conan-deps.yml - .github/workflows/reusable-strategy-matrix.yml - - .github/workflows/reusable-upload-conan-deps-os.yml - .github/actions/build-deps/action.yml - ".github/scripts/strategy-matrix/**" @@ -34,7 +33,6 @@ on: - .github/workflows/upload-conan-deps.yml - .github/workflows/reusable-strategy-matrix.yml - - .github/workflows/reusable-upload-conan-deps-os.yml - .github/actions/build-deps/action.yml - ".github/scripts/strategy-matrix/**" @@ -47,16 +45,46 @@ concurrency: cancel-in-progress: true jobs: - run-upload-conan-deps: - strategy: - fail-fast: true - matrix: - os: ["linux", "macos", "windows"] - uses: ./.github/workflows/reusable-upload-conan-deps-os.yml + generate-matrix: + uses: ./.github/workflows/reusable-strategy-matrix.yml with: - force_source_build: ${{ github.event_name == 'schedule' || github.event.inputs.force_source_build == 'true' }} - force_upload: ${{ github.event.inputs.force_upload == 'true' }} - os: ${{ matrix.os }} - secrets: - CONAN_USERNAME: ${{ secrets.CONAN_REMOTE_USERNAME }} - CONAN_PASSWORD: ${{ secrets.CONAN_REMOTE_PASSWORD }} + strategy_matrix: ${{ github.event_name == 'pull_request' && 'minimal' || 'all' }} + + run-upload-conan-deps: + needs: + - generate-matrix + strategy: + fail-fast: false + matrix: ${{ 
fromJson(needs.generate-matrix.outputs.matrix) }} + max-parallel: 10 + runs-on: ${{ matrix.architecture.runner }} + container: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version) || null }} + + steps: + - name: Cleanup workspace + if: ${{ runner.os == 'macOS' }} + uses: XRPLF/actions/.github/actions/cleanup-workspace@3f044c7478548e3c32ff68980eeb36ece02b364e + + - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 + - name: Prepare runner + uses: XRPLF/actions/.github/actions/prepare-runner@638e0dc11ea230f91bd26622fb542116bb5254d5 + with: + disable_ccache: false + + - name: Setup Conan + uses: ./.github/actions/setup-conan + + - name: Build dependencies + uses: ./.github/actions/build-deps + with: + build_dir: .build + build_type: ${{ matrix.build_type }} + force_build: ${{ github.event_name == 'schedule' || github.event.inputs.force_source_build == 'true' }} + + - name: Login to Conan + if: github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' + run: conan remote login -p ${{ secrets.CONAN_PASSWORD }} ${{ inputs.conan_remote_name }} ${{ secrets.CONAN_USERNAME }} + + - name: Upload Conan packages + if: github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' && github.event_name != 'schedule' + run: conan upload "*" -r=${{ inputs.conan_remote_name }} --confirm ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }} From e5f7a8442d763870db531070c587fc0aec70a5d8 Mon Sep 17 00:00:00 2001 From: Ayaz Salikhov Date: Tue, 9 Sep 2025 21:21:12 +0100 Subject: [PATCH 169/244] ci: Change `upload-conan-deps` workflow is run (#5782) - Don't run upload-conan-deps in PRs, unless the PR changes the workflow file. - Change cron schedule for uploading Conan dependencies to run after work hours for most dev. --- .github/workflows/upload-conan-deps.yml | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/.github/workflows/upload-conan-deps.yml b/.github/workflows/upload-conan-deps.yml index f63482761d..07e9a60dbd 100644 --- a/.github/workflows/upload-conan-deps.yml +++ b/.github/workflows/upload-conan-deps.yml @@ -2,7 +2,7 @@ name: Upload Conan Dependencies on: schedule: - - cron: "0 9 * * 1-5" + - cron: "0 3 * * 2-6" workflow_dispatch: inputs: force_source_build: @@ -18,15 +18,8 @@ on: pull_request: branches: [develop] paths: + # This allows testing changes to the upload workflow in a PR - .github/workflows/upload-conan-deps.yml - - - .github/workflows/reusable-strategy-matrix.yml - - - .github/actions/build-deps/action.yml - - ".github/scripts/strategy-matrix/**" - - - conanfile.py - - conan.lock push: branches: [develop] paths: From f6426ca1832d61e3b752124673eddf61553b7b56 Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Tue, 9 Sep 2025 22:23:07 +0100 Subject: [PATCH 170/244] Switch CI pipeline bookworm:gcc-13 from arm64 to amd64 (#5779) --- .github/scripts/strategy-matrix/generate.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/scripts/strategy-matrix/generate.py b/.github/scripts/strategy-matrix/generate.py index b6f6601291..ac39803fff 100755 --- a/.github/scripts/strategy-matrix/generate.py +++ b/.github/scripts/strategy-matrix/generate.py @@ -45,7 +45,7 @@ def generate_strategy_matrix(all: bool, config: Config) -> list: # Only generate a subset of configurations in PRs. 
if not all: # Debian: - # - Bookworm using GCC 13: Release and Unity on linux/arm64, set + # - Bookworm using GCC 13: Release and Unity on linux/amd64, set # the reference fee to 500. # - Bookworm using GCC 15: Debug and no Unity on linux/amd64, enable # code coverage (which will be done below). @@ -57,7 +57,7 @@ def generate_strategy_matrix(all: bool, config: Config) -> list: if os['distro_name'] == 'debian': skip = True if os['distro_version'] == 'bookworm': - if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-13' and build_type == 'Release' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/arm64': + if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-13' and build_type == 'Release' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/amd64': cmake_args = f'-DUNIT_TEST_REFERENCE_FEE=500 {cmake_args}' skip = False if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-15' and build_type == 'Debug' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/amd64': From cdbe70b2a7d54e85cf2e5eddd55e93bc206a5ef6 Mon Sep 17 00:00:00 2001 From: Ayaz Salikhov Date: Wed, 10 Sep 2025 08:35:58 +0100 Subject: [PATCH 171/244] ci: Use default conan install format (#5784) --- .github/actions/build-deps/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/build-deps/action.yml b/.github/actions/build-deps/action.yml index c3b405e70f..351d8a6361 100644 --- a/.github/actions/build-deps/action.yml +++ b/.github/actions/build-deps/action.yml @@ -30,4 +30,4 @@ runs: --options:host '&:tests=True' \ --options:host '&:xrpld=True' \ --settings:all build_type=${{ inputs.build_type }} \ - --format=json .. + .. From 3d92375d127acb61ca7b931d561e5cca7762cabf Mon Sep 17 00:00:00 2001 From: Ayaz Salikhov Date: Wed, 10 Sep 2025 09:20:45 +0100 Subject: [PATCH 172/244] ci: Add missing dependencies to workflows (#5783) --- .github/workflows/on-pr.yml | 2 ++ .github/workflows/on-trigger.yml | 2 ++ .github/workflows/upload-conan-deps.yml | 1 + 3 files changed, 5 insertions(+) diff --git a/.github/workflows/on-pr.yml b/.github/workflows/on-pr.yml index f194bd1e37..c480cc5476 100644 --- a/.github/workflows/on-pr.yml +++ b/.github/workflows/on-pr.yml @@ -60,8 +60,10 @@ jobs: # Keep the paths below in sync with those in `on-trigger.yml`. .github/actions/build-deps/** .github/actions/build-test/** + .github/actions/setup-conan/** .github/scripts/strategy-matrix/** .github/workflows/build-test.yml + .github/workflows/reusable-strategy-matrix.yml .codecov.yml cmake/** conan/** diff --git a/.github/workflows/on-trigger.yml b/.github/workflows/on-trigger.yml index 14884391ef..7c17621d67 100644 --- a/.github/workflows/on-trigger.yml +++ b/.github/workflows/on-trigger.yml @@ -21,8 +21,10 @@ on: # Keep the paths below in sync with those in `on-pr.yml`. 
- ".github/actions/build-deps/**" - ".github/actions/build-test/**" + - ".github/actions/setup-conan/**" - ".github/scripts/strategy-matrix/**" - ".github/workflows/build-test.yml" + - ".github/workflows/reusable-strategy-matrix.yml" - ".codecov.yml" - "cmake/**" - "conan/**" diff --git a/.github/workflows/upload-conan-deps.yml b/.github/workflows/upload-conan-deps.yml index 07e9a60dbd..5af72a9e41 100644 --- a/.github/workflows/upload-conan-deps.yml +++ b/.github/workflows/upload-conan-deps.yml @@ -28,6 +28,7 @@ on: - .github/workflows/reusable-strategy-matrix.yml - .github/actions/build-deps/action.yml + - .github/actions/setup-conan/action.yml - ".github/scripts/strategy-matrix/**" - conanfile.py From 61d628d654fd03135a47bf0dea28f56b03a8bd22 Mon Sep 17 00:00:00 2001 From: yinyiqian1 Date: Wed, 10 Sep 2025 13:47:33 -0400 Subject: [PATCH 173/244] fix: Add restrictions to Permission Delegation: fixDelegateV1_1 (#5650) - Amendment: fixDelegateV1_1 - In DelegateSet, disallow invalid PermissionValues like 0, and transaction values when the transaction's amendment is not enabled. Acts as if the transaction doesn't exist, which is the same thing older versions without the amendment will do. - Payment burn/mint should disallow DEX currency exchange. - Support MPT for Payment burn/mint. --- include/xrpl/protocol/Permissions.h | 7 +- include/xrpl/protocol/TxFormats.h | 2 +- include/xrpl/protocol/detail/features.macro | 1 + .../xrpl/protocol/detail/transactions.macro | 332 ++++++++++++--- include/xrpl/protocol/jss.h | 2 +- src/libxrpl/protocol/Permissions.cpp | 42 +- src/libxrpl/protocol/TxFormats.cpp | 2 +- src/test/app/AMMClawback_test.cpp | 3 +- src/test/app/Delegate_test.cpp | 390 +++++++++++++++++- src/xrpld/app/tx/detail/DelegateSet.cpp | 22 +- src/xrpld/app/tx/detail/Payment.cpp | 27 +- src/xrpld/app/tx/detail/applySteps.cpp | 4 +- 12 files changed, 732 insertions(+), 102 deletions(-) diff --git a/include/xrpl/protocol/Permissions.h b/include/xrpl/protocol/Permissions.h index 67f3eea8d7..cf49ff7382 100644 --- a/include/xrpl/protocol/Permissions.h +++ b/include/xrpl/protocol/Permissions.h @@ -20,6 +20,8 @@ #ifndef RIPPLE_PROTOCOL_PERMISSION_H_INCLUDED #define RIPPLE_PROTOCOL_PERMISSION_H_INCLUDED +#include +#include #include #include @@ -53,6 +55,8 @@ class Permission private: Permission(); + std::unordered_map txFeatureMap_; + std::unordered_map delegatableTx_; std::unordered_map @@ -80,7 +84,8 @@ public: getGranularTxType(GranularPermissionType const& gpType) const; bool - isDelegatable(std::uint32_t const& permissionValue) const; + isDelegatable(std::uint32_t const& permissionValue, Rules const& rules) + const; // for tx level permission, permission value is equal to tx type plus one uint32_t diff --git a/include/xrpl/protocol/TxFormats.h b/include/xrpl/protocol/TxFormats.h index 70b721a3d7..d17eea7644 100644 --- a/include/xrpl/protocol/TxFormats.h +++ b/include/xrpl/protocol/TxFormats.h @@ -59,7 +59,7 @@ enum TxType : std::uint16_t #pragma push_macro("TRANSACTION") #undef TRANSACTION -#define TRANSACTION(tag, value, name, delegatable, fields) tag = value, +#define TRANSACTION(tag, value, ...) tag = value, #include diff --git a/include/xrpl/protocol/detail/features.macro b/include/xrpl/protocol/detail/features.macro index 264fad7fc2..9aacbbe3d9 100644 --- a/include/xrpl/protocol/detail/features.macro +++ b/include/xrpl/protocol/detail/features.macro @@ -32,6 +32,7 @@ // If you add an amendment here, then do not forget to increment `numFeatures` // in include/xrpl/protocol/Feature.h. 
+XRPL_FIX (DelegateV1_1, Supported::no, VoteBehavior::DefaultNo) XRPL_FIX (PriceOracleOrder, Supported::no, VoteBehavior::DefaultNo) XRPL_FIX (MPTDeliveredAmount, Supported::no, VoteBehavior::DefaultNo) XRPL_FIX (AMMClawbackRounding, Supported::yes, VoteBehavior::DefaultNo) diff --git a/include/xrpl/protocol/detail/transactions.macro b/include/xrpl/protocol/detail/transactions.macro index 1131e24f61..bfbc18aa1b 100644 --- a/include/xrpl/protocol/detail/transactions.macro +++ b/include/xrpl/protocol/detail/transactions.macro @@ -22,14 +22,17 @@ #endif /** - * TRANSACTION(tag, value, name, delegatable, fields) + * TRANSACTION(tag, value, name, delegatable, amendments, fields) * * You must define a transactor class in the `ripple` namespace named `name`, * and include its header in `src/xrpld/app/tx/detail/applySteps.cpp`. */ /** This transaction type executes a payment. */ -TRANSACTION(ttPAYMENT, 0, Payment, Delegation::delegatable, ({ +TRANSACTION(ttPAYMENT, 0, Payment, + Delegation::delegatable, + uint256{}, + ({ {sfDestination, soeREQUIRED}, {sfAmount, soeREQUIRED, soeMPTSupported}, {sfSendMax, soeOPTIONAL, soeMPTSupported}, @@ -42,7 +45,10 @@ TRANSACTION(ttPAYMENT, 0, Payment, Delegation::delegatable, ({ })) /** This transaction type creates an escrow object. */ -TRANSACTION(ttESCROW_CREATE, 1, EscrowCreate, Delegation::delegatable, ({ +TRANSACTION(ttESCROW_CREATE, 1, EscrowCreate, + Delegation::delegatable, + uint256{}, + ({ {sfDestination, soeREQUIRED}, {sfAmount, soeREQUIRED, soeMPTSupported}, {sfCondition, soeOPTIONAL}, @@ -52,7 +58,10 @@ TRANSACTION(ttESCROW_CREATE, 1, EscrowCreate, Delegation::delegatable, ({ })) /** This transaction type completes an existing escrow. */ -TRANSACTION(ttESCROW_FINISH, 2, EscrowFinish, Delegation::delegatable, ({ +TRANSACTION(ttESCROW_FINISH, 2, EscrowFinish, + Delegation::delegatable, + uint256{}, + ({ {sfOwner, soeREQUIRED}, {sfOfferSequence, soeREQUIRED}, {sfFulfillment, soeOPTIONAL}, @@ -62,7 +71,10 @@ TRANSACTION(ttESCROW_FINISH, 2, EscrowFinish, Delegation::delegatable, ({ /** This transaction type adjusts various account settings. */ -TRANSACTION(ttACCOUNT_SET, 3, AccountSet, Delegation::notDelegatable, ({ +TRANSACTION(ttACCOUNT_SET, 3, AccountSet, + Delegation::notDelegatable, + uint256{}, + ({ {sfEmailHash, soeOPTIONAL}, {sfWalletLocator, soeOPTIONAL}, {sfWalletSize, soeOPTIONAL}, @@ -76,20 +88,29 @@ TRANSACTION(ttACCOUNT_SET, 3, AccountSet, Delegation::notDelegatable, ({ })) /** This transaction type cancels an existing escrow. */ -TRANSACTION(ttESCROW_CANCEL, 4, EscrowCancel, Delegation::delegatable, ({ +TRANSACTION(ttESCROW_CANCEL, 4, EscrowCancel, + Delegation::delegatable, + uint256{}, + ({ {sfOwner, soeREQUIRED}, {sfOfferSequence, soeREQUIRED}, })) /** This transaction type sets or clears an account's "regular key". */ -TRANSACTION(ttREGULAR_KEY_SET, 5, SetRegularKey, Delegation::notDelegatable, ({ +TRANSACTION(ttREGULAR_KEY_SET, 5, SetRegularKey, + Delegation::notDelegatable, + uint256{}, + ({ {sfRegularKey, soeOPTIONAL}, })) // 6 deprecated /** This transaction type creates an offer to trade one asset for another. 
*/ -TRANSACTION(ttOFFER_CREATE, 7, OfferCreate, Delegation::delegatable, ({ +TRANSACTION(ttOFFER_CREATE, 7, OfferCreate, + Delegation::delegatable, + uint256{}, + ({ {sfTakerPays, soeREQUIRED}, {sfTakerGets, soeREQUIRED}, {sfExpiration, soeOPTIONAL}, @@ -98,14 +119,20 @@ TRANSACTION(ttOFFER_CREATE, 7, OfferCreate, Delegation::delegatable, ({ })) /** This transaction type cancels existing offers to trade one asset for another. */ -TRANSACTION(ttOFFER_CANCEL, 8, OfferCancel, Delegation::delegatable, ({ +TRANSACTION(ttOFFER_CANCEL, 8, OfferCancel, + Delegation::delegatable, + uint256{}, + ({ {sfOfferSequence, soeREQUIRED}, })) // 9 deprecated /** This transaction type creates a new set of tickets. */ -TRANSACTION(ttTICKET_CREATE, 10, TicketCreate, Delegation::delegatable, ({ +TRANSACTION(ttTICKET_CREATE, 10, TicketCreate, + Delegation::delegatable, + featureTicketBatch, + ({ {sfTicketCount, soeREQUIRED}, })) @@ -114,13 +141,19 @@ TRANSACTION(ttTICKET_CREATE, 10, TicketCreate, Delegation::delegatable, ({ /** This transaction type modifies the signer list associated with an account. */ // The SignerEntries are optional because a SignerList is deleted by // setting the SignerQuorum to zero and omitting SignerEntries. -TRANSACTION(ttSIGNER_LIST_SET, 12, SignerListSet, Delegation::notDelegatable, ({ +TRANSACTION(ttSIGNER_LIST_SET, 12, SignerListSet, + Delegation::notDelegatable, + uint256{}, + ({ {sfSignerQuorum, soeREQUIRED}, {sfSignerEntries, soeOPTIONAL}, })) /** This transaction type creates a new unidirectional XRP payment channel. */ -TRANSACTION(ttPAYCHAN_CREATE, 13, PaymentChannelCreate, Delegation::delegatable, ({ +TRANSACTION(ttPAYCHAN_CREATE, 13, PaymentChannelCreate, + Delegation::delegatable, + uint256{}, + ({ {sfDestination, soeREQUIRED}, {sfAmount, soeREQUIRED}, {sfSettleDelay, soeREQUIRED}, @@ -130,14 +163,20 @@ TRANSACTION(ttPAYCHAN_CREATE, 13, PaymentChannelCreate, Delegation::delegatable, })) /** This transaction type funds an existing unidirectional XRP payment channel. */ -TRANSACTION(ttPAYCHAN_FUND, 14, PaymentChannelFund, Delegation::delegatable, ({ +TRANSACTION(ttPAYCHAN_FUND, 14, PaymentChannelFund, + Delegation::delegatable, + uint256{}, + ({ {sfChannel, soeREQUIRED}, {sfAmount, soeREQUIRED}, {sfExpiration, soeOPTIONAL}, })) /** This transaction type submits a claim against an existing unidirectional payment channel. */ -TRANSACTION(ttPAYCHAN_CLAIM, 15, PaymentChannelClaim, Delegation::delegatable, ({ +TRANSACTION(ttPAYCHAN_CLAIM, 15, PaymentChannelClaim, + Delegation::delegatable, + uint256{}, + ({ {sfChannel, soeREQUIRED}, {sfAmount, soeOPTIONAL}, {sfBalance, soeOPTIONAL}, @@ -147,7 +186,10 @@ TRANSACTION(ttPAYCHAN_CLAIM, 15, PaymentChannelClaim, Delegation::delegatable, ( })) /** This transaction type creates a new check. */ -TRANSACTION(ttCHECK_CREATE, 16, CheckCreate, Delegation::delegatable, ({ +TRANSACTION(ttCHECK_CREATE, 16, CheckCreate, + Delegation::delegatable, + featureChecks, + ({ {sfDestination, soeREQUIRED}, {sfSendMax, soeREQUIRED}, {sfExpiration, soeOPTIONAL}, @@ -156,19 +198,28 @@ TRANSACTION(ttCHECK_CREATE, 16, CheckCreate, Delegation::delegatable, ({ })) /** This transaction type cashes an existing check. */ -TRANSACTION(ttCHECK_CASH, 17, CheckCash, Delegation::delegatable, ({ +TRANSACTION(ttCHECK_CASH, 17, CheckCash, + Delegation::delegatable, + featureChecks, + ({ {sfCheckID, soeREQUIRED}, {sfAmount, soeOPTIONAL}, {sfDeliverMin, soeOPTIONAL}, })) /** This transaction type cancels an existing check. 
*/ -TRANSACTION(ttCHECK_CANCEL, 18, CheckCancel, Delegation::delegatable, ({ +TRANSACTION(ttCHECK_CANCEL, 18, CheckCancel, + Delegation::delegatable, + featureChecks, + ({ {sfCheckID, soeREQUIRED}, })) /** This transaction type grants or revokes authorization to transfer funds. */ -TRANSACTION(ttDEPOSIT_PREAUTH, 19, DepositPreauth, Delegation::delegatable, ({ +TRANSACTION(ttDEPOSIT_PREAUTH, 19, DepositPreauth, + Delegation::delegatable, + featureDepositPreauth, + ({ {sfAuthorize, soeOPTIONAL}, {sfUnauthorize, soeOPTIONAL}, {sfAuthorizeCredentials, soeOPTIONAL}, @@ -176,14 +227,20 @@ TRANSACTION(ttDEPOSIT_PREAUTH, 19, DepositPreauth, Delegation::delegatable, ({ })) /** This transaction type modifies a trustline between two accounts. */ -TRANSACTION(ttTRUST_SET, 20, TrustSet, Delegation::delegatable, ({ +TRANSACTION(ttTRUST_SET, 20, TrustSet, + Delegation::delegatable, + uint256{}, + ({ {sfLimitAmount, soeOPTIONAL}, {sfQualityIn, soeOPTIONAL}, {sfQualityOut, soeOPTIONAL}, })) /** This transaction type deletes an existing account. */ -TRANSACTION(ttACCOUNT_DELETE, 21, AccountDelete, Delegation::notDelegatable, ({ +TRANSACTION(ttACCOUNT_DELETE, 21, AccountDelete, + Delegation::notDelegatable, + uint256{}, + ({ {sfDestination, soeREQUIRED}, {sfDestinationTag, soeOPTIONAL}, {sfCredentialIDs, soeOPTIONAL}, @@ -192,7 +249,10 @@ TRANSACTION(ttACCOUNT_DELETE, 21, AccountDelete, Delegation::notDelegatable, ({ // 22 reserved /** This transaction mints a new NFT. */ -TRANSACTION(ttNFTOKEN_MINT, 25, NFTokenMint, Delegation::delegatable, ({ +TRANSACTION(ttNFTOKEN_MINT, 25, NFTokenMint, + Delegation::delegatable, + featureNonFungibleTokensV1, + ({ {sfNFTokenTaxon, soeREQUIRED}, {sfTransferFee, soeOPTIONAL}, {sfIssuer, soeOPTIONAL}, @@ -203,13 +263,19 @@ TRANSACTION(ttNFTOKEN_MINT, 25, NFTokenMint, Delegation::delegatable, ({ })) /** This transaction burns (i.e. destroys) an existing NFT. */ -TRANSACTION(ttNFTOKEN_BURN, 26, NFTokenBurn, Delegation::delegatable, ({ +TRANSACTION(ttNFTOKEN_BURN, 26, NFTokenBurn, + Delegation::delegatable, + featureNonFungibleTokensV1, + ({ {sfNFTokenID, soeREQUIRED}, {sfOwner, soeOPTIONAL}, })) /** This transaction creates a new offer to buy or sell an NFT. */ -TRANSACTION(ttNFTOKEN_CREATE_OFFER, 27, NFTokenCreateOffer, Delegation::delegatable, ({ +TRANSACTION(ttNFTOKEN_CREATE_OFFER, 27, NFTokenCreateOffer, + Delegation::delegatable, + featureNonFungibleTokensV1, + ({ {sfNFTokenID, soeREQUIRED}, {sfAmount, soeREQUIRED}, {sfDestination, soeOPTIONAL}, @@ -218,25 +284,37 @@ TRANSACTION(ttNFTOKEN_CREATE_OFFER, 27, NFTokenCreateOffer, Delegation::delegata })) /** This transaction cancels an existing offer to buy or sell an existing NFT. */ -TRANSACTION(ttNFTOKEN_CANCEL_OFFER, 28, NFTokenCancelOffer, Delegation::delegatable, ({ +TRANSACTION(ttNFTOKEN_CANCEL_OFFER, 28, NFTokenCancelOffer, + Delegation::delegatable, + featureNonFungibleTokensV1, + ({ {sfNFTokenOffers, soeREQUIRED}, })) /** This transaction accepts an existing offer to buy or sell an existing NFT. */ -TRANSACTION(ttNFTOKEN_ACCEPT_OFFER, 29, NFTokenAcceptOffer, Delegation::delegatable, ({ +TRANSACTION(ttNFTOKEN_ACCEPT_OFFER, 29, NFTokenAcceptOffer, + Delegation::delegatable, + featureNonFungibleTokensV1, + ({ {sfNFTokenBuyOffer, soeOPTIONAL}, {sfNFTokenSellOffer, soeOPTIONAL}, {sfNFTokenBrokerFee, soeOPTIONAL}, })) /** This transaction claws back issued tokens. 
*/ -TRANSACTION(ttCLAWBACK, 30, Clawback, Delegation::delegatable, ({ +TRANSACTION(ttCLAWBACK, 30, Clawback, + Delegation::delegatable, + featureClawback, + ({ {sfAmount, soeREQUIRED, soeMPTSupported}, {sfHolder, soeOPTIONAL}, })) /** This transaction claws back tokens from an AMM pool. */ -TRANSACTION(ttAMM_CLAWBACK, 31, AMMClawback, Delegation::delegatable, ({ +TRANSACTION(ttAMM_CLAWBACK, 31, AMMClawback, + Delegation::delegatable, + featureAMMClawback, + ({ {sfHolder, soeREQUIRED}, {sfAsset, soeREQUIRED}, {sfAsset2, soeREQUIRED}, @@ -244,14 +322,20 @@ TRANSACTION(ttAMM_CLAWBACK, 31, AMMClawback, Delegation::delegatable, ({ })) /** This transaction type creates an AMM instance */ -TRANSACTION(ttAMM_CREATE, 35, AMMCreate, Delegation::delegatable, ({ +TRANSACTION(ttAMM_CREATE, 35, AMMCreate, + Delegation::delegatable, + featureAMM, + ({ {sfAmount, soeREQUIRED}, {sfAmount2, soeREQUIRED}, {sfTradingFee, soeREQUIRED}, })) /** This transaction type deposits into an AMM instance */ -TRANSACTION(ttAMM_DEPOSIT, 36, AMMDeposit, Delegation::delegatable, ({ +TRANSACTION(ttAMM_DEPOSIT, 36, AMMDeposit, + Delegation::delegatable, + featureAMM, + ({ {sfAsset, soeREQUIRED}, {sfAsset2, soeREQUIRED}, {sfAmount, soeOPTIONAL}, @@ -262,7 +346,10 @@ TRANSACTION(ttAMM_DEPOSIT, 36, AMMDeposit, Delegation::delegatable, ({ })) /** This transaction type withdraws from an AMM instance */ -TRANSACTION(ttAMM_WITHDRAW, 37, AMMWithdraw, Delegation::delegatable, ({ +TRANSACTION(ttAMM_WITHDRAW, 37, AMMWithdraw, + Delegation::delegatable, + featureAMM, + ({ {sfAsset, soeREQUIRED}, {sfAsset2, soeREQUIRED}, {sfAmount, soeOPTIONAL}, @@ -272,14 +359,20 @@ TRANSACTION(ttAMM_WITHDRAW, 37, AMMWithdraw, Delegation::delegatable, ({ })) /** This transaction type votes for the trading fee */ -TRANSACTION(ttAMM_VOTE, 38, AMMVote, Delegation::delegatable, ({ +TRANSACTION(ttAMM_VOTE, 38, AMMVote, + Delegation::delegatable, + featureAMM, + ({ {sfAsset, soeREQUIRED}, {sfAsset2, soeREQUIRED}, {sfTradingFee, soeREQUIRED}, })) /** This transaction type bids for the auction slot */ -TRANSACTION(ttAMM_BID, 39, AMMBid, Delegation::delegatable, ({ +TRANSACTION(ttAMM_BID, 39, AMMBid, + Delegation::delegatable, + featureAMM, + ({ {sfAsset, soeREQUIRED}, {sfAsset2, soeREQUIRED}, {sfBidMin, soeOPTIONAL}, @@ -288,20 +381,29 @@ TRANSACTION(ttAMM_BID, 39, AMMBid, Delegation::delegatable, ({ })) /** This transaction type deletes AMM in the empty state */ -TRANSACTION(ttAMM_DELETE, 40, AMMDelete, Delegation::delegatable, ({ +TRANSACTION(ttAMM_DELETE, 40, AMMDelete, + Delegation::delegatable, + featureAMM, + ({ {sfAsset, soeREQUIRED}, {sfAsset2, soeREQUIRED}, })) /** This transactions creates a crosschain sequence number */ -TRANSACTION(ttXCHAIN_CREATE_CLAIM_ID, 41, XChainCreateClaimID, Delegation::delegatable, ({ +TRANSACTION(ttXCHAIN_CREATE_CLAIM_ID, 41, XChainCreateClaimID, + Delegation::delegatable, + featureXChainBridge, + ({ {sfXChainBridge, soeREQUIRED}, {sfSignatureReward, soeREQUIRED}, {sfOtherChainSource, soeREQUIRED}, })) /** This transactions initiates a crosschain transaction */ -TRANSACTION(ttXCHAIN_COMMIT, 42, XChainCommit, Delegation::delegatable, ({ +TRANSACTION(ttXCHAIN_COMMIT, 42, XChainCommit, + Delegation::delegatable, + featureXChainBridge, + ({ {sfXChainBridge, soeREQUIRED}, {sfXChainClaimID, soeREQUIRED}, {sfAmount, soeREQUIRED}, @@ -309,7 +411,10 @@ TRANSACTION(ttXCHAIN_COMMIT, 42, XChainCommit, Delegation::delegatable, ({ })) /** This transaction completes a crosschain transaction */ -TRANSACTION(ttXCHAIN_CLAIM, 43, XChainClaim, 
Delegation::delegatable, ({ +TRANSACTION(ttXCHAIN_CLAIM, 43, XChainClaim, + Delegation::delegatable, + featureXChainBridge, + ({ {sfXChainBridge, soeREQUIRED}, {sfXChainClaimID, soeREQUIRED}, {sfDestination, soeREQUIRED}, @@ -318,7 +423,10 @@ TRANSACTION(ttXCHAIN_CLAIM, 43, XChainClaim, Delegation::delegatable, ({ })) /** This transaction initiates a crosschain account create transaction */ -TRANSACTION(ttXCHAIN_ACCOUNT_CREATE_COMMIT, 44, XChainAccountCreateCommit, Delegation::delegatable, ({ +TRANSACTION(ttXCHAIN_ACCOUNT_CREATE_COMMIT, 44, XChainAccountCreateCommit, + Delegation::delegatable, + featureXChainBridge, + ({ {sfXChainBridge, soeREQUIRED}, {sfDestination, soeREQUIRED}, {sfAmount, soeREQUIRED}, @@ -326,7 +434,10 @@ TRANSACTION(ttXCHAIN_ACCOUNT_CREATE_COMMIT, 44, XChainAccountCreateCommit, Deleg })) /** This transaction adds an attestation to a claim */ -TRANSACTION(ttXCHAIN_ADD_CLAIM_ATTESTATION, 45, XChainAddClaimAttestation, Delegation::delegatable, ({ +TRANSACTION(ttXCHAIN_ADD_CLAIM_ATTESTATION, 45, XChainAddClaimAttestation, + Delegation::delegatable, + featureXChainBridge, + ({ {sfXChainBridge, soeREQUIRED}, {sfAttestationSignerAccount, soeREQUIRED}, @@ -342,7 +453,10 @@ TRANSACTION(ttXCHAIN_ADD_CLAIM_ATTESTATION, 45, XChainAddClaimAttestation, Deleg })) /** This transaction adds an attestation to an account */ -TRANSACTION(ttXCHAIN_ADD_ACCOUNT_CREATE_ATTESTATION, 46, XChainAddAccountCreateAttestation, Delegation::delegatable, ({ +TRANSACTION(ttXCHAIN_ADD_ACCOUNT_CREATE_ATTESTATION, 46, XChainAddAccountCreateAttestation, + Delegation::delegatable, + featureXChainBridge, + ({ {sfXChainBridge, soeREQUIRED}, {sfAttestationSignerAccount, soeREQUIRED}, @@ -359,31 +473,46 @@ TRANSACTION(ttXCHAIN_ADD_ACCOUNT_CREATE_ATTESTATION, 46, XChainAddAccountCreateA })) /** This transaction modifies a sidechain */ -TRANSACTION(ttXCHAIN_MODIFY_BRIDGE, 47, XChainModifyBridge, Delegation::delegatable, ({ +TRANSACTION(ttXCHAIN_MODIFY_BRIDGE, 47, XChainModifyBridge, + Delegation::delegatable, + featureXChainBridge, + ({ {sfXChainBridge, soeREQUIRED}, {sfSignatureReward, soeOPTIONAL}, {sfMinAccountCreateAmount, soeOPTIONAL}, })) /** This transactions creates a sidechain */ -TRANSACTION(ttXCHAIN_CREATE_BRIDGE, 48, XChainCreateBridge, Delegation::delegatable, ({ +TRANSACTION(ttXCHAIN_CREATE_BRIDGE, 48, XChainCreateBridge, + Delegation::delegatable, + featureXChainBridge, + ({ {sfXChainBridge, soeREQUIRED}, {sfSignatureReward, soeREQUIRED}, {sfMinAccountCreateAmount, soeOPTIONAL}, })) /** This transaction type creates or updates a DID */ -TRANSACTION(ttDID_SET, 49, DIDSet, Delegation::delegatable, ({ +TRANSACTION(ttDID_SET, 49, DIDSet, + Delegation::delegatable, + featureDID, + ({ {sfDIDDocument, soeOPTIONAL}, {sfURI, soeOPTIONAL}, {sfData, soeOPTIONAL}, })) /** This transaction type deletes a DID */ -TRANSACTION(ttDID_DELETE, 50, DIDDelete, Delegation::delegatable, ({})) +TRANSACTION(ttDID_DELETE, 50, DIDDelete, + Delegation::delegatable, + featureDID, + ({})) /** This transaction type creates an Oracle instance */ -TRANSACTION(ttORACLE_SET, 51, OracleSet, Delegation::delegatable, ({ +TRANSACTION(ttORACLE_SET, 51, OracleSet, + Delegation::delegatable, + featurePriceOracle, + ({ {sfOracleDocumentID, soeREQUIRED}, {sfProvider, soeOPTIONAL}, {sfURI, soeOPTIONAL}, @@ -393,18 +522,27 @@ TRANSACTION(ttORACLE_SET, 51, OracleSet, Delegation::delegatable, ({ })) /** This transaction type deletes an Oracle instance */ -TRANSACTION(ttORACLE_DELETE, 52, OracleDelete, Delegation::delegatable, ({ 
+TRANSACTION(ttORACLE_DELETE, 52, OracleDelete, + Delegation::delegatable, + featurePriceOracle, + ({ {sfOracleDocumentID, soeREQUIRED}, })) /** This transaction type fixes a problem in the ledger state */ -TRANSACTION(ttLEDGER_STATE_FIX, 53, LedgerStateFix, Delegation::delegatable, ({ +TRANSACTION(ttLEDGER_STATE_FIX, 53, LedgerStateFix, + Delegation::delegatable, + fixNFTokenPageLinks, + ({ {sfLedgerFixType, soeREQUIRED}, {sfOwner, soeOPTIONAL}, })) /** This transaction type creates a MPTokensIssuance instance */ -TRANSACTION(ttMPTOKEN_ISSUANCE_CREATE, 54, MPTokenIssuanceCreate, Delegation::delegatable, ({ +TRANSACTION(ttMPTOKEN_ISSUANCE_CREATE, 54, MPTokenIssuanceCreate, + Delegation::delegatable, + featureMPTokensV1, + ({ {sfAssetScale, soeOPTIONAL}, {sfTransferFee, soeOPTIONAL}, {sfMaximumAmount, soeOPTIONAL}, @@ -413,25 +551,37 @@ TRANSACTION(ttMPTOKEN_ISSUANCE_CREATE, 54, MPTokenIssuanceCreate, Delegation::de })) /** This transaction type destroys a MPTokensIssuance instance */ -TRANSACTION(ttMPTOKEN_ISSUANCE_DESTROY, 55, MPTokenIssuanceDestroy, Delegation::delegatable, ({ +TRANSACTION(ttMPTOKEN_ISSUANCE_DESTROY, 55, MPTokenIssuanceDestroy, + Delegation::delegatable, + featureMPTokensV1, + ({ {sfMPTokenIssuanceID, soeREQUIRED}, })) /** This transaction type sets flags on a MPTokensIssuance or MPToken instance */ -TRANSACTION(ttMPTOKEN_ISSUANCE_SET, 56, MPTokenIssuanceSet, Delegation::delegatable, ({ +TRANSACTION(ttMPTOKEN_ISSUANCE_SET, 56, MPTokenIssuanceSet, + Delegation::delegatable, + featureMPTokensV1, + ({ {sfMPTokenIssuanceID, soeREQUIRED}, {sfHolder, soeOPTIONAL}, {sfDomainID, soeOPTIONAL}, })) /** This transaction type authorizes a MPToken instance */ -TRANSACTION(ttMPTOKEN_AUTHORIZE, 57, MPTokenAuthorize, Delegation::delegatable, ({ +TRANSACTION(ttMPTOKEN_AUTHORIZE, 57, MPTokenAuthorize, + Delegation::delegatable, + featureMPTokensV1, + ({ {sfMPTokenIssuanceID, soeREQUIRED}, {sfHolder, soeOPTIONAL}, })) /** This transaction type create an Credential instance */ -TRANSACTION(ttCREDENTIAL_CREATE, 58, CredentialCreate, Delegation::delegatable, ({ +TRANSACTION(ttCREDENTIAL_CREATE, 58, CredentialCreate, + Delegation::delegatable, + featureCredentials, + ({ {sfSubject, soeREQUIRED}, {sfCredentialType, soeREQUIRED}, {sfExpiration, soeOPTIONAL}, @@ -439,44 +589,65 @@ TRANSACTION(ttCREDENTIAL_CREATE, 58, CredentialCreate, Delegation::delegatable, })) /** This transaction type accept an Credential object */ -TRANSACTION(ttCREDENTIAL_ACCEPT, 59, CredentialAccept, Delegation::delegatable, ({ +TRANSACTION(ttCREDENTIAL_ACCEPT, 59, CredentialAccept, + Delegation::delegatable, + featureCredentials, + ({ {sfIssuer, soeREQUIRED}, {sfCredentialType, soeREQUIRED}, })) /** This transaction type delete an Credential object */ -TRANSACTION(ttCREDENTIAL_DELETE, 60, CredentialDelete, Delegation::delegatable, ({ +TRANSACTION(ttCREDENTIAL_DELETE, 60, CredentialDelete, + Delegation::delegatable, + featureCredentials, + ({ {sfSubject, soeOPTIONAL}, {sfIssuer, soeOPTIONAL}, {sfCredentialType, soeREQUIRED}, })) /** This transaction type modify a NFToken */ -TRANSACTION(ttNFTOKEN_MODIFY, 61, NFTokenModify, Delegation::delegatable, ({ +TRANSACTION(ttNFTOKEN_MODIFY, 61, NFTokenModify, + Delegation::delegatable, + featureDynamicNFT, + ({ {sfNFTokenID, soeREQUIRED}, {sfOwner, soeOPTIONAL}, {sfURI, soeOPTIONAL}, })) /** This transaction type creates or modifies a Permissioned Domain */ -TRANSACTION(ttPERMISSIONED_DOMAIN_SET, 62, PermissionedDomainSet, Delegation::delegatable, ({ 
+TRANSACTION(ttPERMISSIONED_DOMAIN_SET, 62, PermissionedDomainSet, + Delegation::delegatable, + featurePermissionedDomains, + ({ {sfDomainID, soeOPTIONAL}, {sfAcceptedCredentials, soeREQUIRED}, })) /** This transaction type deletes a Permissioned Domain */ -TRANSACTION(ttPERMISSIONED_DOMAIN_DELETE, 63, PermissionedDomainDelete, Delegation::delegatable, ({ +TRANSACTION(ttPERMISSIONED_DOMAIN_DELETE, 63, PermissionedDomainDelete, + Delegation::delegatable, + featurePermissionedDomains, + ({ {sfDomainID, soeREQUIRED}, })) /** This transaction type delegates authorized account specified permissions */ -TRANSACTION(ttDELEGATE_SET, 64, DelegateSet, Delegation::notDelegatable, ({ +TRANSACTION(ttDELEGATE_SET, 64, DelegateSet, + Delegation::notDelegatable, + featurePermissionDelegation, + ({ {sfAuthorize, soeREQUIRED}, {sfPermissions, soeREQUIRED}, })) /** This transaction creates a single asset vault. */ -TRANSACTION(ttVAULT_CREATE, 65, VaultCreate, Delegation::delegatable, ({ +TRANSACTION(ttVAULT_CREATE, 65, VaultCreate, + Delegation::delegatable, + featureSingleAssetVault, + ({ {sfAsset, soeREQUIRED, soeMPTSupported}, {sfAssetsMaximum, soeOPTIONAL}, {sfMPTokenMetadata, soeOPTIONAL}, @@ -487,7 +658,10 @@ TRANSACTION(ttVAULT_CREATE, 65, VaultCreate, Delegation::delegatable, ({ })) /** This transaction updates a single asset vault. */ -TRANSACTION(ttVAULT_SET, 66, VaultSet, Delegation::delegatable, ({ +TRANSACTION(ttVAULT_SET, 66, VaultSet, + Delegation::delegatable, + featureSingleAssetVault, + ({ {sfVaultID, soeREQUIRED}, {sfAssetsMaximum, soeOPTIONAL}, {sfDomainID, soeOPTIONAL}, @@ -495,18 +669,27 @@ TRANSACTION(ttVAULT_SET, 66, VaultSet, Delegation::delegatable, ({ })) /** This transaction deletes a single asset vault. */ -TRANSACTION(ttVAULT_DELETE, 67, VaultDelete, Delegation::delegatable, ({ +TRANSACTION(ttVAULT_DELETE, 67, VaultDelete, + Delegation::delegatable, + featureSingleAssetVault, + ({ {sfVaultID, soeREQUIRED}, })) /** This transaction trades assets for shares with a vault. */ -TRANSACTION(ttVAULT_DEPOSIT, 68, VaultDeposit, Delegation::delegatable, ({ +TRANSACTION(ttVAULT_DEPOSIT, 68, VaultDeposit, + Delegation::delegatable, + featureSingleAssetVault, + ({ {sfVaultID, soeREQUIRED}, {sfAmount, soeREQUIRED, soeMPTSupported}, })) /** This transaction trades shares for assets with a vault. */ -TRANSACTION(ttVAULT_WITHDRAW, 69, VaultWithdraw, Delegation::delegatable, ({ +TRANSACTION(ttVAULT_WITHDRAW, 69, VaultWithdraw, + Delegation::delegatable, + featureSingleAssetVault, + ({ {sfVaultID, soeREQUIRED}, {sfAmount, soeREQUIRED, soeMPTSupported}, {sfDestination, soeOPTIONAL}, @@ -514,14 +697,20 @@ TRANSACTION(ttVAULT_WITHDRAW, 69, VaultWithdraw, Delegation::delegatable, ({ })) /** This transaction claws back tokens from a vault. */ -TRANSACTION(ttVAULT_CLAWBACK, 70, VaultClawback, Delegation::delegatable, ({ +TRANSACTION(ttVAULT_CLAWBACK, 70, VaultClawback, + Delegation::delegatable, + featureSingleAssetVault, + ({ {sfVaultID, soeREQUIRED}, {sfHolder, soeREQUIRED}, {sfAmount, soeOPTIONAL, soeMPTSupported}, })) /** This transaction type batches together transactions. 
*/ -TRANSACTION(ttBATCH, 71, Batch, Delegation::notDelegatable, ({ +TRANSACTION(ttBATCH, 71, Batch, + Delegation::notDelegatable, + featureBatch, + ({ {sfRawTransactions, soeREQUIRED}, {sfBatchSigners, soeOPTIONAL}, })) @@ -530,7 +719,10 @@ TRANSACTION(ttBATCH, 71, Batch, Delegation::notDelegatable, ({ For details, see: https://xrpl.org/amendments.html */ -TRANSACTION(ttAMENDMENT, 100, EnableAmendment, Delegation::notDelegatable, ({ +TRANSACTION(ttAMENDMENT, 100, EnableAmendment, + Delegation::notDelegatable, + uint256{}, + ({ {sfLedgerSequence, soeREQUIRED}, {sfAmendment, soeREQUIRED}, })) @@ -538,7 +730,10 @@ TRANSACTION(ttAMENDMENT, 100, EnableAmendment, Delegation::notDelegatable, ({ /** This system-generated transaction type is used to update the network's fee settings. For details, see: https://xrpl.org/fee-voting.html */ -TRANSACTION(ttFEE, 101, SetFee, Delegation::notDelegatable, ({ +TRANSACTION(ttFEE, 101, SetFee, + Delegation::notDelegatable, + uint256{}, + ({ {sfLedgerSequence, soeOPTIONAL}, // Old version uses raw numbers {sfBaseFee, soeOPTIONAL}, @@ -555,7 +750,10 @@ TRANSACTION(ttFEE, 101, SetFee, Delegation::notDelegatable, ({ For details, see: https://xrpl.org/negative-unl.html */ -TRANSACTION(ttUNL_MODIFY, 102, UNLModify, Delegation::notDelegatable, ({ +TRANSACTION(ttUNL_MODIFY, 102, UNLModify, + Delegation::notDelegatable, + uint256{}, + ({ {sfUNLModifyDisabling, soeREQUIRED}, {sfLedgerSequence, soeREQUIRED}, {sfUNLModifyValidator, soeREQUIRED}, diff --git a/include/xrpl/protocol/jss.h b/include/xrpl/protocol/jss.h index 68d2497aca..d847cf6012 100644 --- a/include/xrpl/protocol/jss.h +++ b/include/xrpl/protocol/jss.h @@ -710,7 +710,7 @@ JSS(write_load); // out: GetCounts #pragma push_macro("TRANSACTION") #undef TRANSACTION -#define TRANSACTION(tag, value, name, delegatable, fields) JSS(name); +#define TRANSACTION(tag, value, name, ...) JSS(name); #include diff --git a/src/libxrpl/protocol/Permissions.cpp b/src/libxrpl/protocol/Permissions.cpp index ca8cb26f36..781799f128 100644 --- a/src/libxrpl/protocol/Permissions.cpp +++ b/src/libxrpl/protocol/Permissions.cpp @@ -18,6 +18,7 @@ //============================================================================== #include +#include #include #include @@ -25,11 +26,24 @@ namespace ripple { Permission::Permission() { + txFeatureMap_ = { +#pragma push_macro("TRANSACTION") +#undef TRANSACTION + +#define TRANSACTION(tag, value, name, delegatable, amendment, ...) \ + {value, amendment}, + +#include + +#undef TRANSACTION +#pragma pop_macro("TRANSACTION") + }; + delegatableTx_ = { #pragma push_macro("TRANSACTION") #undef TRANSACTION -#define TRANSACTION(tag, value, name, delegatable, fields) {value, delegatable}, +#define TRANSACTION(tag, value, name, delegatable, ...) 
{value, delegatable}, #include @@ -118,7 +132,9 @@ Permission::getGranularTxType(GranularPermissionType const& gpType) const } bool -Permission::isDelegatable(std::uint32_t const& permissionValue) const +Permission::isDelegatable( + std::uint32_t const& permissionValue, + Rules const& rules) const { auto const granularPermission = getGranularName(static_cast(permissionValue)); @@ -126,7 +142,27 @@ Permission::isDelegatable(std::uint32_t const& permissionValue) const // granular permissions are always allowed to be delegated return true; - auto const it = delegatableTx_.find(permissionValue - 1); + auto const txType = permissionToTxType(permissionValue); + auto const it = delegatableTx_.find(txType); + + if (rules.enabled(fixDelegateV1_1)) + { + if (it == delegatableTx_.end()) + return false; + + auto const txFeaturesIt = txFeatureMap_.find(txType); + XRPL_ASSERT( + txFeaturesIt != txFeatureMap_.end(), + "ripple::Permissions::isDelegatable : tx exists in txFeatureMap_"); + + // fixDelegateV1_1: Delegation is only allowed if the required amendment + // for the transaction is enabled. For transactions that do not require + // an amendment, delegation is always allowed. + if (txFeaturesIt->second != uint256{} && + !rules.enabled(txFeaturesIt->second)) + return false; + } + if (it != delegatableTx_.end() && it->second == Delegation::notDelegatable) return false; diff --git a/src/libxrpl/protocol/TxFormats.cpp b/src/libxrpl/protocol/TxFormats.cpp index 5edffeb666..c10c023ee9 100644 --- a/src/libxrpl/protocol/TxFormats.cpp +++ b/src/libxrpl/protocol/TxFormats.cpp @@ -55,7 +55,7 @@ TxFormats::TxFormats() #undef TRANSACTION #define UNWRAP(...) __VA_ARGS__ -#define TRANSACTION(tag, value, name, delegatable, fields) \ +#define TRANSACTION(tag, value, name, delegatable, amendment, fields) \ add(jss::name, tag, UNWRAP fields, commonFields); #include diff --git a/src/test/app/AMMClawback_test.cpp b/src/test/app/AMMClawback_test.cpp index 9564911664..707113fe32 100644 --- a/src/test/app/AMMClawback_test.cpp +++ b/src/test/app/AMMClawback_test.cpp @@ -2442,8 +2442,7 @@ class AMMClawback_test : public beast::unit_test::suite void run() override { - FeatureBitset const all{ - jtx::testable_amendments() | fixAMMClawbackRounding}; + FeatureBitset const all = jtx::testable_amendments(); testInvalidRequest(); testFeatureDisabled(all - featureAMMClawback); diff --git a/src/test/app/Delegate_test.cpp b/src/test/app/Delegate_test.cpp index 44cb6a54b6..ea5e073a55 100644 --- a/src/test/app/Delegate_test.cpp +++ b/src/test/app/Delegate_test.cpp @@ -16,6 +16,7 @@ //============================================================================== #include +#include #include #include @@ -139,12 +140,12 @@ class Delegate_test : public beast::unit_test::suite } void - testInvalidRequest() + testInvalidRequest(FeatureBitset features) { testcase("test invalid DelegateSet"); using namespace jtx; - Env env(*this); + Env env(*this, features); Account gw{"gateway"}; Account alice{"alice"}; Account bob{"bob"}; @@ -216,22 +217,17 @@ class Delegate_test : public beast::unit_test::suite } // non-delegatable transaction + auto const res = features[fixDelegateV1_1] ? 
ter(temMALFORMED) + : ter(tecNO_PERMISSION); { - env(delegate::set(gw, alice, {"SetRegularKey"}), - ter(tecNO_PERMISSION)); - env(delegate::set(gw, alice, {"AccountSet"}), - ter(tecNO_PERMISSION)); - env(delegate::set(gw, alice, {"SignerListSet"}), - ter(tecNO_PERMISSION)); - env(delegate::set(gw, alice, {"DelegateSet"}), - ter(tecNO_PERMISSION)); - env(delegate::set(gw, alice, {"SetRegularKey"}), - ter(tecNO_PERMISSION)); - env(delegate::set(gw, alice, {"EnableAmendment"}), - ter(tecNO_PERMISSION)); - env(delegate::set(gw, alice, {"UNLModify"}), ter(tecNO_PERMISSION)); - env(delegate::set(gw, alice, {"SetFee"}), ter(tecNO_PERMISSION)); - env(delegate::set(gw, alice, {"Batch"}), ter(tecNO_PERMISSION)); + env(delegate::set(gw, alice, {"SetRegularKey"}), res); + env(delegate::set(gw, alice, {"AccountSet"}), res); + env(delegate::set(gw, alice, {"SignerListSet"}), res); + env(delegate::set(gw, alice, {"DelegateSet"}), res); + env(delegate::set(gw, alice, {"EnableAmendment"}), res); + env(delegate::set(gw, alice, {"UNLModify"}), res); + env(delegate::set(gw, alice, {"SetFee"}), res); + env(delegate::set(gw, alice, {"Batch"}), res); } } @@ -536,7 +532,7 @@ class Delegate_test : public beast::unit_test::suite } void - testPaymentGranular() + testPaymentGranular(FeatureBitset features) { testcase("test payment granular"); using namespace jtx; @@ -706,6 +702,158 @@ class Delegate_test : public beast::unit_test::suite env.require(balance(alice, USD(50))); BEAST_EXPECT(env.balance(bob, USD) == USD(0)); } + + // disallow cross currency payment with only PaymentBurn/PaymentMint + // permission + { + Env env(*this, features); + Account const alice{"alice"}; + Account const bob{"bob"}; + Account const gw{"gateway"}; + Account const carol{"carol"}; + auto const USD = gw["USD"]; + + env.fund(XRP(10000), alice, bob, carol, gw); + env.close(); + env.trust(USD(50000), alice); + env.trust(USD(50000), bob); + env.trust(USD(50000), carol); + env(pay(gw, alice, USD(10000))); + env(pay(gw, bob, USD(10000))); + env(pay(gw, carol, USD(10000))); + env.close(); + + auto const result = features[fixDelegateV1_1] + ? static_cast(tecNO_DELEGATE_PERMISSION) + : static_cast(tesSUCCESS); + auto const offerCount = features[fixDelegateV1_1] ? 1 : 0; + + // PaymentMint + { + env(offer(carol, XRP(100), USD(501))); + BEAST_EXPECT(expectOffers(env, carol, 1)); + env(delegate::set(gw, bob, {"PaymentMint"})); + env.close(); + + // post-amendment: fixDelegateV1_1 + // bob can not send cross currency payment on behalf of the gw, + // even with PaymentMint permission and gw being the issuer. + env(pay(gw, alice, USD(5000)), + path(~USD), + sendmax(XRP(1001)), + txflags(tfPartialPayment), + delegate::as(bob), + ter(result)); + BEAST_EXPECT(expectOffers(env, carol, offerCount)); + + // succeed with direct payment + env(pay(gw, alice, USD(100)), delegate::as(bob)); + env.close(); + } + + // PaymentBurn + { + env(offer(bob, XRP(100), USD(501))); + BEAST_EXPECT(expectOffers(env, bob, 1)); + env(delegate::set(alice, bob, {"PaymentBurn"})); + env.close(); + + // post-amendment: fixDelegateV1_1 + // bob can not send cross currency payment on behalf of alice, + // even with PaymentBurn permission and gw being the issuer. 
+ env(pay(alice, gw, USD(5000)), + path(~USD), + sendmax(XRP(1001)), + txflags(tfPartialPayment), + delegate::as(bob), + ter(result)); + BEAST_EXPECT(expectOffers(env, bob, offerCount)); + + // succeed with direct payment + env(pay(alice, gw, USD(100)), delegate::as(bob)); + env.close(); + } + } + + // PaymentMint and PaymentBurn for MPT + { + std::string logs; + Env env(*this, features, std::make_unique(&logs)); + Account const alice{"alice"}; + Account const bob{"bob"}; + Account const gw{"gateway"}; + + MPTTester mpt(env, gw, {.holders = {alice, bob}}); + mpt.create( + {.ownerCount = 1, .holderCount = 0, .flags = tfMPTCanTransfer}); + + mpt.authorize({.account = alice}); + mpt.authorize({.account = bob}); + + auto const MPT = mpt["MPT"]; + env(pay(gw, alice, MPT(500))); + env(pay(gw, bob, MPT(500))); + env.close(); + auto aliceMPT = env.balance(alice, MPT); + auto bobMPT = env.balance(bob, MPT); + + // PaymentMint + { + env(delegate::set(gw, bob, {"PaymentMint"})); + env.close(); + + if (!features[fixDelegateV1_1]) + { + // pre-amendment: PaymentMint is not supported for MPT + env(pay(gw, alice, MPT(50)), + delegate::as(bob), + ter(tefEXCEPTION)); + } + else + { + env(pay(gw, alice, MPT(50)), delegate::as(bob)); + BEAST_EXPECT(env.balance(alice, MPT) == aliceMPT + MPT(50)); + BEAST_EXPECT(env.balance(bob, MPT) == bobMPT); + aliceMPT = env.balance(alice, MPT); + } + } + + // PaymentBurn + { + env(delegate::set(alice, bob, {"PaymentBurn"})); + env.close(); + + if (!features[fixDelegateV1_1]) + { + // pre-amendment: PaymentBurn is not supported for MPT + env(pay(alice, gw, MPT(50)), + delegate::as(bob), + ter(tefEXCEPTION)); + } + else + { + env(pay(alice, gw, MPT(50)), delegate::as(bob)); + BEAST_EXPECT(env.balance(alice, MPT) == aliceMPT - MPT(50)); + BEAST_EXPECT(env.balance(bob, MPT) == bobMPT); + aliceMPT = env.balance(alice, MPT); + } + } + + // Payment transaction for MPT is allowed for both pre and post + // amendment + { + env(delegate::set( + alice, bob, {"PaymentBurn", "PaymentMint", "Payment"})); + env.close(); + env(pay(alice, gw, MPT(50)), delegate::as(bob)); + BEAST_EXPECT(env.balance(alice, MPT) == aliceMPT - MPT(50)); + BEAST_EXPECT(env.balance(bob, MPT) == bobMPT); + aliceMPT = env.balance(alice, MPT); + env(pay(alice, bob, MPT(100)), delegate::as(bob)); + BEAST_EXPECT(env.balance(alice, MPT) == aliceMPT - MPT(100)); + BEAST_EXPECT(env.balance(bob, MPT) == bobMPT + MPT(100)); + } + } } void @@ -1476,18 +1624,216 @@ class Delegate_test : public beast::unit_test::suite BEAST_EXPECT(env.balance(edward) == edwardBalance); } + void + testPermissionValue(FeatureBitset features) + { + testcase("test permission value"); + using namespace jtx; + + Env env(*this, features); + + Account alice{"alice"}; + Account bob{"bob"}; + env.fund(XRP(100000), alice, bob); + env.close(); + + auto buildRequest = [&](auto value) -> Json::Value { + Json::Value jv; + jv[jss::TransactionType] = jss::DelegateSet; + jv[jss::Account] = alice.human(); + jv[sfAuthorize.jsonName] = bob.human(); + + Json::Value permissionsJson(Json::arrayValue); + Json::Value permissionValue; + permissionValue[sfPermissionValue.jsonName] = value; + Json::Value permissionObj; + permissionObj[sfPermission.jsonName] = permissionValue; + permissionsJson.append(permissionObj); + jv[sfPermissions.jsonName] = permissionsJson; + + return jv; + }; + + // invalid permission value. 
+ // neither granular permission nor transaction level permission + for (auto value : {0, 100000, 54321}) + { + auto jv = buildRequest(value); + if (!features[fixDelegateV1_1]) + env(jv); + else + env(jv, ter(temMALFORMED)); + } + } + + void + testTxReqireFeatures(FeatureBitset features) + { + testcase("test delegate disabled tx"); + using namespace jtx; + + // map of tx and required feature. + // non-delegatable tx are not included. + // NFTokenMint, NFTokenBurn, NFTokenCreateOffer, NFTokenCancelOffer, + // NFTokenAcceptOffer are not included, they are tested separately. + std::unordered_map txRequiredFeatures{ + {"TicketCreate", featureTicketBatch}, + {"CheckCreate", featureChecks}, + {"CheckCash", featureChecks}, + {"CheckCancel", featureChecks}, + {"DepositPreauth", featureDepositPreauth}, + {"Clawback", featureClawback}, + {"AMMClawback", featureAMMClawback}, + {"AMMCreate", featureAMM}, + {"AMMDeposit", featureAMM}, + {"AMMWithdraw", featureAMM}, + {"AMMVote", featureAMM}, + {"AMMBid", featureAMM}, + {"AMMDelete", featureAMM}, + {"XChainCreateClaimID", featureXChainBridge}, + {"XChainCommit", featureXChainBridge}, + {"XChainClaim", featureXChainBridge}, + {"XChainAccountCreateCommit", featureXChainBridge}, + {"XChainAddClaimAttestation", featureXChainBridge}, + {"XChainAddAccountCreateAttestation", featureXChainBridge}, + {"XChainModifyBridge", featureXChainBridge}, + {"XChainCreateBridge", featureXChainBridge}, + {"DIDSet", featureDID}, + {"DIDDelete", featureDID}, + {"OracleSet", featurePriceOracle}, + {"OracleDelete", featurePriceOracle}, + {"LedgerStateFix", fixNFTokenPageLinks}, + {"MPTokenIssuanceCreate", featureMPTokensV1}, + {"MPTokenIssuanceDestroy", featureMPTokensV1}, + {"MPTokenIssuanceSet", featureMPTokensV1}, + {"MPTokenAuthorize", featureMPTokensV1}, + {"CredentialCreate", featureCredentials}, + {"CredentialAccept", featureCredentials}, + {"CredentialDelete", featureCredentials}, + {"NFTokenModify", featureDynamicNFT}, + {"PermissionedDomainSet", featurePermissionedDomains}, + {"PermissionedDomainDelete", featurePermissionedDomains}, + {"VaultCreate", featureSingleAssetVault}, + {"VaultSet", featureSingleAssetVault}, + {"VaultDelete", featureSingleAssetVault}, + {"VaultDeposit", featureSingleAssetVault}, + {"VaultWithdraw", featureSingleAssetVault}, + {"VaultClawback", featureSingleAssetVault}}; + + // fixDelegateV1_1 post-amendment: can not delegate tx if any + // required feature disabled. + { + auto txAmendmentDisabled = [&](FeatureBitset features, + std::string const& tx) { + BEAST_EXPECT(txRequiredFeatures.contains(tx)); + + Env env(*this, features - txRequiredFeatures[tx]); + + Account const alice{"alice"}; + Account const bob{"bob"}; + env.fund(XRP(100000), alice, bob); + env.close(); + + if (!features[fixDelegateV1_1]) + env(delegate::set(alice, bob, {tx})); + else + env(delegate::set(alice, bob, {tx}), ter(temMALFORMED)); + }; + + for (auto const& tx : txRequiredFeatures) + txAmendmentDisabled(features, tx.first); + } + + // if all the required features in txRequiredFeatures are enabled, will + // succeed + { + auto txAmendmentEnabled = [&](std::string const& tx) { + Env env(*this, features); + + Account const alice{"alice"}; + Account const bob{"bob"}; + env.fund(XRP(100000), alice, bob); + env.close(); + + env(delegate::set(alice, bob, {tx})); + }; + + for (auto const& tx : txRequiredFeatures) + txAmendmentEnabled(tx.first); + } + + // NFTokenMint, NFTokenBurn, NFTokenCreateOffer, NFTokenCancelOffer, and + // NFTokenAcceptOffer are tested separately. 
Since + // featureNonFungibleTokensV1_1 includes the functionality of + // featureNonFungibleTokensV1, fixNFTokenNegOffer, and fixNFTokenDirV1, + // both featureNonFungibleTokensV1_1 and featureNonFungibleTokensV1 need + // to be disabled to block these transactions from being delegated. + { + Env env( + *this, + features - featureNonFungibleTokensV1 - + featureNonFungibleTokensV1_1); + + Account const alice{"alice"}; + Account const bob{"bob"}; + env.fund(XRP(100000), alice, bob); + env.close(); + + for (auto const tx : + {"NFTokenMint", + "NFTokenBurn", + "NFTokenCreateOffer", + "NFTokenCancelOffer", + "NFTokenAcceptOffer"}) + { + if (!features[fixDelegateV1_1]) + env(delegate::set(alice, bob, {tx})); + else + env(delegate::set(alice, bob, {tx}), ter(temMALFORMED)); + } + } + + // NFTokenMint, NFTokenBurn, NFTokenCreateOffer, NFTokenCancelOffer, and + // NFTokenAcceptOffer are allowed to be delegated if either + // featureNonFungibleTokensV1 or featureNonFungibleTokensV1_1 is + // enabled. + { + for (auto const feature : + {featureNonFungibleTokensV1, featureNonFungibleTokensV1_1}) + { + Env env(*this, features - feature); + Account const alice{"alice"}; + Account const bob{"bob"}; + env.fund(XRP(100000), alice, bob); + env.close(); + + for (auto const tx : + {"NFTokenMint", + "NFTokenBurn", + "NFTokenCreateOffer", + "NFTokenCancelOffer", + "NFTokenAcceptOffer"}) + env(delegate::set(alice, bob, {tx})); + } + } + } + void run() override { + FeatureBitset const all = jtx::testable_amendments(); + testFeatureDisabled(); testDelegateSet(); - testInvalidRequest(); + testInvalidRequest(all); + testInvalidRequest(all - fixDelegateV1_1); testReserve(); testFee(); testSequence(); testAccountDelete(); testDelegateTransaction(); - testPaymentGranular(); + testPaymentGranular(all); + testPaymentGranular(all - fixDelegateV1_1); testTrustSetGranular(); testAccountSetGranular(); testMPTokenIssuanceSetGranular(); @@ -1495,6 +1841,10 @@ class Delegate_test : public beast::unit_test::suite testSingleSignBadSecret(); testMultiSign(); testMultiSignQuorumNotMet(); + testPermissionValue(all); + testPermissionValue(all - fixDelegateV1_1); + testTxReqireFeatures(all); + testTxReqireFeatures(all - fixDelegateV1_1); } }; BEAST_DEFINE_TESTSUITE(Delegate, app, ripple); diff --git a/src/xrpld/app/tx/detail/DelegateSet.cpp b/src/xrpld/app/tx/detail/DelegateSet.cpp index 708cdf0dc2..ddeb01b399 100644 --- a/src/xrpld/app/tx/detail/DelegateSet.cpp +++ b/src/xrpld/app/tx/detail/DelegateSet.cpp @@ -23,7 +23,6 @@ #include #include #include -#include #include namespace ripple { @@ -51,6 +50,11 @@ DelegateSet::preflight(PreflightContext const& ctx) { if (!permissionSet.insert(permission[sfPermissionValue]).second) return temMALFORMED; + + if (ctx.rules.enabled(fixDelegateV1_1) && + !Permission::getInstance().isDelegatable( + permission[sfPermissionValue], ctx.rules)) + return temMALFORMED; } return preflight2(ctx); @@ -68,9 +72,21 @@ DelegateSet::preclaim(PreclaimContext const& ctx) auto const& permissions = ctx.tx.getFieldArray(sfPermissions); for (auto const& permission : permissions) { - auto const permissionValue = permission[sfPermissionValue]; - if (!Permission::getInstance().isDelegatable(permissionValue)) + if (!ctx.view.rules().enabled(fixDelegateV1_1) && + !Permission::getInstance().isDelegatable( + permission[sfPermissionValue], ctx.view.rules())) + { + // Before fixDelegateV1_1: + // - The check was performed during preclaim. + // - Transactions from amendments not yet enabled could still be + // delegated. 
+ // + // After fixDelegateV1_1: + // - The check is performed during preflight. + // - Transactions from amendments not yet enabled can no longer be + // delegated. return tecNO_PERMISSION; + } } return tesSUCCESS; diff --git a/src/xrpld/app/tx/detail/Payment.cpp b/src/xrpld/app/tx/detail/Payment.cpp index 386b170ed1..784330b203 100644 --- a/src/xrpld/app/tx/detail/Payment.cpp +++ b/src/xrpld/app/tx/detail/Payment.cpp @@ -265,8 +265,33 @@ Payment::checkPermission(ReadView const& view, STTx const& tx) loadGranularPermission(sle, ttPAYMENT, granularPermissions); auto const& dstAmount = tx.getFieldAmount(sfAmount); - auto const& amountIssue = dstAmount.issue(); + // post-amendment: disallow cross currency payments for PaymentMint and + // PaymentBurn + if (view.rules().enabled(fixDelegateV1_1)) + { + auto const& amountAsset = dstAmount.asset(); + if (tx.isFieldPresent(sfSendMax) && + tx[sfSendMax].asset() != amountAsset) + return tecNO_DELEGATE_PERMISSION; + if (granularPermissions.contains(PaymentMint) && !isXRP(amountAsset) && + amountAsset.getIssuer() == tx[sfAccount]) + return tesSUCCESS; + + if (granularPermissions.contains(PaymentBurn) && !isXRP(amountAsset) && + amountAsset.getIssuer() == tx[sfDestination]) + return tesSUCCESS; + + return tecNO_DELEGATE_PERMISSION; + } + + // Calling dstAmount.issue() in the next line would throw if it holds MPT. + // That exception would be caught in preclaim and returned as tefEXCEPTION. + // This check is just a cleaner, more explicit way to get the same result. + if (dstAmount.holds()) + return tefEXCEPTION; + + auto const& amountIssue = dstAmount.issue(); if (granularPermissions.contains(PaymentMint) && !isXRP(amountIssue) && amountIssue.account == tx[sfAccount]) return tesSUCCESS; diff --git a/src/xrpld/app/tx/detail/applySteps.cpp b/src/xrpld/app/tx/detail/applySteps.cpp index 34259ebef0..03ef7244f8 100644 --- a/src/xrpld/app/tx/detail/applySteps.cpp +++ b/src/xrpld/app/tx/detail/applySteps.cpp @@ -97,8 +97,8 @@ with_txn_type(TxType txnType, F&& f) #pragma push_macro("TRANSACTION") #undef TRANSACTION -#define TRANSACTION(tag, value, name, delegatable, fields) \ - case tag: \ +#define TRANSACTION(tag, value, name, ...) \ + case tag: \ return f.template operator()(); #include From fbd60fc0007d95caa3fbbfcfcbbfc097c25ed502 Mon Sep 17 00:00:00 2001 From: Ayaz Salikhov Date: Thu, 11 Sep 2025 13:58:11 +0100 Subject: [PATCH 174/244] ci: Use pre-commit reusable workflow (#5772) --- .github/workflows/check-format.yml | 44 ------------------------------ .github/workflows/on-pr.yml | 9 ------ .github/workflows/pre-commit.yml | 14 ++++++++++ 3 files changed, 14 insertions(+), 53 deletions(-) delete mode 100644 .github/workflows/check-format.yml create mode 100644 .github/workflows/pre-commit.yml diff --git a/.github/workflows/check-format.yml b/.github/workflows/check-format.yml deleted file mode 100644 index c63589017d..0000000000 --- a/.github/workflows/check-format.yml +++ /dev/null @@ -1,44 +0,0 @@ -# This workflow checks if the code is properly formatted. -name: Check format - -# This workflow can only be triggered by other workflows. 
-on: workflow_call - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }}-format - cancel-in-progress: true - -defaults: - run: - shell: bash - -jobs: - pre-commit: - runs-on: ubuntu-latest - container: ghcr.io/xrplf/ci/tools-rippled-pre-commit - steps: - - name: Checkout repository - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 - - name: Prepare runner - uses: XRPLF/actions/.github/actions/prepare-runner@638e0dc11ea230f91bd26622fb542116bb5254d5 - - name: Format code - run: pre-commit run --show-diff-on-failure --color=always --all-files - - name: Check for differences - env: - MESSAGE: | - One or more files did not conform to the formatting. Maybe you did - not run 'pre-commit' before committing, or your version of - 'clang-format' or 'prettier' has an incompatibility with the ones - used here (see the "Check configuration" step above). - - Run 'pre-commit run --all-files' in your repo, and then commit and - push the changes. - run: | - DIFF=$(git status --porcelain) - if [ -n "${DIFF}" ]; then - # Print the files that changed to give the contributor a hint about - # what to expect when running pre-commit on their own machine. - git status - echo "${MESSAGE}" - exit 1 - fi diff --git a/.github/workflows/on-pr.yml b/.github/workflows/on-pr.yml index c480cc5476..24f27d5162 100644 --- a/.github/workflows/on-pr.yml +++ b/.github/workflows/on-pr.yml @@ -50,12 +50,9 @@ jobs: files: | # These paths are unique to `on-pr.yml`. .github/scripts/levelization/** - .github/workflows/check-format.yml .github/workflows/check-levelization.yml .github/workflows/notify-clio.yml .github/workflows/on-pr.yml - .clang-format - .pre-commit-config.yaml # Keep the paths below in sync with those in `on-trigger.yml`. .github/actions/build-deps/** @@ -93,11 +90,6 @@ jobs: outputs: go: ${{ steps.go.outputs.go == 'true' }} - check-format: - needs: should-run - if: needs.should-run.outputs.go == 'true' - uses: ./.github/workflows/check-format.yml - check-levelization: needs: should-run if: needs.should-run.outputs.go == 'true' @@ -130,7 +122,6 @@ jobs: if: failure() || cancelled() needs: - build-test - - check-format - check-levelization runs-on: ubuntu-latest steps: diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml new file mode 100644 index 0000000000..ead137308d --- /dev/null +++ b/.github/workflows/pre-commit.yml @@ -0,0 +1,14 @@ +name: Run pre-commit hooks + +on: + pull_request: + push: + branches: [develop, release, master] + workflow_dispatch: + +jobs: + run-hooks: + uses: XRPLF/actions/.github/workflows/pre-commit.yml@af1b0f0d764cda2e5435f5ac97b240d4bd4d95d3 + with: + runs_on: ubuntu-latest + container: '{ "image": "ghcr.io/xrplf/ci/tools-rippled-pre-commit" }' From e6f8bc720fdbbe63ee907f5ac429aa9cfc7e56c9 Mon Sep 17 00:00:00 2001 From: tequ Date: Thu, 11 Sep 2025 23:17:06 +0900 Subject: [PATCH 175/244] Add additional metadata to simulate response (#5754) --- src/test/rpc/Simulate_test.cpp | 104 ++++++++++++++++++++++++++++ src/xrpld/rpc/handlers/Simulate.cpp | 14 ++++ 2 files changed, 118 insertions(+) diff --git a/src/test/rpc/Simulate_test.cpp b/src/test/rpc/Simulate_test.cpp index 5b3c0d2372..0a36a8a841 100644 --- a/src/test/rpc/Simulate_test.cpp +++ b/src/test/rpc/Simulate_test.cpp @@ -131,6 +131,32 @@ class Simulate_test : public beast::unit_test::suite std::to_string(env.current()->txCount())); } + void + testTxJsonMetadataField( + jtx::Env& env, + Json::Value const& tx, + std::function const& validate, + Json::Value const& 
expectedMetadataKey, + bool testSerialized = true) + { + env.close(); + + Json::Value params; + params[jss::tx_json] = tx; + validate( + env.rpc("json", "simulate", to_string(params)), + tx, + expectedMetadataKey); + validate(env.rpc("simulate", to_string(tx)), tx, expectedMetadataKey); + + BEAST_EXPECTS( + env.current()->txCount() == 0, + std::to_string(env.current()->txCount())); + } + Json::Value getJsonMetadata(Json::Value txResult) const { @@ -1186,6 +1212,83 @@ class Simulate_test : public beast::unit_test::suite } } + void + testSuccessfulTransactionAdditionalMetadata() + { + testcase("Successful transaction with additional metadata"); + + using namespace jtx; + Env env{*this, envconfig([&](std::unique_ptr cfg) { + cfg->NETWORK_ID = 1025; + return cfg; + })}; + + Account const alice("alice"); + + env.fund(XRP(10000), alice); + env.close(); + + { + auto validateOutput = [&](Json::Value const& resp, + Json::Value const& tx, + Json::Value const& expectedMetadataKey) { + auto result = resp[jss::result]; + + BEAST_EXPECT(result[jss::engine_result] == "tesSUCCESS"); + BEAST_EXPECT(result[jss::engine_result_code] == 0); + BEAST_EXPECT( + result[jss::engine_result_message] == + "The simulated transaction would have been applied."); + + if (BEAST_EXPECT( + result.isMember(jss::meta) || + result.isMember(jss::meta_blob))) + { + Json::Value const metadata = getJsonMetadata(result); + + BEAST_EXPECT(metadata[sfTransactionIndex.jsonName] == 0); + BEAST_EXPECT( + metadata[sfTransactionResult.jsonName] == "tesSUCCESS"); + BEAST_EXPECT( + metadata.isMember(expectedMetadataKey.asString())); + } + }; + + { + Json::Value tx; + tx[jss::Account] = env.master.human(); + tx[jss::TransactionType] = jss::Payment; + tx[sfDestination] = alice.human(); + tx[sfAmount] = "100"; + + // test delivered amount + testTxJsonMetadataField( + env, tx, validateOutput, jss::delivered_amount); + } + + { + Json::Value tx; + tx[jss::Account] = env.master.human(); + tx[jss::TransactionType] = jss::NFTokenMint; + tx[sfNFTokenTaxon] = 1; + + // test nft synthetic + testTxJsonMetadataField( + env, tx, validateOutput, jss::nftoken_id); + } + + { + Json::Value tx; + tx[jss::Account] = env.master.human(); + tx[jss::TransactionType] = jss::MPTokenIssuanceCreate; + + // test mpt issuance id + testTxJsonMetadataField( + env, tx, validateOutput, jss::mpt_issuance_id); + } + } + } + public: void run() override @@ -1202,6 +1305,7 @@ public: testMultisignedBadPubKey(); testDeleteExpiredCredentials(); testSuccessfulTransactionNetworkID(); + testSuccessfulTransactionAdditionalMetadata(); } }; diff --git a/src/xrpld/rpc/handlers/Simulate.cpp b/src/xrpld/rpc/handlers/Simulate.cpp index 3c175883c5..092b0b4562 100644 --- a/src/xrpld/rpc/handlers/Simulate.cpp +++ b/src/xrpld/rpc/handlers/Simulate.cpp @@ -24,10 +24,13 @@ #include #include #include +#include #include +#include #include #include +#include #include #include #include @@ -272,6 +275,17 @@ simulateTxn(RPC::JsonContext& context, std::shared_ptr transaction) else { jvResult[jss::meta] = result.metadata->getJson(JsonOptions::none); + RPC::insertDeliveredAmount( + jvResult[jss::meta], + view, + transaction->getSTransaction(), + *result.metadata); + RPC::insertNFTSyntheticInJson( + jvResult, transaction->getSTransaction(), *result.metadata); + RPC::insertMPTokenIssuanceID( + jvResult[jss::meta], + transaction->getSTransaction(), + *result.metadata); } } From 6fe0599cc26db01e4912b133a70b9d252281a568 Mon Sep 17 00:00:00 2001 From: Mayukha Vadari Date: Thu, 11 Sep 2025 10:49:26 -0400 Subject: 
[PATCH 176/244] refactor: clean up `CTID.h` (#5681) --- src/xrpld/rpc/CTID.h | 85 +++++++++++++++++++++++++++++++++----------- 1 file changed, 64 insertions(+), 21 deletions(-) diff --git a/src/xrpld/rpc/CTID.h b/src/xrpld/rpc/CTID.h index be531c536a..0e2b7e0d65 100644 --- a/src/xrpld/rpc/CTID.h +++ b/src/xrpld/rpc/CTID.h @@ -39,53 +39,96 @@ namespace RPC { // The Concise Transaction ID provides a way to identify a transaction // that includes which network the transaction was submitted to. +/** + * @brief Encodes ledger sequence, transaction index, and network ID into a CTID + * string. + * + * @param ledgerSeq Ledger sequence number (max 0x0FFF'FFFF). + * @param txnIndex Transaction index within the ledger (max 0xFFFF). + * @param networkID Network identifier (max 0xFFFF). + * @return Optional CTID string in uppercase hexadecimal, or std::nullopt if + * inputs are out of range. + */ inline std::optional encodeCTID(uint32_t ledgerSeq, uint32_t txnIndex, uint32_t networkID) noexcept { - if (ledgerSeq > 0x0FFF'FFFF || txnIndex > 0xFFFF || networkID > 0xFFFF) - return {}; + constexpr uint32_t maxLedgerSeq = 0x0FFF'FFFF; + constexpr uint32_t maxTxnIndex = 0xFFFF; + constexpr uint32_t maxNetworkID = 0xFFFF; + + if (ledgerSeq > maxLedgerSeq || txnIndex > maxTxnIndex || + networkID > maxNetworkID) + return std::nullopt; uint64_t ctidValue = - ((0xC000'0000ULL + static_cast(ledgerSeq)) << 32) + - (static_cast(txnIndex) << 16) + networkID; + ((0xC000'0000ULL + static_cast(ledgerSeq)) << 32) | + ((static_cast(txnIndex) << 16) | networkID); std::stringstream buffer; buffer << std::hex << std::uppercase << std::setfill('0') << std::setw(16) << ctidValue; - return {buffer.str()}; + return buffer.str(); } +/** + * @brief Decodes a CTID string or integer into its component parts. + * + * @tparam T Type of the CTID input (string, string_view, char*, integral). + * @param ctid CTID value to decode. + * @return Optional tuple of (ledgerSeq, txnIndex, networkID), or std::nullopt + * if invalid. + */ template inline std::optional> decodeCTID(T const ctid) noexcept { - uint64_t ctidValue{0}; + uint64_t ctidValue = 0; + if constexpr ( - std::is_same_v || std::is_same_v || - std::is_same_v || std::is_same_v) + std::is_same_v || std::is_same_v || + std::is_same_v || std::is_same_v) { std::string const ctidString(ctid); - if (ctidString.length() != 16) - return {}; + if (ctidString.size() != 16) + return std::nullopt; - if (!boost::regex_match(ctidString, boost::regex("^[0-9A-Fa-f]+$"))) - return {}; + static boost::regex const hexRegex("^[0-9A-Fa-f]{16}$"); + if (!boost::regex_match(ctidString, hexRegex)) + return std::nullopt; - ctidValue = std::stoull(ctidString, nullptr, 16); + try + { + ctidValue = std::stoull(ctidString, nullptr, 16); + } + // LCOV_EXCL_START + catch (...) + { + // should be impossible to hit given the length/regex check + return std::nullopt; + } + // LCOV_EXCL_STOP } else if constexpr (std::is_integral_v) - ctidValue = ctid; + { + ctidValue = static_cast(ctid); + } else - return {}; + { + return std::nullopt; + } - if ((ctidValue & 0xF000'0000'0000'0000ULL) != 0xC000'0000'0000'0000ULL) - return {}; + // Validate CTID prefix. 
+    constexpr uint64_t ctidPrefixMask = 0xF000'0000'0000'0000ULL;
+    constexpr uint64_t ctidPrefix = 0xC000'0000'0000'0000ULL;
+    if ((ctidValue & ctidPrefixMask) != ctidPrefix)
+        return std::nullopt;

-    uint32_t ledger_seq = (ctidValue >> 32) & 0xFFFF'FFFUL;
-    uint16_t txn_index = (ctidValue >> 16) & 0xFFFFU;
-    uint16_t network_id = ctidValue & 0xFFFFU;
-    return {{ledger_seq, txn_index, network_id}};
+    uint32_t ledgerSeq = static_cast<uint32_t>((ctidValue >> 32) & 0x0FFF'FFFF);
+    uint16_t txnIndex = static_cast<uint16_t>((ctidValue >> 16) & 0xFFFF);
+    uint16_t networkID = static_cast<uint16_t>(ctidValue & 0xFFFF);
+
+    return std::make_tuple(ledgerSeq, txnIndex, networkID);
 }

 }  // namespace RPC

From f69ad4eff66d26b8c57e6e1649850652abfbb3b9 Mon Sep 17 00:00:00 2001
From: Ayaz Salikhov
Date: Thu, 11 Sep 2025 16:42:27 +0100
Subject: [PATCH 177/244] docs: Add remote to `conan lock create` command (#5770)

* docs: Add remote to `conan lock create` command
* Document error resolution for conan package issues
* Update BUILD.md
* Add more info about lockfiles
---
 BUILD.md | 21 +++++++++++++++++++--
 1 file changed, 19 insertions(+), 2 deletions(-)

diff --git a/BUILD.md b/BUILD.md
index 6b1594bb5e..fd7a0b855d 100644
--- a/BUILD.md
+++ b/BUILD.md
@@ -132,7 +132,7 @@ higher index than the default Conan Center remote, so it is consulted first.
 You can do this by running:

 ```bash
-conan remote add --index 0 xrplf "https://conan.ripplex.io"
+conan remote add --index 0 xrplf https://conan.ripplex.io
 ```

 Alternatively, you can pull the patched recipes into the repository and use them
@@ -479,12 +479,24 @@ It is implicitly used when running `conan` commands, you don't need to specify i
 You have to update this file every time you add a new dependency or change a revision or version of an existing dependency.

-To do that, run the following command in the repository root:
+> [!NOTE]
+> Conan uses local cache by default when creating a lockfile.
+>
+> To ensure that lockfile creation works the same way on all developer machines, you should clear the local cache before creating a new lockfile.
+
+To create a new lockfile, run the following commands in the repository root:

 ```bash
+conan remove '*' --confirm
+rm conan.lock
+# This ensures that the xrplf remote is the first to be consulted
+conan remote add --force --index 0 xrplf https://conan.ripplex.io
 conan lock create . -o '&:jemalloc=True' -o '&:rocksdb=True'
 ```

+> [!NOTE]
+> If some dependencies are exclusive to some OS, you may need to run the last command for them, adding `--profile:all `.
+
 ## Coverage report

 The coverage report is intended for developers using compilers GCC
@@ -586,6 +598,11 @@ After any updates or changes to dependencies, you may need to do the following:
 4. [Regenerate lockfile](#conan-lockfile).
 5. Re-run [conan install](#build-and-test).

+#### ERROR: Package not resolved
+
+If you're seeing an error like `ERROR: Package 'snappy/1.1.10' not resolved: Unable to find 'snappy/1.1.10#968fef506ff261592ec30c574d4a7809%1756234314.246' in remotes.`,
+please add the `xrplf` remote or re-run `conan export` for [patched recipes](#patched-recipes). 
+ ### `protobuf/port_def.inc` file not found If `cmake --build .` results in an error due to a missing a protobuf file, then From 9bd1ce436aaed263190a422ae555ed49eba921e5 Mon Sep 17 00:00:00 2001 From: Jingchen Date: Fri, 12 Sep 2025 16:13:27 +0100 Subject: [PATCH 178/244] Fix code coverage error (#5765) * Fix the issue where COVERAGE_CXX_COMPILER_FLAGS is never used --- cmake/CodeCoverage.cmake | 112 ++++++++++++++++++----------------- cmake/RippledCov.cmake | 2 + cmake/RippledInterface.cmake | 4 -- 3 files changed, 61 insertions(+), 57 deletions(-) diff --git a/cmake/CodeCoverage.cmake b/cmake/CodeCoverage.cmake index ec601de453..c2b66c9cac 100644 --- a/cmake/CodeCoverage.cmake +++ b/cmake/CodeCoverage.cmake @@ -104,6 +104,11 @@ # 2025-08-28, Bronek Kozicki # - fix "At least one COMMAND must be given" CMake warning from policy CMP0175 # +# 2025-09-03, Jingchen Wu +# - remove the unused function append_coverage_compiler_flags and append_coverage_compiler_flags_to_target +# - add a new function add_code_coverage_to_target +# - remove some unused code +# # USAGE: # # 1. Copy this file into your cmake modules path. @@ -112,10 +117,8 @@ # using a CMake option() to enable it just optionally): # include(CodeCoverage) # -# 3. Append necessary compiler flags for all supported source files: -# append_coverage_compiler_flags() -# Or for specific target: -# append_coverage_compiler_flags_to_target(YOUR_TARGET_NAME) +# 3. Append necessary compiler flags and linker flags for all supported source files: +# add_code_coverage_to_target( ) # # 3.a (OPTIONAL) Set appropriate optimization flags, e.g. -O0, -O1 or -Og # @@ -204,67 +207,69 @@ endforeach() set(COVERAGE_COMPILER_FLAGS "-g --coverage" CACHE INTERNAL "") + +set(COVERAGE_CXX_COMPILER_FLAGS "") +set(COVERAGE_C_COMPILER_FLAGS "") +set(COVERAGE_CXX_LINKER_FLAGS "") +set(COVERAGE_C_LINKER_FLAGS "") + if(CMAKE_CXX_COMPILER_ID MATCHES "(GNU|Clang)") include(CheckCXXCompilerFlag) include(CheckCCompilerFlag) + include(CheckLinkerFlag) + + set(COVERAGE_CXX_COMPILER_FLAGS ${COVERAGE_COMPILER_FLAGS}) + set(COVERAGE_C_COMPILER_FLAGS ${COVERAGE_COMPILER_FLAGS}) + set(COVERAGE_CXX_LINKER_FLAGS ${COVERAGE_COMPILER_FLAGS}) + set(COVERAGE_C_LINKER_FLAGS ${COVERAGE_COMPILER_FLAGS}) check_cxx_compiler_flag(-fprofile-abs-path HAVE_cxx_fprofile_abs_path) if(HAVE_cxx_fprofile_abs_path) - set(COVERAGE_CXX_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-abs-path") + set(COVERAGE_CXX_COMPILER_FLAGS "${COVERAGE_CXX_COMPILER_FLAGS} -fprofile-abs-path") endif() check_c_compiler_flag(-fprofile-abs-path HAVE_c_fprofile_abs_path) if(HAVE_c_fprofile_abs_path) - set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-abs-path") + set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_C_COMPILER_FLAGS} -fprofile-abs-path") + endif() + + check_linker_flag(CXX -fprofile-abs-path HAVE_cxx_linker_fprofile_abs_path) + if(HAVE_cxx_linker_fprofile_abs_path) + set(COVERAGE_CXX_LINKER_FLAGS "${COVERAGE_CXX_LINKER_FLAGS} -fprofile-abs-path") + endif() + + check_linker_flag(C -fprofile-abs-path HAVE_c_linker_fprofile_abs_path) + if(HAVE_c_linker_fprofile_abs_path) + set(COVERAGE_C_LINKER_FLAGS "${COVERAGE_C_LINKER_FLAGS} -fprofile-abs-path") endif() check_cxx_compiler_flag(-fprofile-update=atomic HAVE_cxx_fprofile_update) if(HAVE_cxx_fprofile_update) - set(COVERAGE_CXX_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-update=atomic") + set(COVERAGE_CXX_COMPILER_FLAGS "${COVERAGE_CXX_COMPILER_FLAGS} -fprofile-update=atomic") endif() check_c_compiler_flag(-fprofile-update=atomic 
HAVE_c_fprofile_update) if(HAVE_c_fprofile_update) - set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-update=atomic") + set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_C_COMPILER_FLAGS} -fprofile-update=atomic") endif() -endif() -set(CMAKE_Fortran_FLAGS_COVERAGE - ${COVERAGE_COMPILER_FLAGS} - CACHE STRING "Flags used by the Fortran compiler during coverage builds." - FORCE ) -set(CMAKE_CXX_FLAGS_COVERAGE - ${COVERAGE_COMPILER_FLAGS} - CACHE STRING "Flags used by the C++ compiler during coverage builds." - FORCE ) -set(CMAKE_C_FLAGS_COVERAGE - ${COVERAGE_COMPILER_FLAGS} - CACHE STRING "Flags used by the C compiler during coverage builds." - FORCE ) -set(CMAKE_EXE_LINKER_FLAGS_COVERAGE - "" - CACHE STRING "Flags used for linking binaries during coverage builds." - FORCE ) -set(CMAKE_SHARED_LINKER_FLAGS_COVERAGE - "" - CACHE STRING "Flags used by the shared libraries linker during coverage builds." - FORCE ) -mark_as_advanced( - CMAKE_Fortran_FLAGS_COVERAGE - CMAKE_CXX_FLAGS_COVERAGE - CMAKE_C_FLAGS_COVERAGE - CMAKE_EXE_LINKER_FLAGS_COVERAGE - CMAKE_SHARED_LINKER_FLAGS_COVERAGE ) + check_linker_flag(CXX -fprofile-update=atomic HAVE_cxx_linker_fprofile_update) + if(HAVE_cxx_linker_fprofile_update) + set(COVERAGE_CXX_LINKER_FLAGS "${COVERAGE_CXX_LINKER_FLAGS} -fprofile-update=atomic") + endif() + + check_linker_flag(C -fprofile-update=atomic HAVE_c_linker_fprofile_update) + if(HAVE_c_linker_fprofile_update) + set(COVERAGE_C_LINKER_FLAGS "${COVERAGE_C_LINKER_FLAGS} -fprofile-update=atomic") + endif() + +endif() get_property(GENERATOR_IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) if(NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG)) message(WARNING "Code coverage results with an optimised (non-Debug) build may be misleading") endif() # NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG) -if(CMAKE_C_COMPILER_ID STREQUAL "GNU" OR CMAKE_Fortran_COMPILER_ID STREQUAL "GNU") - link_libraries(gcov) -endif() - # Defines a target for running and collection code coverage information # Builds dependencies, runs the given executable and outputs reports. # NOTE! 
The executable should always have a ZERO as exit code otherwise @@ -454,18 +459,19 @@ function(setup_target_for_coverage_gcovr) ) endfunction() # setup_target_for_coverage_gcovr -function(append_coverage_compiler_flags) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE) - set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE) - message(STATUS "Appending code coverage compiler flags: ${COVERAGE_COMPILER_FLAGS}") -endfunction() # append_coverage_compiler_flags +function(add_code_coverage_to_target name scope) + separate_arguments(COVERAGE_CXX_COMPILER_FLAGS NATIVE_COMMAND "${COVERAGE_CXX_COMPILER_FLAGS}") + separate_arguments(COVERAGE_C_COMPILER_FLAGS NATIVE_COMMAND "${COVERAGE_C_COMPILER_FLAGS}") + separate_arguments(COVERAGE_CXX_LINKER_FLAGS NATIVE_COMMAND "${COVERAGE_CXX_LINKER_FLAGS}") + separate_arguments(COVERAGE_C_LINKER_FLAGS NATIVE_COMMAND "${COVERAGE_C_LINKER_FLAGS}") -# Setup coverage for specific library -function(append_coverage_compiler_flags_to_target name) - separate_arguments(_flag_list NATIVE_COMMAND "${COVERAGE_COMPILER_FLAGS}") - target_compile_options(${name} PRIVATE ${_flag_list}) - if(CMAKE_C_COMPILER_ID STREQUAL "GNU" OR CMAKE_Fortran_COMPILER_ID STREQUAL "GNU") - target_link_libraries(${name} PRIVATE gcov) - endif() -endfunction() + # Add compiler options to the target + target_compile_options(${name} ${scope} + $<$:${COVERAGE_CXX_COMPILER_FLAGS}> + $<$:${COVERAGE_C_COMPILER_FLAGS}>) + + target_link_libraries (${name} ${scope} + $<$:${COVERAGE_CXX_LINKER_FLAGS} gcov> + $<$:${COVERAGE_C_LINKER_FLAGS} gcov> + ) +endfunction() # add_code_coverage_to_target diff --git a/cmake/RippledCov.cmake b/cmake/RippledCov.cmake index 3c48bb1c14..847915a51a 100644 --- a/cmake/RippledCov.cmake +++ b/cmake/RippledCov.cmake @@ -36,3 +36,5 @@ setup_target_for_coverage_gcovr( EXCLUDE "src/test" "include/xrpl/beast/test" "include/xrpl/beast/unit_test" "${CMAKE_BINARY_DIR}/pb-xrpl.libpb" DEPENDENCIES rippled ) + +add_code_coverage_to_target(opts INTERFACE) diff --git a/cmake/RippledInterface.cmake b/cmake/RippledInterface.cmake index 85e2717271..375338c788 100644 --- a/cmake/RippledInterface.cmake +++ b/cmake/RippledInterface.cmake @@ -28,15 +28,11 @@ target_compile_options (opts $<$,$>:-Wsuggest-override> $<$:-Wno-maybe-uninitialized> $<$:-fno-omit-frame-pointer> - $<$,$>:-g --coverage -fprofile-abs-path> - $<$,$>:-g --coverage> $<$:-pg> $<$,$>:-p>) target_link_libraries (opts INTERFACE - $<$,$>:-g --coverage -fprofile-abs-path> - $<$,$>:-g --coverage> $<$:-pg> $<$,$>:-p>) From 406c26cc72a7a8bf4a27baa02d74da81be977dd0 Mon Sep 17 00:00:00 2001 From: Ayaz Salikhov Date: Fri, 12 Sep 2025 18:09:42 +0100 Subject: [PATCH 179/244] ci: Fix conan secrets in `upload-conan-deps` (#5785) - Accounts for some variables that were changed and missed when the reusable workflow was removed. 
--- .github/workflows/upload-conan-deps.yml | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/.github/workflows/upload-conan-deps.yml b/.github/workflows/upload-conan-deps.yml index 5af72a9e41..6b94815284 100644 --- a/.github/workflows/upload-conan-deps.yml +++ b/.github/workflows/upload-conan-deps.yml @@ -34,6 +34,10 @@ on: - conanfile.py - conan.lock +env: + CONAN_REMOTE_NAME: xrplf + CONAN_REMOTE_URL: https://conan.ripplex.io + concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true @@ -67,6 +71,9 @@ jobs: - name: Setup Conan uses: ./.github/actions/setup-conan + with: + conan_remote_name: ${{ env.CONAN_REMOTE_NAME }} + conan_remote_url: ${{ env.CONAN_REMOTE_URL }} - name: Build dependencies uses: ./.github/actions/build-deps @@ -75,10 +82,10 @@ jobs: build_type: ${{ matrix.build_type }} force_build: ${{ github.event_name == 'schedule' || github.event.inputs.force_source_build == 'true' }} - - name: Login to Conan + - name: Log into Conan remote if: github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' - run: conan remote login -p ${{ secrets.CONAN_PASSWORD }} ${{ inputs.conan_remote_name }} ${{ secrets.CONAN_USERNAME }} + run: conan remote login ${{ env.CONAN_REMOTE_NAME }} "${{ secrets.CONAN_REMOTE_USERNAME }}" --password "${{ secrets.CONAN_REMOTE_PASSWORD }}" - name: Upload Conan packages if: github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' && github.event_name != 'schedule' - run: conan upload "*" -r=${{ inputs.conan_remote_name }} --confirm ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }} + run: conan upload "*" -r=${{ env.CONAN_REMOTE_NAME }} --confirm ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }} From bd182c0a3e0c153ef15ce6721db57c40e2369ccd Mon Sep 17 00:00:00 2001 From: Jingchen Date: Mon, 15 Sep 2025 14:51:19 +0100 Subject: [PATCH 180/244] fix: Skip processing transaction batch if the batch is empty (#5670) Avoids an assertion failure in NetworkOPsImp::apply in the unlikely event that all incoming transactions are invalid. --- src/test/app/NetworkOPs_test.cpp | 80 ++++++++++++++++++++++++++++ src/xrpld/app/misc/NetworkOPs.cpp | 5 ++ src/xrpld/overlay/detail/PeerImp.cpp | 7 ++- 3 files changed, 90 insertions(+), 2 deletions(-) create mode 100644 src/test/app/NetworkOPs_test.cpp diff --git a/src/test/app/NetworkOPs_test.cpp b/src/test/app/NetworkOPs_test.cpp new file mode 100644 index 0000000000..edea55105b --- /dev/null +++ b/src/test/app/NetworkOPs_test.cpp @@ -0,0 +1,80 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Dev Null Productions + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include + +#include + +namespace ripple { +namespace test { + +class NetworkOPs_test : public beast::unit_test::suite +{ +public: + void + run() override + { + testAllBadHeldTransactions(); + } + + void + testAllBadHeldTransactions() + { + // All trasactions are already marked as SF_BAD, and we should be able + // to handle the case properly without an assertion failure + testcase("No valid transactions in batch"); + + std::string logs; + + { + using namespace jtx; + auto const alice = Account{"alice"}; + Env env{ + *this, + envconfig(), + std::make_unique(&logs), + beast::severities::kAll}; + env.memoize(env.master); + env.memoize(alice); + + auto const jtx = env.jt(ticket::create(alice, 1), seq(1), fee(10)); + + auto transacionId = jtx.stx->getTransactionID(); + env.app().getHashRouter().setFlags( + transacionId, HashRouterFlags::HELD); + + env(jtx, json(jss::Sequence, 1), ter(terNO_ACCOUNT)); + + env.app().getHashRouter().setFlags( + transacionId, HashRouterFlags::BAD); + + env.close(); + } + + BEAST_EXPECT( + logs.find("No transaction to process!") != std::string::npos); + } +}; + +BEAST_DEFINE_TESTSUITE(NetworkOPs, app, ripple); + +} // namespace test +} // namespace ripple diff --git a/src/xrpld/app/misc/NetworkOPs.cpp b/src/xrpld/app/misc/NetworkOPs.cpp index 403090c390..b9069442f8 100644 --- a/src/xrpld/app/misc/NetworkOPs.cpp +++ b/src/xrpld/app/misc/NetworkOPs.cpp @@ -1452,6 +1452,11 @@ NetworkOPsImp::processTransactionSet(CanonicalTXSet const& set) for (auto& t : transactions) mTransactions.push_back(std::move(t)); } + if (mTransactions.empty()) + { + JLOG(m_journal.debug()) << "No transaction to process!"; + return; + } doTransactionSyncBatch(lock, [&](std::unique_lock const&) { XRPL_ASSERT( diff --git a/src/xrpld/overlay/detail/PeerImp.cpp b/src/xrpld/overlay/detail/PeerImp.cpp index 69f25e1eb4..2cd9432eb8 100644 --- a/src/xrpld/overlay/detail/PeerImp.cpp +++ b/src/xrpld/overlay/detail/PeerImp.cpp @@ -2880,6 +2880,9 @@ PeerImp::checkTransaction( (stx->getFieldU32(sfLastLedgerSequence) < app_.getLedgerMaster().getValidLedgerIndex())) { + JLOG(p_journal_.info()) + << "Marking transaction " << stx->getTransactionID() + << "as BAD because it's expired"; app_.getHashRouter().setFlags( stx->getTransactionID(), HashRouterFlags::BAD); charge(Resource::feeUselessData, "expired tx"); @@ -2936,7 +2939,7 @@ PeerImp::checkTransaction( { if (!validReason.empty()) { - JLOG(p_journal_.trace()) + JLOG(p_journal_.debug()) << "Exception checking transaction: " << validReason; } @@ -2963,7 +2966,7 @@ PeerImp::checkTransaction( { if (!reason.empty()) { - JLOG(p_journal_.trace()) + JLOG(p_journal_.debug()) << "Exception checking transaction: " << reason; } app_.getHashRouter().setFlags( From 37c377a1b6ab18055bd3c8b954c5de97aaee683b Mon Sep 17 00:00:00 2001 From: Denis Angell Date: Mon, 15 Sep 2025 16:48:47 +0200 Subject: [PATCH 181/244] Fix: EscrowTokenV1 (#5571) * resolves an accounting inconsistency in MPT escrows where transfer fees were not properly handled when unlocking escrowed tokens. 
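
To make the accounting concrete, here is a minimal standalone sketch (not rippled code: plain integers stand in for the STAmount/MPT ledger fields, and the rate arithmetic simply mirrors the 25% transfer-fee numbers used in the EscrowToken tests below). It shows the intended post-fix bookkeeping: the full gross amount leaves the locked counters, the holder receives the net amount, and the fee portion is burned from the issuer's outstanding supply.

```cpp
// Minimal sketch, assuming a transfer rate expressed in billionths
// (1'250'000'000 == 1.25) and that the fee is gross - net, as in the tests.
#include <cassert>
#include <cstdint>

struct IssuanceCounters
{
    std::uint64_t outstanding;  // stands in for sfOutstandingAmount
    std::uint64_t locked;       // stands in for sfLockedAmount
};

void
unlockEscrow(
    IssuanceCounters& c,
    std::uint64_t gross,
    std::uint64_t rateBillionths)
{
    std::uint64_t const net = gross * 1'000'000'000ULL / rateBillionths;
    std::uint64_t const fee = gross - net;

    c.locked -= gross;     // release the full escrowed (gross) amount
    c.outstanding -= fee;  // the fee is burned rather than redistributed
}

int
main()
{
    IssuanceCounters c{20'000, 125};
    unlockEscrow(c, 125, 1'250'000'000ULL);  // escrow of 125 at a 1.25 rate
    assert(c.locked == 0);            // pre-fix, 25 units stayed "locked"
    assert(c.outstanding == 19'975);  // pre-fix, this remained 20'000
}
```

The netAmount/grossAmount split in the `rippleUnlockEscrowMPT` signature below is what carries this distinction through the ledger code.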
--- include/xrpl/protocol/detail/features.macro | 1 + src/test/app/EscrowToken_test.cpp | 68 +++++++++++++++++++++ src/xrpld/app/tx/detail/Escrow.cpp | 9 ++- src/xrpld/ledger/View.h | 3 +- src/xrpld/ledger/detail/View.cpp | 42 ++++++++++--- 5 files changed, 113 insertions(+), 10 deletions(-) diff --git a/include/xrpl/protocol/detail/features.macro b/include/xrpl/protocol/detail/features.macro index 9aacbbe3d9..f04e9f3641 100644 --- a/include/xrpl/protocol/detail/features.macro +++ b/include/xrpl/protocol/detail/features.macro @@ -32,6 +32,7 @@ // If you add an amendment here, then do not forget to increment `numFeatures` // in include/xrpl/protocol/Feature.h. +XRPL_FIX (TokenEscrowV1, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (DelegateV1_1, Supported::no, VoteBehavior::DefaultNo) XRPL_FIX (PriceOracleOrder, Supported::no, VoteBehavior::DefaultNo) XRPL_FIX (MPTDeliveredAmount, Supported::no, VoteBehavior::DefaultNo) diff --git a/src/test/app/EscrowToken_test.cpp b/src/test/app/EscrowToken_test.cpp index e81064c825..28c9a5b167 100644 --- a/src/test/app/EscrowToken_test.cpp +++ b/src/test/app/EscrowToken_test.cpp @@ -3501,6 +3501,10 @@ struct EscrowToken_test : public beast::unit_test::suite BEAST_EXPECT( transferRate.value == std::uint32_t(1'000'000'000 * 1.25)); + BEAST_EXPECT(mptEscrowed(env, alice, MPT) == 125); + BEAST_EXPECT(issuerMPTEscrowed(env, MPT) == 125); + BEAST_EXPECT(env.balance(gw, MPT) == MPT(20'000)); + // bob can finish escrow env(escrow::finish(bob, alice, seq1), escrow::condition(escrow::cb1), @@ -3510,6 +3514,15 @@ struct EscrowToken_test : public beast::unit_test::suite BEAST_EXPECT(env.balance(alice, MPT) == preAlice - delta); BEAST_EXPECT(env.balance(bob, MPT) == MPT(10'100)); + + auto const escrowedWithFix = + env.current()->rules().enabled(fixTokenEscrowV1) ? 0 : 25; + auto const outstandingWithFix = + env.current()->rules().enabled(fixTokenEscrowV1) ? 
MPT(19'975) + : MPT(20'000); + BEAST_EXPECT(mptEscrowed(env, alice, MPT) == escrowedWithFix); + BEAST_EXPECT(issuerMPTEscrowed(env, MPT) == escrowedWithFix); + BEAST_EXPECT(env.balance(gw, MPT) == outstandingWithFix); } // test locked rate: cancel @@ -3554,6 +3567,60 @@ struct EscrowToken_test : public beast::unit_test::suite BEAST_EXPECT(env.balance(alice, MPT) == preAlice); BEAST_EXPECT(env.balance(bob, MPT) == preBob); + BEAST_EXPECT(env.balance(gw, MPT) == MPT(20'000)); + BEAST_EXPECT(mptEscrowed(env, alice, MPT) == 0); + BEAST_EXPECT(issuerMPTEscrowed(env, MPT) == 0); + } + + // test locked rate: issuer is destination + { + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + + MPTTester mptGw(env, gw, {.holders = {alice, bob}}); + mptGw.create( + {.transferFee = 25000, + .ownerCount = 1, + .holderCount = 0, + .flags = tfMPTCanEscrow | tfMPTCanTransfer}); + mptGw.authorize({.account = alice}); + mptGw.authorize({.account = bob}); + auto const MPT = mptGw["MPT"]; + env(pay(gw, alice, MPT(10'000))); + env(pay(gw, bob, MPT(10'000))); + env.close(); + + // alice can create escrow w/ xfer rate + auto const preAlice = env.balance(alice, MPT); + auto const seq1 = env.seq(alice); + auto const delta = MPT(125); + env(escrow::create(alice, gw, MPT(125)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150)); + env.close(); + auto const transferRate = escrow::rate(env, alice, seq1); + BEAST_EXPECT( + transferRate.value == std::uint32_t(1'000'000'000 * 1.25)); + + BEAST_EXPECT(mptEscrowed(env, alice, MPT) == 125); + BEAST_EXPECT(issuerMPTEscrowed(env, MPT) == 125); + BEAST_EXPECT(env.balance(gw, MPT) == MPT(20'000)); + + // bob can finish escrow + env(escrow::finish(gw, alice, seq1), + escrow::condition(escrow::cb1), + escrow::fulfillment(escrow::fb1), + fee(baseFee * 150)); + env.close(); + + BEAST_EXPECT(env.balance(alice, MPT) == preAlice - delta); + BEAST_EXPECT(mptEscrowed(env, alice, MPT) == 0); + BEAST_EXPECT(issuerMPTEscrowed(env, MPT) == 0); + BEAST_EXPECT(env.balance(gw, MPT) == MPT(19'875)); } } @@ -3878,6 +3945,7 @@ public: FeatureBitset const all{testable_amendments()}; testIOUWithFeats(all); testMPTWithFeats(all); + testMPTWithFeats(all - fixTokenEscrowV1); } }; diff --git a/src/xrpld/app/tx/detail/Escrow.cpp b/src/xrpld/app/tx/detail/Escrow.cpp index dd0ffac778..3b05aa0007 100644 --- a/src/xrpld/app/tx/detail/Escrow.cpp +++ b/src/xrpld/app/tx/detail/Escrow.cpp @@ -1007,8 +1007,13 @@ escrowUnlockApplyHelper( // compute balance to transfer finalAmt = amount.value() - xferFee; } - - return rippleUnlockEscrowMPT(view, sender, receiver, finalAmt, journal); + return rippleUnlockEscrowMPT( + view, + sender, + receiver, + finalAmt, + view.rules().enabled(fixTokenEscrowV1) ? amount : finalAmt, + journal); } TER diff --git a/src/xrpld/ledger/View.h b/src/xrpld/ledger/View.h index cfd3599f78..faad633e00 100644 --- a/src/xrpld/ledger/View.h +++ b/src/xrpld/ledger/View.h @@ -719,7 +719,8 @@ rippleUnlockEscrowMPT( ApplyView& view, AccountID const& uGrantorID, AccountID const& uGranteeID, - STAmount const& saAmount, + STAmount const& netAmount, + STAmount const& grossAmount, beast::Journal j); /** Calls static accountSendIOU if saAmount represents Issue. 
diff --git a/src/xrpld/ledger/detail/View.cpp b/src/xrpld/ledger/detail/View.cpp index 708d5b29f7..473efa58fb 100644 --- a/src/xrpld/ledger/detail/View.cpp +++ b/src/xrpld/ledger/detail/View.cpp @@ -3006,11 +3006,17 @@ rippleUnlockEscrowMPT( ApplyView& view, AccountID const& sender, AccountID const& receiver, - STAmount const& amount, + STAmount const& netAmount, + STAmount const& grossAmount, beast::Journal j) { - auto const issuer = amount.getIssuer(); - auto const mptIssue = amount.get(); + if (!view.rules().enabled(fixTokenEscrowV1)) + XRPL_ASSERT( + netAmount == grossAmount, + "ripple::rippleUnlockEscrowMPT : netAmount == grossAmount"); + + auto const& issuer = netAmount.getIssuer(); + auto const& mptIssue = netAmount.get(); auto const mptID = keylet::mptIssuance(mptIssue.getMptID()); auto sleIssuance = view.peek(mptID); if (!sleIssuance) @@ -3031,7 +3037,7 @@ rippleUnlockEscrowMPT( } // LCOV_EXCL_STOP auto const locked = sleIssuance->getFieldU64(sfLockedAmount); - auto const redeem = amount.mpt().value(); + auto const redeem = grossAmount.mpt().value(); // Underflow check for subtraction if (!canSubtract( @@ -3064,7 +3070,7 @@ rippleUnlockEscrowMPT( } // LCOV_EXCL_STOP auto current = sle->getFieldU64(sfMPTAmount); - auto delta = amount.mpt().value(); + auto delta = netAmount.mpt().value(); // Overflow check for addition if (!canAdd(STAmount(mptIssue, current), STAmount(mptIssue, delta))) @@ -3082,7 +3088,7 @@ rippleUnlockEscrowMPT( { // Decrease the Issuance OutstandingAmount auto const outstanding = sleIssuance->getFieldU64(sfOutstandingAmount); - auto const redeem = amount.mpt().value(); + auto const redeem = netAmount.mpt().value(); // Underflow check for subtraction if (!canSubtract( @@ -3126,7 +3132,7 @@ rippleUnlockEscrowMPT( } // LCOV_EXCL_STOP auto const locked = sle->getFieldU64(sfLockedAmount); - auto const delta = amount.mpt().value(); + auto const delta = grossAmount.mpt().value(); // Underflow check for subtraction if (!canSubtract(STAmount(mptIssue, locked), STAmount(mptIssue, delta))) @@ -3144,6 +3150,28 @@ rippleUnlockEscrowMPT( sle->setFieldU64(sfLockedAmount, newLocked); view.update(sle); } + + // Note: The gross amount is the amount that was locked, the net + // amount is the amount that is being unlocked. The difference is the fee + // that was charged for the transfer. If this difference is greater than + // zero, we need to update the outstanding amount. + auto const diff = grossAmount.mpt().value() - netAmount.mpt().value(); + if (diff != 0) + { + auto const outstanding = sleIssuance->getFieldU64(sfOutstandingAmount); + // Underflow check for subtraction + if (!canSubtract( + STAmount(mptIssue, outstanding), STAmount(mptIssue, diff))) + { // LCOV_EXCL_START + JLOG(j.error()) + << "rippleUnlockEscrowMPT: insufficient outstanding amount for " + << mptIssue.getMptID() << ": " << outstanding << " < " << diff; + return tecINTERNAL; + } // LCOV_EXCL_STOP + + sleIssuance->setFieldU64(sfOutstandingAmount, outstanding - diff); + view.update(sleIssuance); + } return tesSUCCESS; } From 4caebfbd0eed74283705f4a2723761bd345a56da Mon Sep 17 00:00:00 2001 From: Bart Date: Mon, 15 Sep 2025 12:26:08 -0400 Subject: [PATCH 182/244] refactor: Wrap GitHub CI conditionals in curly braces (#5796) This change wraps all GitHub conditionals in `${{ .. }}`, both for consistency and to reduce unexpected failures, because it was previously noticed that not all conditionals work without those curly braces. 
--- .github/workflows/on-pr.yml | 6 +++--- .github/workflows/upload-conan-deps.yml | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/on-pr.yml b/.github/workflows/on-pr.yml index 24f27d5162..23f65c14cb 100644 --- a/.github/workflows/on-pr.yml +++ b/.github/workflows/on-pr.yml @@ -92,12 +92,12 @@ jobs: check-levelization: needs: should-run - if: needs.should-run.outputs.go == 'true' + if: ${{ needs.should-run.outputs.go == 'true' }} uses: ./.github/workflows/check-levelization.yml build-test: needs: should-run - if: needs.should-run.outputs.go == 'true' + if: ${{ needs.should-run.outputs.go == 'true' }} uses: ./.github/workflows/build-test.yml strategy: matrix: @@ -111,7 +111,7 @@ jobs: needs: - should-run - build-test - if: needs.should-run.outputs.go == 'true' + if: ${{ needs.should-run.outputs.go == 'true' }} uses: ./.github/workflows/notify-clio.yml secrets: clio_notify_token: ${{ secrets.CLIO_NOTIFY_TOKEN }} diff --git a/.github/workflows/upload-conan-deps.yml b/.github/workflows/upload-conan-deps.yml index 6b94815284..c52b3c89d3 100644 --- a/.github/workflows/upload-conan-deps.yml +++ b/.github/workflows/upload-conan-deps.yml @@ -83,9 +83,9 @@ jobs: force_build: ${{ github.event_name == 'schedule' || github.event.inputs.force_source_build == 'true' }} - name: Log into Conan remote - if: github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' + if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' }} run: conan remote login ${{ env.CONAN_REMOTE_NAME }} "${{ secrets.CONAN_REMOTE_USERNAME }}" --password "${{ secrets.CONAN_REMOTE_PASSWORD }}" - name: Upload Conan packages - if: github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' && github.event_name != 'schedule' + if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' && github.event_name != 'schedule' }} run: conan upload "*" -r=${{ env.CONAN_REMOTE_NAME }} --confirm ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }} From 3e4e9a2ddce8b4c1fc536574cc6d4ab3dcbd6a23 Mon Sep 17 00:00:00 2001 From: Bart Date: Mon, 15 Sep 2025 13:28:47 -0400 Subject: [PATCH 183/244] Only notify clio for PRs targeting the release and master branches (#5794) Clio should only be notified when releases are about to be made, instead of for all PR, so this change only notifies Clio when a PR targets the release or master branch. --- .github/workflows/on-pr.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/on-pr.yml b/.github/workflows/on-pr.yml index 23f65c14cb..9befd31e71 100644 --- a/.github/workflows/on-pr.yml +++ b/.github/workflows/on-pr.yml @@ -111,7 +111,7 @@ jobs: needs: - should-run - build-test - if: ${{ needs.should-run.outputs.go == 'true' }} + if: ${{ needs.should-run.outputs.go == 'true' && contains(fromJSON('["release", "master"]'), github.ref_name) }} uses: ./.github/workflows/notify-clio.yml secrets: clio_notify_token: ${{ secrets.CLIO_NOTIFY_TOKEN }} From ccb9f1e42d13bf83c9c953fb562938e3734a699e Mon Sep 17 00:00:00 2001 From: yinyiqian1 Date: Mon, 15 Sep 2025 15:42:36 -0400 Subject: [PATCH 184/244] Support DynamicMPT XLS-94d (#5705) * extends the functionality of the MPTokenIssuanceSet transaction, allowing the issuer to update fields or flags that were explicitly marked as mutable during creation. 
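
For illustration, here is a minimal standalone sketch of the gating rule implied by that description. This is not the rippled implementation (the real checks live in the MPTokenIssuanceSet transactor and may differ in detail); only the flag constants are copied from the TxFlags.h and LedgerFormats.h hunks below.

```cpp
// Sketch only: models "a flag may be changed later only if the matching
// CanMutate bit was set at creation". Constant values copied from the diff.
#include <cassert>
#include <cstdint>

constexpr std::uint32_t tfMPTCanMutateCanLock = 0x00000002;  // stored on the issuance at create time
constexpr std::uint32_t tfMPTSetCanLock = 0x00000001;        // requested via MPTokenIssuanceSet
constexpr std::uint32_t tfMPTClearCanLock = 0x00000002;

bool
canMutateCanLock(std::uint32_t storedMutableFlags, std::uint32_t requestedFlags)
{
    bool const touchesCanLock =
        (requestedFlags & (tfMPTSetCanLock | tfMPTClearCanLock)) != 0;
    return !touchesCanLock ||
        (storedMutableFlags & tfMPTCanMutateCanLock) != 0;
}

int
main()
{
    // Issuance created with CanLock marked mutable: toggling is allowed.
    assert(canMutateCanLock(tfMPTCanMutateCanLock, tfMPTSetCanLock));
    // Issuance created without that bit: the same request must be rejected.
    assert(!canMutateCanLock(0, tfMPTClearCanLock));
}
```

The same pattern presumably applies to the other tfMPTCanMutate*/tfMPTSet*/tfMPTClear* pairs and to the metadata and transfer-fee mutability bits added in LedgerFormats.h.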
--- include/xrpl/protocol/LedgerFormats.h | 9 + include/xrpl/protocol/TxFlags.h | 33 + include/xrpl/protocol/detail/features.macro | 1 + .../xrpl/protocol/detail/ledger_entries.macro | 1 + include/xrpl/protocol/detail/sfields.macro | 1 + .../xrpl/protocol/detail/transactions.macro | 4 + src/test/app/MPToken_test.cpp | 920 +++++++++++++++++- src/test/jtx/impl/mpt.cpp | 93 +- src/test/jtx/mpt.h | 16 + .../app/tx/detail/MPTokenIssuanceCreate.cpp | 12 + .../app/tx/detail/MPTokenIssuanceCreate.h | 1 + .../app/tx/detail/MPTokenIssuanceSet.cpp | 150 ++- 12 files changed, 1215 insertions(+), 26 deletions(-) diff --git a/include/xrpl/protocol/LedgerFormats.h b/include/xrpl/protocol/LedgerFormats.h index e3efe8fec2..711754df94 100644 --- a/include/xrpl/protocol/LedgerFormats.h +++ b/include/xrpl/protocol/LedgerFormats.h @@ -188,6 +188,15 @@ enum LedgerSpecificFlags { lsfMPTCanTransfer = 0x00000020, lsfMPTCanClawback = 0x00000040, + lsfMPTCanMutateCanLock = 0x00000002, + lsfMPTCanMutateRequireAuth = 0x00000004, + lsfMPTCanMutateCanEscrow = 0x00000008, + lsfMPTCanMutateCanTrade = 0x00000010, + lsfMPTCanMutateCanTransfer = 0x00000020, + lsfMPTCanMutateCanClawback = 0x00000040, + lsfMPTCanMutateMetadata = 0x00010000, + lsfMPTCanMutateTransferFee = 0x00020000, + // ltMPTOKEN lsfMPTAuthorized = 0x00000002, diff --git a/include/xrpl/protocol/TxFlags.h b/include/xrpl/protocol/TxFlags.h index a37474b780..c376180ac0 100644 --- a/include/xrpl/protocol/TxFlags.h +++ b/include/xrpl/protocol/TxFlags.h @@ -151,6 +151,20 @@ constexpr std::uint32_t const tfMPTCanClawback = lsfMPTCanClawback; constexpr std::uint32_t const tfMPTokenIssuanceCreateMask = ~(tfUniversal | tfMPTCanLock | tfMPTRequireAuth | tfMPTCanEscrow | tfMPTCanTrade | tfMPTCanTransfer | tfMPTCanClawback); +// MPTokenIssuanceCreate MutableFlags: +// Indicating specific fields or flags may be changed after issuance. +constexpr std::uint32_t const tfMPTCanMutateCanLock = lsfMPTCanMutateCanLock; +constexpr std::uint32_t const tfMPTCanMutateRequireAuth = lsfMPTCanMutateRequireAuth; +constexpr std::uint32_t const tfMPTCanMutateCanEscrow = lsfMPTCanMutateCanEscrow; +constexpr std::uint32_t const tfMPTCanMutateCanTrade = lsfMPTCanMutateCanTrade; +constexpr std::uint32_t const tfMPTCanMutateCanTransfer = lsfMPTCanMutateCanTransfer; +constexpr std::uint32_t const tfMPTCanMutateCanClawback = lsfMPTCanMutateCanClawback; +constexpr std::uint32_t const tfMPTCanMutateMetadata = lsfMPTCanMutateMetadata; +constexpr std::uint32_t const tfMPTCanMutateTransferFee = lsfMPTCanMutateTransferFee; +constexpr std::uint32_t const tfMPTokenIssuanceCreateMutableMask = + ~(tfMPTCanMutateCanLock | tfMPTCanMutateRequireAuth | tfMPTCanMutateCanEscrow | tfMPTCanMutateCanTrade + | tfMPTCanMutateCanTransfer | tfMPTCanMutateCanClawback | tfMPTCanMutateMetadata | tfMPTCanMutateTransferFee); + // MPTokenAuthorize flags: constexpr std::uint32_t const tfMPTUnauthorize = 0x00000001; constexpr std::uint32_t const tfMPTokenAuthorizeMask = ~(tfUniversal | tfMPTUnauthorize); @@ -161,6 +175,25 @@ constexpr std::uint32_t const tfMPTUnlock = 0x00000002; constexpr std::uint32_t const tfMPTokenIssuanceSetMask = ~(tfUniversal | tfMPTLock | tfMPTUnlock); constexpr std::uint32_t const tfMPTokenIssuanceSetPermissionMask = ~(tfUniversal | tfMPTLock | tfMPTUnlock); +// MPTokenIssuanceSet MutableFlags: +// Set or Clear flags. 
+constexpr std::uint32_t const tfMPTSetCanLock = 0x00000001; +constexpr std::uint32_t const tfMPTClearCanLock = 0x00000002; +constexpr std::uint32_t const tfMPTSetRequireAuth = 0x00000004; +constexpr std::uint32_t const tfMPTClearRequireAuth = 0x00000008; +constexpr std::uint32_t const tfMPTSetCanEscrow = 0x00000010; +constexpr std::uint32_t const tfMPTClearCanEscrow = 0x00000020; +constexpr std::uint32_t const tfMPTSetCanTrade = 0x00000040; +constexpr std::uint32_t const tfMPTClearCanTrade = 0x00000080; +constexpr std::uint32_t const tfMPTSetCanTransfer = 0x00000100; +constexpr std::uint32_t const tfMPTClearCanTransfer = 0x00000200; +constexpr std::uint32_t const tfMPTSetCanClawback = 0x00000400; +constexpr std::uint32_t const tfMPTClearCanClawback = 0x00000800; +constexpr std::uint32_t const tfMPTokenIssuanceSetMutableMask = ~(tfMPTSetCanLock | tfMPTClearCanLock | + tfMPTSetRequireAuth | tfMPTClearRequireAuth | tfMPTSetCanEscrow | tfMPTClearCanEscrow | + tfMPTSetCanTrade | tfMPTClearCanTrade | tfMPTSetCanTransfer | tfMPTClearCanTransfer | + tfMPTSetCanClawback | tfMPTClearCanClawback); + // MPTokenIssuanceDestroy flags: constexpr std::uint32_t const tfMPTokenIssuanceDestroyMask = ~tfUniversal; diff --git a/include/xrpl/protocol/detail/features.macro b/include/xrpl/protocol/detail/features.macro index f04e9f3641..a9f5d95624 100644 --- a/include/xrpl/protocol/detail/features.macro +++ b/include/xrpl/protocol/detail/features.macro @@ -32,6 +32,7 @@ // If you add an amendment here, then do not forget to increment `numFeatures` // in include/xrpl/protocol/Feature.h. +XRPL_FEATURE(DynamicMPT, Supported::no, VoteBehavior::DefaultNo) XRPL_FIX (TokenEscrowV1, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (DelegateV1_1, Supported::no, VoteBehavior::DefaultNo) XRPL_FIX (PriceOracleOrder, Supported::no, VoteBehavior::DefaultNo) diff --git a/include/xrpl/protocol/detail/ledger_entries.macro b/include/xrpl/protocol/detail/ledger_entries.macro index ac9ebc6069..1066986223 100644 --- a/include/xrpl/protocol/detail/ledger_entries.macro +++ b/include/xrpl/protocol/detail/ledger_entries.macro @@ -412,6 +412,7 @@ LEDGER_ENTRY(ltMPTOKEN_ISSUANCE, 0x007e, MPTokenIssuance, mpt_issuance, ({ {sfPreviousTxnID, soeREQUIRED}, {sfPreviousTxnLgrSeq, soeREQUIRED}, {sfDomainID, soeOPTIONAL}, + {sfMutableFlags, soeDEFAULT}, })) /** A ledger object which tracks MPToken diff --git a/include/xrpl/protocol/detail/sfields.macro b/include/xrpl/protocol/detail/sfields.macro index 537fcae479..96192324fd 100644 --- a/include/xrpl/protocol/detail/sfields.macro +++ b/include/xrpl/protocol/detail/sfields.macro @@ -114,6 +114,7 @@ TYPED_SFIELD(sfVoteWeight, UINT32, 48) TYPED_SFIELD(sfFirstNFTokenSequence, UINT32, 50) TYPED_SFIELD(sfOracleDocumentID, UINT32, 51) TYPED_SFIELD(sfPermissionValue, UINT32, 52) +TYPED_SFIELD(sfMutableFlags, UINT32, 53) // 64-bit integers (common) TYPED_SFIELD(sfIndexNext, UINT64, 1) diff --git a/include/xrpl/protocol/detail/transactions.macro b/include/xrpl/protocol/detail/transactions.macro index bfbc18aa1b..3aaa5a40a3 100644 --- a/include/xrpl/protocol/detail/transactions.macro +++ b/include/xrpl/protocol/detail/transactions.macro @@ -548,6 +548,7 @@ TRANSACTION(ttMPTOKEN_ISSUANCE_CREATE, 54, MPTokenIssuanceCreate, {sfMaximumAmount, soeOPTIONAL}, {sfMPTokenMetadata, soeOPTIONAL}, {sfDomainID, soeOPTIONAL}, + {sfMutableFlags, soeOPTIONAL}, })) /** This transaction type destroys a MPTokensIssuance instance */ @@ -566,6 +567,9 @@ TRANSACTION(ttMPTOKEN_ISSUANCE_SET, 56, MPTokenIssuanceSet, 
{sfMPTokenIssuanceID, soeREQUIRED}, {sfHolder, soeOPTIONAL}, {sfDomainID, soeOPTIONAL}, + {sfMPTokenMetadata, soeOPTIONAL}, + {sfTransferFee, soeOPTIONAL}, + {sfMutableFlags, soeOPTIONAL}, })) /** This transaction type authorizes a MPToken instance */ diff --git a/src/test/app/MPToken_test.cpp b/src/test/app/MPToken_test.cpp index 6470962f2f..1410370c33 100644 --- a/src/test/app/MPToken_test.cpp +++ b/src/test/app/MPToken_test.cpp @@ -589,7 +589,8 @@ class MPToken_test : public beast::unit_test::suite .flags = 0x00000008, .err = temINVALID_FLAG}); - if (!features[featureSingleAssetVault]) + if (!features[featureSingleAssetVault] && + !features[featureDynamicMPT]) { // test invalid flags - nothing is being changed mptAlice.set( @@ -623,7 +624,8 @@ class MPToken_test : public beast::unit_test::suite .flags = 0x00000000, .err = temMALFORMED}); - if (!features[featurePermissionedDomains]) + if (!features[featurePermissionedDomains] || + !features[featureSingleAssetVault]) { // cannot set DomainID since PD is not enabled mptAlice.set( @@ -631,7 +633,7 @@ class MPToken_test : public beast::unit_test::suite .domainID = uint256(42), .err = temDISABLED}); } - else + else if (features[featureSingleAssetVault]) { // cannot set DomainID since Holder is set mptAlice.set( @@ -2738,6 +2740,882 @@ class MPToken_test : public beast::unit_test::suite } } + void + testInvalidCreateDynamic(FeatureBitset features) + { + testcase("invalid MPTokenIssuanceCreate for DynamicMPT"); + + using namespace test::jtx; + Account const alice("alice"); + + // Can not provide MutableFlags when DynamicMPT amendment is not enabled + { + Env env{*this, features - featureDynamicMPT}; + MPTTester mptAlice(env, alice); + mptAlice.create( + {.ownerCount = 0, .mutableFlags = 2, .err = temDISABLED}); + mptAlice.create( + {.ownerCount = 0, .mutableFlags = 0, .err = temDISABLED}); + } + + // MutableFlags contains invalid values + { + Env env{*this, features}; + MPTTester mptAlice(env, alice); + + // Value 1 is reserved for MPT lock. 
+ mptAlice.create( + {.ownerCount = 0, .mutableFlags = 1, .err = temINVALID_FLAG}); + mptAlice.create( + {.ownerCount = 0, .mutableFlags = 17, .err = temINVALID_FLAG}); + mptAlice.create( + {.ownerCount = 0, + .mutableFlags = 65535, + .err = temINVALID_FLAG}); + + // MutableFlags can not be 0 + mptAlice.create( + {.ownerCount = 0, .mutableFlags = 0, .err = temINVALID_FLAG}); + } + } + + void + testInvalidSetDynamic(FeatureBitset features) + { + testcase("invalid MPTokenIssuanceSet for DynamicMPT"); + + using namespace test::jtx; + Account const alice("alice"); + Account const bob("bob"); + + // Can not provide MutableFlags, MPTokenMetadata or TransferFee when + // DynamicMPT amendment is not enabled + { + Env env{*this, features - featureDynamicMPT}; + MPTTester mptAlice(env, alice, {.holders = {bob}}); + auto const mptID = makeMptID(env.seq(alice), alice); + + // MutableFlags is not allowed when DynamicMPT is not enabled + mptAlice.set( + {.account = alice, + .id = mptID, + .mutableFlags = 2, + .err = temDISABLED}); + mptAlice.set( + {.account = alice, + .id = mptID, + .mutableFlags = 0, + .err = temDISABLED}); + + // MPTokenMetadata is not allowed when DynamicMPT is not enabled + mptAlice.set( + {.account = alice, + .id = mptID, + .metadata = "test", + .err = temDISABLED}); + mptAlice.set( + {.account = alice, + .id = mptID, + .metadata = "", + .err = temDISABLED}); + + // TransferFee is not allowed when DynamicMPT is not enabled + mptAlice.set( + {.account = alice, + .id = mptID, + .transferFee = 100, + .err = temDISABLED}); + mptAlice.set( + {.account = alice, + .id = mptID, + .transferFee = 0, + .err = temDISABLED}); + } + + // Can not provide holder when MutableFlags, MPTokenMetadata or + // TransferFee is present + { + Env env{*this, features}; + MPTTester mptAlice(env, alice, {.holders = {bob}}); + auto const mptID = makeMptID(env.seq(alice), alice); + + // Holder is not allowed when MutableFlags is present + mptAlice.set( + {.account = alice, + .holder = bob, + .id = mptID, + .mutableFlags = 2, + .err = temMALFORMED}); + + // Holder is not allowed when MPTokenMetadata is present + mptAlice.set( + {.account = alice, + .holder = bob, + .id = mptID, + .metadata = "test", + .err = temMALFORMED}); + + // Holder is not allowed when TransferFee is present + mptAlice.set( + {.account = alice, + .holder = bob, + .id = mptID, + .transferFee = 100, + .err = temMALFORMED}); + } + + // Can not set Flags when MutableFlags, MPTokenMetadata or + // TransferFee is present + { + Env env{*this, features}; + MPTTester mptAlice(env, alice, {.holders = {bob}}); + mptAlice.create( + {.ownerCount = 1, + .mutableFlags = tfMPTCanMutateMetadata | + tfMPTCanMutateCanLock | tfMPTCanMutateTransferFee}); + + // Setting flags is not allowed when MutableFlags is present + mptAlice.set( + {.account = alice, + .flags = tfMPTCanLock, + .mutableFlags = 2, + .err = temMALFORMED}); + + // Setting flags is not allowed when MPTokenMetadata is present + mptAlice.set( + {.account = alice, + .flags = tfMPTCanLock, + .metadata = "test", + .err = temMALFORMED}); + + // setting flags is not allowed when TransferFee is present + mptAlice.set( + {.account = alice, + .flags = tfMPTCanLock, + .transferFee = 100, + .err = temMALFORMED}); + } + + // Flags being 0 or tfFullyCanonicalSig is fine + { + Env env{*this, features}; + MPTTester mptAlice(env, alice, {.holders = {bob}}); + + mptAlice.create( + {.transferFee = 10, + .ownerCount = 1, + .flags = tfMPTCanTransfer, + .mutableFlags = + tfMPTCanMutateTransferFee | 
tfMPTCanMutateMetadata}); + + mptAlice.set( + {.account = alice, + .flags = 0, + .transferFee = 100, + .metadata = "test"}); + mptAlice.set( + {.account = alice, + .flags = tfFullyCanonicalSig, + .transferFee = 200, + .metadata = "test2"}); + } + + // Invalid MutableFlags + { + Env env{*this, features}; + MPTTester mptAlice(env, alice, {.holders = {bob}}); + auto const mptID = makeMptID(env.seq(alice), alice); + + for (auto const flags : {10000, 0, 5000}) + { + mptAlice.set( + {.account = alice, + .id = mptID, + .mutableFlags = flags, + .err = temINVALID_FLAG}); + } + } + + // Can not set and clear the same mutable flag + { + Env env{*this, features}; + MPTTester mptAlice(env, alice, {.holders = {bob}}); + auto const mptID = makeMptID(env.seq(alice), alice); + + auto const flagCombinations = { + tfMPTSetCanLock | tfMPTClearCanLock, + tfMPTSetRequireAuth | tfMPTClearRequireAuth, + tfMPTSetCanEscrow | tfMPTClearCanEscrow, + tfMPTSetCanTrade | tfMPTClearCanTrade, + tfMPTSetCanTransfer | tfMPTClearCanTransfer, + tfMPTSetCanClawback | tfMPTClearCanClawback, + tfMPTSetCanLock | tfMPTClearCanLock | tfMPTClearCanTrade, + tfMPTSetCanTransfer | tfMPTClearCanTransfer | + tfMPTSetCanEscrow | tfMPTClearCanClawback}; + + for (auto const& mutableFlags : flagCombinations) + { + mptAlice.set( + {.account = alice, + .id = mptID, + .mutableFlags = mutableFlags, + .err = temINVALID_FLAG}); + } + } + + // Can not mutate flag which is not mutable + { + Env env{*this, features}; + MPTTester mptAlice(env, alice, {.holders = {bob}}); + + mptAlice.create({.ownerCount = 1}); + + auto const mutableFlags = { + tfMPTSetCanLock, + tfMPTClearCanLock, + tfMPTSetRequireAuth, + tfMPTClearRequireAuth, + tfMPTSetCanEscrow, + tfMPTClearCanEscrow, + tfMPTSetCanTrade, + tfMPTClearCanTrade, + tfMPTSetCanTransfer, + tfMPTClearCanTransfer, + tfMPTSetCanClawback, + tfMPTClearCanClawback}; + + for (auto const& mutableFlag : mutableFlags) + { + mptAlice.set( + {.account = alice, + .mutableFlags = mutableFlag, + .err = tecNO_PERMISSION}); + } + } + + // Metadata exceeding max length + { + Env env{*this, features}; + MPTTester mptAlice(env, alice, {.holders = {bob}}); + + mptAlice.create( + {.ownerCount = 1, .mutableFlags = tfMPTCanMutateMetadata}); + + std::string metadata(maxMPTokenMetadataLength + 1, 'a'); + mptAlice.set( + {.account = alice, .metadata = metadata, .err = temMALFORMED}); + } + + // Can not mutate metadata when it is not mutable + { + Env env{*this, features}; + MPTTester mptAlice(env, alice, {.holders = {bob}}); + + mptAlice.create({.ownerCount = 1}); + mptAlice.set( + {.account = alice, + .metadata = "test", + .err = tecNO_PERMISSION}); + } + + // Transfer fee exceeding the max value + { + Env env{*this, features}; + MPTTester mptAlice(env, alice, {.holders = {bob}}); + auto const mptID = makeMptID(env.seq(alice), alice); + + mptAlice.create( + {.ownerCount = 1, .mutableFlags = tfMPTCanMutateTransferFee}); + + mptAlice.set( + {.account = alice, + .id = mptID, + .transferFee = maxTransferFee + 1, + .err = temBAD_TRANSFER_FEE}); + } + + // Test setting non-zero transfer fee and clearing MPTCanTransfer at the + // same time + { + Env env{*this, features}; + MPTTester mptAlice(env, alice, {.holders = {bob}}); + + mptAlice.create( + {.transferFee = 100, + .ownerCount = 1, + .flags = tfMPTCanTransfer, + .mutableFlags = + tfMPTCanMutateTransferFee | tfMPTCanMutateCanTransfer}); + + // Can not set non-zero transfer fee and clear MPTCanTransfer at the + // same time + mptAlice.set( + {.account = alice, + .mutableFlags = 
tfMPTClearCanTransfer, + .transferFee = 1, + .err = temMALFORMED}); + + // Can set transfer fee to zero and clear MPTCanTransfer at the same + // time. tfMPTCanTransfer will be cleared and TransferFee field will + // be removed. + mptAlice.set( + {.account = alice, + .mutableFlags = tfMPTClearCanTransfer, + .transferFee = 0}); + BEAST_EXPECT(!mptAlice.isTransferFeePresent()); + } + + // Can not set non-zero transfer fee when MPTCanTransfer is not set + { + Env env{*this, features}; + MPTTester mptAlice(env, alice, {.holders = {bob}}); + + mptAlice.create( + {.ownerCount = 1, + .mutableFlags = + tfMPTCanMutateTransferFee | tfMPTCanMutateCanTransfer}); + + mptAlice.set( + {.account = alice, + .transferFee = 100, + .err = tecNO_PERMISSION}); + + // Can not set transfer fee even when trying to set MPTCanTransfer + // at the same time. MPTCanTransfer must be set first, then transfer + // fee can be set in a separate transaction. + mptAlice.set( + {.account = alice, + .mutableFlags = tfMPTSetCanTransfer, + .transferFee = 100, + .err = tecNO_PERMISSION}); + } + + // Can not mutate transfer fee when it is not mutable + { + Env env{*this, features}; + MPTTester mptAlice(env, alice, {.holders = {bob}}); + + mptAlice.create( + {.transferFee = 10, + .ownerCount = 1, + .flags = tfMPTCanTransfer}); + + mptAlice.set( + {.account = alice, + .transferFee = 100, + .err = tecNO_PERMISSION}); + + mptAlice.set( + {.account = alice, .transferFee = 0, .err = tecNO_PERMISSION}); + } + + // Set some flags mutable. Can not mutate the others + { + Env env{*this, features}; + MPTTester mptAlice(env, alice, {.holders = {bob}}); + + mptAlice.create( + {.ownerCount = 1, + .mutableFlags = tfMPTCanMutateCanTrade | + tfMPTCanMutateCanTransfer | tfMPTCanMutateMetadata}); + + // Can not mutate transfer fee + mptAlice.set( + {.account = alice, + .transferFee = 100, + .err = tecNO_PERMISSION}); + + auto const invalidFlags = { + tfMPTSetCanLock, + tfMPTClearCanLock, + tfMPTSetRequireAuth, + tfMPTClearRequireAuth, + tfMPTSetCanEscrow, + tfMPTClearCanEscrow, + tfMPTSetCanClawback, + tfMPTClearCanClawback}; + + // Can not mutate flags which are not mutable + for (auto const& mutableFlag : invalidFlags) + { + mptAlice.set( + {.account = alice, + .mutableFlags = mutableFlag, + .err = tecNO_PERMISSION}); + } + + // Can mutate MPTCanTrade + mptAlice.set({.account = alice, .mutableFlags = tfMPTSetCanTrade}); + mptAlice.set( + {.account = alice, .mutableFlags = tfMPTClearCanTrade}); + + // Can mutate MPTCanTransfer + mptAlice.set( + {.account = alice, .mutableFlags = tfMPTSetCanTransfer}); + mptAlice.set( + {.account = alice, .mutableFlags = tfMPTClearCanTransfer}); + + // Can mutate metadata + mptAlice.set({.account = alice, .metadata = "test"}); + mptAlice.set({.account = alice, .metadata = ""}); + } + } + + void + testMutateMPT(FeatureBitset features) + { + testcase("Mutate MPT"); + using namespace test::jtx; + + Account const alice("alice"); + + // Mutate metadata + { + Env env{*this, features}; + MPTTester mptAlice(env, alice); + mptAlice.create( + {.metadata = "test", + .ownerCount = 1, + .mutableFlags = tfMPTCanMutateMetadata}); + + std::vector metadatas = { + "mutate metadata", + "mutate metadata 2", + "mutate metadata 3", + "mutate metadata 3", + "test", + "mutate metadata"}; + + for (auto const& metadata : metadatas) + { + mptAlice.set({.account = alice, .metadata = metadata}); + BEAST_EXPECT(mptAlice.checkMetadata(metadata)); + } + + // Metadata being empty will remove the field + mptAlice.set({.account = alice, .metadata = 
""}); + BEAST_EXPECT(!mptAlice.isMetadataPresent()); + } + + // Mutate transfer fee + { + Env env{*this, features}; + MPTTester mptAlice(env, alice); + mptAlice.create( + {.transferFee = 100, + .metadata = "test", + .ownerCount = 1, + .flags = tfMPTCanTransfer, + .mutableFlags = tfMPTCanMutateTransferFee}); + + for (std::uint16_t const fee : std::initializer_list{ + 1, 10, 100, 200, 500, 1000, maxTransferFee}) + { + mptAlice.set({.account = alice, .transferFee = fee}); + BEAST_EXPECT(mptAlice.checkTransferFee(fee)); + } + + // Setting TransferFee to zero will remove the field + mptAlice.set({.account = alice, .transferFee = 0}); + BEAST_EXPECT(!mptAlice.isTransferFeePresent()); + + // Set transfer fee again + mptAlice.set({.account = alice, .transferFee = 10}); + BEAST_EXPECT(mptAlice.checkTransferFee(10)); + } + + // Test flag toggling + { + auto testFlagToggle = [&](std::uint32_t createFlags, + std::uint32_t setFlags, + std::uint32_t clearFlags) { + Env env{*this, features}; + MPTTester mptAlice(env, alice); + + // Create the MPT object with the specified initial flags + mptAlice.create( + {.metadata = "test", + .ownerCount = 1, + .mutableFlags = createFlags}); + + // Set and clear the flag multiple times + mptAlice.set({.account = alice, .mutableFlags = setFlags}); + mptAlice.set({.account = alice, .mutableFlags = clearFlags}); + mptAlice.set({.account = alice, .mutableFlags = clearFlags}); + mptAlice.set({.account = alice, .mutableFlags = setFlags}); + mptAlice.set({.account = alice, .mutableFlags = setFlags}); + mptAlice.set({.account = alice, .mutableFlags = clearFlags}); + mptAlice.set({.account = alice, .mutableFlags = setFlags}); + mptAlice.set({.account = alice, .mutableFlags = clearFlags}); + }; + + testFlagToggle( + tfMPTCanMutateCanLock, tfMPTCanLock, tfMPTClearCanLock); + testFlagToggle( + tfMPTCanMutateRequireAuth, + tfMPTSetRequireAuth, + tfMPTClearRequireAuth); + testFlagToggle( + tfMPTCanMutateCanEscrow, + tfMPTSetCanEscrow, + tfMPTClearCanEscrow); + testFlagToggle( + tfMPTCanMutateCanTrade, tfMPTSetCanTrade, tfMPTClearCanTrade); + testFlagToggle( + tfMPTCanMutateCanTransfer, + tfMPTSetCanTransfer, + tfMPTClearCanTransfer); + testFlagToggle( + tfMPTCanMutateCanClawback, + tfMPTSetCanClawback, + tfMPTClearCanClawback); + } + } + + void + testMutateCanLock(FeatureBitset features) + { + testcase("Mutate MPTCanLock"); + using namespace test::jtx; + + Account const alice("alice"); + Account const bob("bob"); + + // Individual lock + { + Env env{*this, features}; + MPTTester mptAlice(env, alice, {.holders = {bob}}); + mptAlice.create( + {.ownerCount = 1, + .holderCount = 0, + .flags = tfMPTCanLock | tfMPTCanTransfer, + .mutableFlags = tfMPTCanMutateCanLock | + tfMPTCanMutateCanTrade | tfMPTCanMutateTransferFee}); + mptAlice.authorize({.account = bob, .holderCount = 1}); + + // Lock bob's mptoken + mptAlice.set({.account = alice, .holder = bob, .flags = tfMPTLock}); + + // Can mutate the mutable flags and fields + mptAlice.set({.account = alice, .mutableFlags = tfMPTClearCanLock}); + mptAlice.set({.account = alice, .mutableFlags = tfMPTSetCanLock}); + mptAlice.set({.account = alice, .mutableFlags = tfMPTClearCanLock}); + mptAlice.set({.account = alice, .mutableFlags = tfMPTSetCanTrade}); + mptAlice.set( + {.account = alice, .mutableFlags = tfMPTClearCanTrade}); + mptAlice.set({.account = alice, .transferFee = 200}); + } + + // Global lock + { + Env env{*this, features}; + MPTTester mptAlice(env, alice, {.holders = {bob}}); + mptAlice.create( + {.ownerCount = 1, + .holderCount = 
0, + .flags = tfMPTCanLock, + .mutableFlags = tfMPTCanMutateCanLock | + tfMPTCanMutateCanClawback | tfMPTCanMutateMetadata}); + mptAlice.authorize({.account = bob, .holderCount = 1}); + + // Lock issuance + mptAlice.set({.account = alice, .flags = tfMPTLock}); + + // Can mutate the mutable flags and fields + mptAlice.set({.account = alice, .mutableFlags = tfMPTClearCanLock}); + mptAlice.set({.account = alice, .mutableFlags = tfMPTSetCanLock}); + mptAlice.set({.account = alice, .mutableFlags = tfMPTClearCanLock}); + mptAlice.set( + {.account = alice, .mutableFlags = tfMPTSetCanClawback}); + mptAlice.set( + {.account = alice, .mutableFlags = tfMPTClearCanClawback}); + mptAlice.set({.account = alice, .metadata = "mutate"}); + } + + // Test lock and unlock after mutating MPTCanLock + { + Env env{*this, features}; + MPTTester mptAlice(env, alice, {.holders = {bob}}); + mptAlice.create( + {.ownerCount = 1, + .holderCount = 0, + .flags = tfMPTCanLock, + .mutableFlags = tfMPTCanMutateCanLock | + tfMPTCanMutateCanClawback | tfMPTCanMutateMetadata}); + mptAlice.authorize({.account = bob, .holderCount = 1}); + + // Can lock and unlock + mptAlice.set({.account = alice, .flags = tfMPTLock}); + mptAlice.set({.account = alice, .holder = bob, .flags = tfMPTLock}); + mptAlice.set({.account = alice, .flags = tfMPTUnlock}); + mptAlice.set( + {.account = alice, .holder = bob, .flags = tfMPTUnlock}); + + // Clear lsfMPTCanLock + mptAlice.set({.account = alice, .mutableFlags = tfMPTClearCanLock}); + + // Can not lock or unlock + mptAlice.set( + {.account = alice, + .flags = tfMPTLock, + .err = tecNO_PERMISSION}); + mptAlice.set( + {.account = alice, + .flags = tfMPTUnlock, + .err = tecNO_PERMISSION}); + mptAlice.set( + {.account = alice, + .holder = bob, + .flags = tfMPTLock, + .err = tecNO_PERMISSION}); + mptAlice.set( + {.account = alice, + .holder = bob, + .flags = tfMPTUnlock, + .err = tecNO_PERMISSION}); + + // Set MPTCanLock again + mptAlice.set({.account = alice, .mutableFlags = tfMPTSetCanLock}); + + // Can lock and unlock again + mptAlice.set({.account = alice, .flags = tfMPTLock}); + mptAlice.set({.account = alice, .holder = bob, .flags = tfMPTLock}); + mptAlice.set({.account = alice, .flags = tfMPTUnlock}); + mptAlice.set( + {.account = alice, .holder = bob, .flags = tfMPTUnlock}); + } + } + + void + testMutateRequireAuth(FeatureBitset features) + { + testcase("Mutate MPTRequireAuth"); + using namespace test::jtx; + + Env env{*this, features}; + Account const alice("alice"); + Account const bob("bob"); + + MPTTester mptAlice(env, alice, {.holders = {bob}}); + mptAlice.create( + {.ownerCount = 1, + .flags = tfMPTRequireAuth, + .mutableFlags = tfMPTCanMutateRequireAuth}); + + mptAlice.authorize({.account = bob}); + mptAlice.authorize({.account = alice, .holder = bob}); + + // Pay to bob + mptAlice.pay(alice, bob, 1000); + + // Unauthorize bob + mptAlice.authorize( + {.account = alice, .holder = bob, .flags = tfMPTUnauthorize}); + + // Can not pay to bob + mptAlice.pay(bob, alice, 100, tecNO_AUTH); + + // Clear RequireAuth + mptAlice.set({.account = alice, .mutableFlags = tfMPTClearRequireAuth}); + + // Can pay to bob + mptAlice.pay(alice, bob, 1000); + + // Set RequireAuth again + mptAlice.set({.account = alice, .mutableFlags = tfMPTSetRequireAuth}); + + // Can not pay to bob since he is not authorized + mptAlice.pay(bob, alice, 100, tecNO_AUTH); + + // Authorize bob again + mptAlice.authorize({.account = alice, .holder = bob}); + + // Can pay to bob again + mptAlice.pay(alice, bob, 100); + } + + void 
+ testMutateCanEscrow(FeatureBitset features) + { + testcase("Mutate MPTCanEscrow"); + using namespace test::jtx; + using namespace std::literals; + + Env env{*this, features}; + auto const baseFee = env.current()->fees().base; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const carol = Account("carol"); + + MPTTester mptAlice(env, alice, {.holders = {carol, bob}}); + mptAlice.create( + {.ownerCount = 1, + .holderCount = 0, + .flags = tfMPTCanTransfer, + .mutableFlags = tfMPTCanMutateCanEscrow}); + mptAlice.authorize({.account = carol}); + mptAlice.authorize({.account = bob}); + + auto const MPT = mptAlice["MPT"]; + env(pay(alice, carol, MPT(10'000))); + env(pay(alice, bob, MPT(10'000))); + env.close(); + + // MPTCanEscrow is not enabled + env(escrow::create(carol, bob, MPT(3)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecNO_PERMISSION)); + + // MPTCanEscrow is enabled now + mptAlice.set({.account = alice, .mutableFlags = tfMPTSetCanEscrow}); + env(escrow::create(carol, bob, MPT(3)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150)); + + // Clear MPTCanEscrow + mptAlice.set({.account = alice, .mutableFlags = tfMPTClearCanEscrow}); + env(escrow::create(carol, bob, MPT(3)), + escrow::condition(escrow::cb1), + escrow::finish_time(env.now() + 1s), + fee(baseFee * 150), + ter(tecNO_PERMISSION)); + } + + void + testMutateCanTransfer(FeatureBitset features) + { + testcase("Mutate MPTCanTransfer"); + + using namespace test::jtx; + Account const alice("alice"); + Account const bob("bob"); + Account const carol("carol"); + + { + Env env{*this, features}; + + MPTTester mptAlice(env, alice, {.holders = {bob, carol}}); + mptAlice.create( + {.ownerCount = 1, + .mutableFlags = + tfMPTCanMutateCanTransfer | tfMPTCanMutateTransferFee}); + + mptAlice.authorize({.account = bob}); + mptAlice.authorize({.account = carol}); + + // Pay to bob + mptAlice.pay(alice, bob, 1000); + + // Bob can not pay carol since MPTCanTransfer is not set + mptAlice.pay(bob, carol, 50, tecNO_AUTH); + + // Can not set non-zero transfer fee when MPTCanTransfer is not set + mptAlice.set( + {.account = alice, + .transferFee = 100, + .err = tecNO_PERMISSION}); + + // Can not set non-zero transfer fee even when trying to set + // MPTCanTransfer at the same time + mptAlice.set( + {.account = alice, + .mutableFlags = tfMPTSetCanTransfer, + .transferFee = 100, + .err = tecNO_PERMISSION}); + + // Alice sets MPTCanTransfer + mptAlice.set( + {.account = alice, .mutableFlags = tfMPTSetCanTransfer}); + + // Can set transfer fee now + BEAST_EXPECT(!mptAlice.isTransferFeePresent()); + mptAlice.set({.account = alice, .transferFee = 100}); + BEAST_EXPECT(mptAlice.isTransferFeePresent()); + + // Bob can pay carol + mptAlice.pay(bob, carol, 50); + + // Alice clears MPTCanTransfer + mptAlice.set( + {.account = alice, .mutableFlags = tfMPTClearCanTransfer}); + + // TransferFee field is removed when MPTCanTransfer is cleared + BEAST_EXPECT(!mptAlice.isTransferFeePresent()); + + // Bob can not pay + mptAlice.pay(bob, carol, 50, tecNO_AUTH); + } + + // Can set transfer fee to zero when MPTCanTransfer is not set, but + // tfMPTCanMutateTransferFee is set. 
+ { + Env env{*this, features}; + + MPTTester mptAlice(env, alice, {.holders = {bob, carol}}); + mptAlice.create( + {.transferFee = 100, + .ownerCount = 1, + .flags = tfMPTCanTransfer, + .mutableFlags = + tfMPTCanMutateTransferFee | tfMPTCanMutateCanTransfer}); + + BEAST_EXPECT(mptAlice.checkTransferFee(100)); + + // Clear MPTCanTransfer and transfer fee is removed + mptAlice.set( + {.account = alice, .mutableFlags = tfMPTClearCanTransfer}); + BEAST_EXPECT(!mptAlice.isTransferFeePresent()); + + // Can still set transfer fee to zero, although it is already zero + mptAlice.set({.account = alice, .transferFee = 0}); + + // TransferFee field is still not present + BEAST_EXPECT(!mptAlice.isTransferFeePresent()); + } + } + + void + testMutateCanClawback(FeatureBitset features) + { + testcase("Mutate MPTCanClawback"); + + using namespace test::jtx; + Env env(*this, features); + Account const alice{"alice"}; + Account const bob{"bob"}; + + MPTTester mptAlice(env, alice, {.holders = {bob}}); + + mptAlice.create( + {.ownerCount = 1, + .holderCount = 0, + .mutableFlags = tfMPTCanMutateCanClawback}); + + // Bob creates an MPToken + mptAlice.authorize({.account = bob}); + + // Alice pays bob 100 tokens + mptAlice.pay(alice, bob, 100); + + // MPTCanClawback is not enabled + mptAlice.claw(alice, bob, 1, tecNO_PERMISSION); + + // Enable MPTCanClawback + mptAlice.set({.account = alice, .mutableFlags = tfMPTSetCanClawback}); + + // Can clawback now + mptAlice.claw(alice, bob, 1); + + // Clear MPTCanClawback + mptAlice.set({.account = alice, .mutableFlags = tfMPTClearCanClawback}); + + // Can not clawback + mptAlice.claw(alice, bob, 1, tecNO_PERMISSION); + } + public: void run() override @@ -2747,39 +3625,39 @@ public: // MPTokenIssuanceCreate testCreateValidation(all - featureSingleAssetVault); - testCreateValidation( - (all | featureSingleAssetVault) - featurePermissionedDomains); - testCreateValidation(all | featureSingleAssetVault); + testCreateValidation(all - featurePermissionedDomains); + testCreateValidation(all); testCreateEnabled(all - featureSingleAssetVault); - testCreateEnabled(all | featureSingleAssetVault); + testCreateEnabled(all); // MPTokenIssuanceDestroy testDestroyValidation(all - featureSingleAssetVault); - testDestroyValidation(all | featureSingleAssetVault); + testDestroyValidation(all); testDestroyEnabled(all - featureSingleAssetVault); - testDestroyEnabled(all | featureSingleAssetVault); + testDestroyEnabled(all); // MPTokenAuthorize testAuthorizeValidation(all - featureSingleAssetVault); - testAuthorizeValidation(all | featureSingleAssetVault); + testAuthorizeValidation(all); testAuthorizeEnabled(all - featureSingleAssetVault); - testAuthorizeEnabled(all | featureSingleAssetVault); + testAuthorizeEnabled(all); // MPTokenIssuanceSet + testSetValidation(all - featureSingleAssetVault - featureDynamicMPT); testSetValidation(all - featureSingleAssetVault); - testSetValidation( - (all | featureSingleAssetVault) - featurePermissionedDomains); - testSetValidation(all | featureSingleAssetVault); + testSetValidation(all - featureDynamicMPT); + testSetValidation(all - featurePermissionedDomains); + testSetValidation(all); testSetEnabled(all - featureSingleAssetVault); - testSetEnabled(all | featureSingleAssetVault); + testSetEnabled(all); // MPT clawback testClawbackValidation(all); testClawback(all); // Test Direct Payment - testPayment(all | featureSingleAssetVault); + testPayment(all); testDepositPreauth(all); testDepositPreauth(all - featureCredentials); @@ -2794,6 +3672,16 @@ public: // 
Test helpers testHelperFunctions(); + + // Dynamic MPT + testInvalidCreateDynamic(all); + testInvalidSetDynamic(all); + testMutateMPT(all); + testMutateCanLock(all); + testMutateRequireAuth(all); + testMutateCanEscrow(all); + testMutateCanTransfer(all); + testMutateCanClawback(all); } }; diff --git a/src/test/jtx/impl/mpt.cpp b/src/test/jtx/impl/mpt.cpp index 9f7a611feb..f35b1b1ebb 100644 --- a/src/test/jtx/impl/mpt.cpp +++ b/src/test/jtx/impl/mpt.cpp @@ -102,6 +102,8 @@ MPTTester::create(MPTCreate const& arg) jv[sfMaximumAmount] = std::to_string(*arg.maxAmt); if (arg.domainID) jv[sfDomainID] = to_string(*arg.domainID); + if (arg.mutableFlags) + jv[sfMutableFlags] = *arg.mutableFlags; if (submit(arg, jv) != tesSUCCESS) { // Verify issuance doesn't exist @@ -240,19 +242,59 @@ MPTTester::set(MPTSet const& arg) jv[sfDelegate] = arg.delegate->human(); if (arg.domainID) jv[sfDomainID] = to_string(*arg.domainID); - if (submit(arg, jv) == tesSUCCESS && arg.flags.value_or(0)) + if (arg.mutableFlags) + jv[sfMutableFlags] = *arg.mutableFlags; + if (arg.transferFee) + jv[sfTransferFee] = *arg.transferFee; + if (arg.metadata) + jv[sfMPTokenMetadata] = strHex(*arg.metadata); + if (submit(arg, jv) == tesSUCCESS && (arg.flags || arg.mutableFlags)) { auto require = [&](std::optional const& holder, bool unchanged) { auto flags = getFlags(holder); if (!unchanged) { - if (*arg.flags & tfMPTLock) - flags |= lsfMPTLocked; - else if (*arg.flags & tfMPTUnlock) - flags &= ~lsfMPTLocked; - else - Throw("Invalid flags"); + if (arg.flags) + { + if (*arg.flags & tfMPTLock) + flags |= lsfMPTLocked; + else if (*arg.flags & tfMPTUnlock) + flags &= ~lsfMPTLocked; + } + + if (arg.mutableFlags) + { + if (*arg.mutableFlags & tfMPTSetCanLock) + flags |= lsfMPTCanLock; + else if (*arg.mutableFlags & tfMPTClearCanLock) + flags &= ~lsfMPTCanLock; + + if (*arg.mutableFlags & tfMPTSetRequireAuth) + flags |= lsfMPTRequireAuth; + else if (*arg.mutableFlags & tfMPTClearRequireAuth) + flags &= ~lsfMPTRequireAuth; + + if (*arg.mutableFlags & tfMPTSetCanEscrow) + flags |= lsfMPTCanEscrow; + else if (*arg.mutableFlags & tfMPTClearCanEscrow) + flags &= ~lsfMPTCanEscrow; + + if (*arg.mutableFlags & tfMPTSetCanClawback) + flags |= lsfMPTCanClawback; + else if (*arg.mutableFlags & tfMPTClearCanClawback) + flags &= ~lsfMPTCanClawback; + + if (*arg.mutableFlags & tfMPTSetCanTrade) + flags |= lsfMPTCanTrade; + else if (*arg.mutableFlags & tfMPTClearCanTrade) + flags &= ~lsfMPTCanTrade; + + if (*arg.mutableFlags & tfMPTSetCanTransfer) + flags |= lsfMPTCanTransfer; + else if (*arg.mutableFlags & tfMPTClearCanTransfer) + flags &= ~lsfMPTCanTransfer; + } } env_.require(mptflags(*this, flags, holder)); }; @@ -313,6 +355,43 @@ MPTTester::checkFlags( return expectedFlags == getFlags(holder); } +[[nodiscard]] bool +MPTTester::checkMetadata(std::string const& metadata) const +{ + return forObject([&](SLEP const& sle) -> bool { + if (sle->isFieldPresent(sfMPTokenMetadata)) + return strHex(sle->getFieldVL(sfMPTokenMetadata)) == + strHex(metadata); + return false; + }); +} + +[[nodiscard]] bool +MPTTester::isMetadataPresent() const +{ + return forObject([&](SLEP const& sle) -> bool { + return sle->isFieldPresent(sfMPTokenMetadata); + }); +} + +[[nodiscard]] bool +MPTTester::checkTransferFee(std::uint16_t transferFee) const +{ + return forObject([&](SLEP const& sle) -> bool { + if (sle->isFieldPresent(sfTransferFee)) + return sle->getFieldU16(sfTransferFee) == transferFee; + return false; + }); +} + +[[nodiscard]] bool +MPTTester::isTransferFeePresent() 
const +{ + return forObject([&](SLEP const& sle) -> bool { + return sle->isFieldPresent(sfTransferFee); + }); +} + void MPTTester::pay( Account const& src, diff --git a/src/test/jtx/mpt.h b/src/test/jtx/mpt.h index 4756ca723d..2eacac68ec 100644 --- a/src/test/jtx/mpt.h +++ b/src/test/jtx/mpt.h @@ -106,6 +106,7 @@ struct MPTCreate std::optional holderCount = std::nullopt; bool fund = true; std::optional flags = {0}; + std::optional mutableFlags = std::nullopt; std::optional domainID = std::nullopt; std::optional err = std::nullopt; }; @@ -139,6 +140,9 @@ struct MPTSet std::optional ownerCount = std::nullopt; std::optional holderCount = std::nullopt; std::optional flags = std::nullopt; + std::optional mutableFlags = std::nullopt; + std::optional transferFee = std::nullopt; + std::optional metadata = std::nullopt; std::optional delegate = std::nullopt; std::optional domainID = std::nullopt; std::optional err = std::nullopt; @@ -182,6 +186,18 @@ public: uint32_t const expectedFlags, std::optional const& holder = std::nullopt) const; + [[nodiscard]] bool + checkMetadata(std::string const& metadata) const; + + [[nodiscard]] bool + isMetadataPresent() const; + + [[nodiscard]] bool + checkTransferFee(std::uint16_t transferFee) const; + + [[nodiscard]] bool + isTransferFeePresent() const; + Account const& issuer() const { diff --git a/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.cpp b/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.cpp index da3b57c8fe..6a6e598f42 100644 --- a/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.cpp +++ b/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.cpp @@ -36,9 +36,17 @@ MPTokenIssuanceCreate::preflight(PreflightContext const& ctx) ctx.rules.enabled(featureSingleAssetVault))) return temDISABLED; + if (ctx.tx.isFieldPresent(sfMutableFlags) && + !ctx.rules.enabled(featureDynamicMPT)) + return temDISABLED; + if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) return ret; + if (auto const mutableFlags = ctx.tx[~sfMutableFlags]; mutableFlags && + (!*mutableFlags || *mutableFlags & tfMPTokenIssuanceCreateMutableMask)) + return temINVALID_FLAG; + if (ctx.tx.getFlags() & tfMPTokenIssuanceCreateMask) return temINVALID_FLAG; @@ -132,6 +140,9 @@ MPTokenIssuanceCreate::create( if (args.domainId) (*mptIssuance)[sfDomainID] = *args.domainId; + if (args.mutableFlags) + (*mptIssuance)[sfMutableFlags] = *args.mutableFlags; + view.insert(mptIssuance); } @@ -158,6 +169,7 @@ MPTokenIssuanceCreate::doApply() .transferFee = tx[~sfTransferFee], .metadata = tx[~sfMPTokenMetadata], .domainId = tx[~sfDomainID], + .mutableFlags = tx[~sfMutableFlags], }); return result ? 
tesSUCCESS : result.error(); } diff --git a/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.h b/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.h index ea01908dff..0527b9602f 100644 --- a/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.h +++ b/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.h @@ -38,6 +38,7 @@ struct MPTCreateArgs std::optional transferFee{}; std::optional const& metadata{}; std::optional domainId{}; + std::optional mutableFlags{}; }; class MPTokenIssuanceCreate : public Transactor diff --git a/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp b/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp index e05862af37..83b771c705 100644 --- a/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp +++ b/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp @@ -26,6 +26,24 @@ namespace ripple { +// Maps set/clear mutable flags in an MPTokenIssuanceSet transaction to the +// corresponding ledger mutable flags that control whether the change is +// allowed. +struct MPTMutabilityFlags +{ + std::uint32_t setFlag; + std::uint32_t clearFlag; + std::uint32_t canMutateFlag; +}; + +static constexpr std::array mptMutabilityFlags = { + {{tfMPTSetCanLock, tfMPTClearCanLock, lsfMPTCanMutateCanLock}, + {tfMPTSetRequireAuth, tfMPTClearRequireAuth, lsfMPTCanMutateRequireAuth}, + {tfMPTSetCanEscrow, tfMPTClearCanEscrow, lsfMPTCanMutateCanEscrow}, + {tfMPTSetCanTrade, tfMPTClearCanTrade, lsfMPTCanMutateCanTrade}, + {tfMPTSetCanTransfer, tfMPTClearCanTransfer, lsfMPTCanMutateCanTransfer}, + {tfMPTSetCanClawback, tfMPTClearCanClawback, lsfMPTCanMutateCanClawback}}}; + NotTEC MPTokenIssuanceSet::preflight(PreflightContext const& ctx) { @@ -37,6 +55,14 @@ MPTokenIssuanceSet::preflight(PreflightContext const& ctx) ctx.rules.enabled(featureSingleAssetVault))) return temDISABLED; + auto const mutableFlags = ctx.tx[~sfMutableFlags]; + auto const metadata = ctx.tx[~sfMPTokenMetadata]; + auto const transferFee = ctx.tx[~sfTransferFee]; + auto const isMutate = mutableFlags || metadata || transferFee; + + if (isMutate && !ctx.rules.enabled(featureDynamicMPT)) + return temDISABLED; + if (ctx.tx.isFieldPresent(sfDomainID) && ctx.tx.isFieldPresent(sfHolder)) return temMALFORMED; @@ -57,13 +83,54 @@ MPTokenIssuanceSet::preflight(PreflightContext const& ctx) if (holderID && accountID == holderID) return temMALFORMED; - if (ctx.rules.enabled(featureSingleAssetVault)) + if (ctx.rules.enabled(featureSingleAssetVault) || + ctx.rules.enabled(featureDynamicMPT)) { // Is this transaction actually changing anything ? 
- if (txFlags == 0 && !ctx.tx.isFieldPresent(sfDomainID)) + if (txFlags == 0 && !ctx.tx.isFieldPresent(sfDomainID) && !isMutate) return temMALFORMED; } + if (ctx.rules.enabled(featureDynamicMPT)) + { + // Holder field is not allowed when mutating MPTokenIssuance + if (isMutate && holderID) + return temMALFORMED; + + // Can not set flags when mutating MPTokenIssuance + if (isMutate && (txFlags & tfUniversalMask)) + return temMALFORMED; + + if (transferFee && *transferFee > maxTransferFee) + return temBAD_TRANSFER_FEE; + + if (metadata && metadata->length() > maxMPTokenMetadataLength) + return temMALFORMED; + + if (mutableFlags) + { + if (!*mutableFlags || + (*mutableFlags & tfMPTokenIssuanceSetMutableMask)) + return temINVALID_FLAG; + + // Can not set and clear the same flag + if (std::any_of( + mptMutabilityFlags.begin(), + mptMutabilityFlags.end(), + [mutableFlags](auto const& f) { + return (*mutableFlags & f.setFlag) && + (*mutableFlags & f.clearFlag); + })) + return temINVALID_FLAG; + + // Trying to set a non-zero TransferFee and clear MPTCanTransfer + // in the same transaction is not allowed. + if (transferFee.value_or(0) && + (*mutableFlags & tfMPTClearCanTransfer)) + return temMALFORMED; + } + } + return preflight2(ctx); } @@ -116,7 +183,8 @@ MPTokenIssuanceSet::preclaim(PreclaimContext const& ctx) if (!sleMptIssuance->isFlag(lsfMPTCanLock)) { // For readability two separate `if` rather than `||` of two conditions - if (!ctx.view.rules().enabled(featureSingleAssetVault)) + if (!ctx.view.rules().enabled(featureSingleAssetVault) && + !ctx.view.rules().enabled(featureDynamicMPT)) return tecNO_PERMISSION; else if (ctx.tx.isFlag(tfMPTLock) || ctx.tx.isFlag(tfMPTUnlock)) return tecNO_PERMISSION; @@ -152,6 +220,44 @@ MPTokenIssuanceSet::preclaim(PreclaimContext const& ctx) } } + // sfMutableFlags is soeDEFAULT, defaulting to 0 if not specified on + // the ledger. + auto const currentMutableFlags = + sleMptIssuance->getFieldU32(sfMutableFlags); + + auto isMutableFlag = [&](std::uint32_t mutableFlag) -> bool { + return currentMutableFlags & mutableFlag; + }; + + if (auto const mutableFlags = ctx.tx[~sfMutableFlags]) + { + if (std::any_of( + mptMutabilityFlags.begin(), + mptMutabilityFlags.end(), + [mutableFlags, &isMutableFlag](auto const& f) { + return !isMutableFlag(f.canMutateFlag) && + ((*mutableFlags & (f.setFlag | f.clearFlag))); + })) + return tecNO_PERMISSION; + } + + if (!isMutableFlag(lsfMPTCanMutateMetadata) && + ctx.tx.isFieldPresent(sfMPTokenMetadata)) + return tecNO_PERMISSION; + + if (auto const fee = ctx.tx[~sfTransferFee]) + { + // A non-zero TransferFee is only valid if the lsfMPTCanTransfer flag + // was previously enabled (at issuance or via a prior mutation). Setting + // it by tfMPTSetCanTransfer in the current transaction does not meet + // this requirement. + if (fee > 0u && !sleMptIssuance->isFlag(lsfMPTCanTransfer)) + return tecNO_PERMISSION; + + if (!isMutableFlag(lsfMPTCanMutateTransferFee)) + return tecNO_PERMISSION; + } + return tesSUCCESS; } @@ -180,9 +286,47 @@ MPTokenIssuanceSet::doApply() else if (txFlags & tfMPTUnlock) flagsOut &= ~lsfMPTLocked; + if (auto const mutableFlags = ctx_.tx[~sfMutableFlags].value_or(0)) + { + for (auto const& f : mptMutabilityFlags) + { + if (mutableFlags & f.setFlag) + flagsOut |= f.canMutateFlag; + else if (mutableFlags & f.clearFlag) + flagsOut &= ~f.canMutateFlag; + } + + if (mutableFlags & tfMPTClearCanTransfer) + { + // If the lsfMPTCanTransfer flag is being cleared, then also clear + // the TransferFee field. 
+ sle->makeFieldAbsent(sfTransferFee); + } + } + if (flagsIn != flagsOut) sle->setFieldU32(sfFlags, flagsOut); + if (auto const transferFee = ctx_.tx[~sfTransferFee]) + { + // TransferFee uses soeDEFAULT style: + // - If the field is absent, it is interpreted as 0. + // - If the field is present, it must be non-zero. + // Therefore, when TransferFee is 0, the field should be removed. + if (transferFee == 0) + sle->makeFieldAbsent(sfTransferFee); + else + sle->setFieldU16(sfTransferFee, *transferFee); + } + + if (auto const metadata = ctx_.tx[~sfMPTokenMetadata]) + { + if (metadata->empty()) + sle->makeFieldAbsent(sfMPTokenMetadata); + else + sle->setFieldVL(sfMPTokenMetadata, *metadata); + } + if (domainID) { // This is enforced in preflight. From 17a2606591cf4f45b03b128630895ebef66b42bd Mon Sep 17 00:00:00 2001 From: Vito Tumas <5780819+Tapanito@users.noreply.github.com> Date: Tue, 16 Sep 2025 11:51:55 +0200 Subject: [PATCH 185/244] Bugfix: Adds graceful peer disconnection (#5669) The XRPL establishes connections in three stages: first a TCP connection, then a TLS/SSL handshake to secure the connection, and finally an upgrade to the bespoke XRP Ledger peer-to-peer protocol. During connection termination, xrpld directly closes the TCP connection, bypassing the TLS/SSL shutdown handshake. This makes peer disconnection diagnostics more difficult - abrupt TCP termination appears as if the peer crashed rather than disconnected gracefully. This change refactors the connection lifecycle with the following changes: - Enhanced outgoing connection logic with granular timeouts for each connection stage (TCP, TLS, XRPL handshake) to improve diagnostic capabilities - Updated both PeerImp and ConnectAttempt to use proper asynchronous TLS shutdown procedures for graceful connection termination --- src/xrpld/overlay/detail/ConnectAttempt.cpp | 488 ++++++++++++++------ src/xrpld/overlay/detail/ConnectAttempt.h | 208 ++++++++- src/xrpld/overlay/detail/PeerImp.cpp | 397 +++++++++------- src/xrpld/overlay/detail/PeerImp.h | 208 ++++++++- 4 files changed, 966 insertions(+), 335 deletions(-) diff --git a/src/xrpld/overlay/detail/ConnectAttempt.cpp b/src/xrpld/overlay/detail/ConnectAttempt.cpp index 397ac06ba6..c1bc4bb069 100644 --- a/src/xrpld/overlay/detail/ConnectAttempt.cpp +++ b/src/xrpld/overlay/detail/ConnectAttempt.cpp @@ -24,6 +24,8 @@ #include +#include + namespace ripple { ConnectAttempt::ConnectAttempt( @@ -45,6 +47,7 @@ ConnectAttempt::ConnectAttempt( , usage_(usage) , strand_(boost::asio::make_strand(io_context)) , timer_(io_context) + , stepTimer_(io_context) , stream_ptr_(std::make_unique( socket_type(std::forward(io_context)), *context)) @@ -52,14 +55,14 @@ ConnectAttempt::ConnectAttempt( , stream_(*stream_ptr_) , slot_(slot) { - JLOG(journal_.debug()) << "Connect " << remote_endpoint; } ConnectAttempt::~ConnectAttempt() { + // slot_ will be null if we successfully connected + // and transferred ownership to a PeerImp if (slot_ != nullptr) overlay_.peerFinder().on_closed(slot_); - JLOG(journal_.trace()) << "~ConnectAttempt"; } void @@ -68,16 +71,29 @@ ConnectAttempt::stop() if (!strand_.running_in_this_thread()) return boost::asio::post( strand_, std::bind(&ConnectAttempt::stop, shared_from_this())); - if (socket_.is_open()) - { - JLOG(journal_.debug()) << "Stop"; - } - close(); + + if (!socket_.is_open()) + return; + + JLOG(journal_.debug()) << "stop: Stop"; + + shutdown(); } void ConnectAttempt::run() { + if (!strand_.running_in_this_thread()) + return boost::asio::post( + strand_, 
std::bind(&ConnectAttempt::run, shared_from_this())); + + JLOG(journal_.debug()) << "run: connecting to " << remote_endpoint_; + + ioPending_ = true; + + // Allow up to connectTimeout_ seconds to establish remote peer connection + setTimer(ConnectionStep::TcpConnect); + stream_.next_layer().async_connect( remote_endpoint_, boost::asio::bind_executor( @@ -90,61 +106,177 @@ ConnectAttempt::run() //------------------------------------------------------------------------------ +void +ConnectAttempt::shutdown() +{ + XRPL_ASSERT( + strand_.running_in_this_thread(), + "ripple::ConnectAttempt::shutdown: strand in this thread"); + + if (!socket_.is_open()) + return; + + shutdown_ = true; + boost::beast::get_lowest_layer(stream_).cancel(); + + tryAsyncShutdown(); +} + +void +ConnectAttempt::tryAsyncShutdown() +{ + XRPL_ASSERT( + strand_.running_in_this_thread(), + "ripple::ConnectAttempt::tryAsyncShutdown : strand in this thread"); + + if (!shutdown_ || currentStep_ == ConnectionStep::ShutdownStarted) + return; + + if (ioPending_) + return; + + // gracefully shutdown the SSL socket, performing a shutdown handshake + if (currentStep_ != ConnectionStep::TcpConnect && + currentStep_ != ConnectionStep::TlsHandshake) + { + setTimer(ConnectionStep::ShutdownStarted); + return stream_.async_shutdown(bind_executor( + strand_, + std::bind( + &ConnectAttempt::onShutdown, + shared_from_this(), + std::placeholders::_1))); + } + + close(); +} + +void +ConnectAttempt::onShutdown(error_code ec) +{ + cancelTimer(); + + if (ec) + { + // - eof: the stream was cleanly closed + // - operation_aborted: an expired timer (slow shutdown) + // - stream_truncated: the tcp connection closed (no handshake) it could + // occur if a peer does not perform a graceful disconnect + // - broken_pipe: the peer is gone + // - application data after close notify: benign SSL shutdown condition + bool shouldLog = + (ec != boost::asio::error::eof && + ec != boost::asio::error::operation_aborted && + ec.message().find("application data after close notify") == + std::string::npos); + + if (shouldLog) + { + JLOG(journal_.debug()) << "onShutdown: " << ec.message(); + } + } + + close(); +} + void ConnectAttempt::close() { XRPL_ASSERT( strand_.running_in_this_thread(), "ripple::ConnectAttempt::close : strand in this thread"); - if (socket_.is_open()) - { - try - { - timer_.cancel(); - socket_.close(); - } - catch (boost::system::system_error const&) - { - // ignored - } + if (!socket_.is_open()) + return; - JLOG(journal_.debug()) << "Closed"; - } + cancelTimer(); + + error_code ec; + socket_.close(ec); } void ConnectAttempt::fail(std::string const& reason) { JLOG(journal_.debug()) << reason; - close(); + shutdown(); } void ConnectAttempt::fail(std::string const& name, error_code ec) { JLOG(journal_.debug()) << name << ": " << ec.message(); - close(); + shutdown(); } void -ConnectAttempt::setTimer() +ConnectAttempt::setTimer(ConnectionStep step) { - try + currentStep_ = step; + + // Set global timer (only if not already set) + if (timer_.expiry() == std::chrono::steady_clock::time_point{}) { - timer_.expires_after(std::chrono::seconds(15)); - } - catch (boost::system::system_error const& e) - { - JLOG(journal_.error()) << "setTimer: " << e.code(); - return; + try + { + timer_.expires_after(connectTimeout); + timer_.async_wait(boost::asio::bind_executor( + strand_, + std::bind( + &ConnectAttempt::onTimer, + shared_from_this(), + std::placeholders::_1))); + } + catch (std::exception const& ex) + { + JLOG(journal_.error()) << "setTimer (global): " << 
ex.what(); + return close(); + } } - timer_.async_wait(boost::asio::bind_executor( - strand_, - std::bind( - &ConnectAttempt::onTimer, - shared_from_this(), - std::placeholders::_1))); + // Set step-specific timer + try + { + std::chrono::seconds stepTimeout; + switch (step) + { + case ConnectionStep::TcpConnect: + stepTimeout = StepTimeouts::tcpConnect; + break; + case ConnectionStep::TlsHandshake: + stepTimeout = StepTimeouts::tlsHandshake; + break; + case ConnectionStep::HttpWrite: + stepTimeout = StepTimeouts::httpWrite; + break; + case ConnectionStep::HttpRead: + stepTimeout = StepTimeouts::httpRead; + break; + case ConnectionStep::ShutdownStarted: + stepTimeout = StepTimeouts::tlsShutdown; + break; + case ConnectionStep::Complete: + case ConnectionStep::Init: + return; // No timer needed for init or complete step + } + + // call to expires_after cancels previous timer + stepTimer_.expires_after(stepTimeout); + stepTimer_.async_wait(boost::asio::bind_executor( + strand_, + std::bind( + &ConnectAttempt::onTimer, + shared_from_this(), + std::placeholders::_1))); + + JLOG(journal_.trace()) << "setTimer: " << stepToString(step) + << " timeout=" << stepTimeout.count() << "s"; + } + catch (std::exception const& ex) + { + JLOG(journal_.error()) + << "setTimer (step " << stepToString(step) << "): " << ex.what(); + return close(); + } } void @@ -153,6 +285,7 @@ ConnectAttempt::cancelTimer() try { timer_.cancel(); + stepTimer_.cancel(); } catch (boost::system::system_error const&) { @@ -165,34 +298,69 @@ ConnectAttempt::onTimer(error_code ec) { if (!socket_.is_open()) return; - if (ec == boost::asio::error::operation_aborted) - return; + if (ec) { + // do not initiate shutdown, timers are frequently cancelled + if (ec == boost::asio::error::operation_aborted) + return; + // This should never happen JLOG(journal_.error()) << "onTimer: " << ec.message(); return close(); } - fail("Timeout"); + + // Determine which timer expired by checking their expiry times + auto const now = std::chrono::steady_clock::now(); + bool globalExpired = (timer_.expiry() <= now); + bool stepExpired = (stepTimer_.expiry() <= now); + + if (globalExpired) + { + JLOG(journal_.debug()) + << "onTimer: Global timeout; step: " << stepToString(currentStep_); + } + else if (stepExpired) + { + JLOG(journal_.debug()) + << "onTimer: Step timeout; step: " << stepToString(currentStep_); + } + else + { + JLOG(journal_.warn()) << "onTimer: Unexpected timer callback"; + } + + close(); } void ConnectAttempt::onConnect(error_code ec) { - cancelTimer(); + ioPending_ = false; - if (ec == boost::asio::error::operation_aborted) - return; - endpoint_type local_endpoint; - if (!ec) - local_endpoint = socket_.local_endpoint(ec); if (ec) + { + if (ec == boost::asio::error::operation_aborted) + return tryAsyncShutdown(); + return fail("onConnect", ec); + } + if (!socket_.is_open()) return; - JLOG(journal_.trace()) << "onConnect"; - setTimer(); + // check if connection has really been established + socket_.local_endpoint(ec); + if (ec) + return fail("onConnect", ec); + + if (shutdown_) + return tryAsyncShutdown(); + + ioPending_ = true; + + setTimer(ConnectionStep::TlsHandshake); + stream_.set_verify_mode(boost::asio::ssl::verify_none); stream_.async_handshake( boost::asio::ssl::stream_base::client, @@ -207,25 +375,30 @@ ConnectAttempt::onConnect(error_code ec) void ConnectAttempt::onHandshake(error_code ec) { - cancelTimer(); - if (!socket_.is_open()) - return; - if (ec == boost::asio::error::operation_aborted) - return; - endpoint_type 
local_endpoint; - if (!ec) - local_endpoint = socket_.local_endpoint(ec); + ioPending_ = false; + + if (ec) + { + if (ec == boost::asio::error::operation_aborted) + return tryAsyncShutdown(); + + return fail("onHandshake", ec); + } + + auto const local_endpoint = socket_.local_endpoint(ec); if (ec) return fail("onHandshake", ec); - JLOG(journal_.trace()) << "onHandshake"; + setTimer(ConnectionStep::HttpWrite); + + // check if we connected to ourselves if (!overlay_.peerFinder().onConnected( slot_, beast::IPAddressConversion::from_asio(local_endpoint))) - return fail("Duplicate connection"); + return fail("Self connection"); auto const sharedValue = makeSharedValue(*stream_ptr_, journal_); if (!sharedValue) - return close(); // makeSharedValue logs + return shutdown(); // makeSharedValue logs req_ = makeRequest( !overlay_.peerFinder().config().peerPrivate, @@ -242,7 +415,11 @@ ConnectAttempt::onHandshake(error_code ec) remote_endpoint_.address(), app_); - setTimer(); + if (shutdown_) + return tryAsyncShutdown(); + + ioPending_ = true; + boost::beast::http::async_write( stream_, req_, @@ -257,13 +434,23 @@ ConnectAttempt::onHandshake(error_code ec) void ConnectAttempt::onWrite(error_code ec) { - cancelTimer(); - if (!socket_.is_open()) - return; - if (ec == boost::asio::error::operation_aborted) - return; + ioPending_ = false; + if (ec) + { + if (ec == boost::asio::error::operation_aborted) + return tryAsyncShutdown(); + return fail("onWrite", ec); + } + + if (shutdown_) + return tryAsyncShutdown(); + + ioPending_ = true; + + setTimer(ConnectionStep::HttpRead); + boost::beast::http::async_read( stream_, read_buf_, @@ -280,39 +467,27 @@ void ConnectAttempt::onRead(error_code ec) { cancelTimer(); + ioPending_ = false; + currentStep_ = ConnectionStep::Complete; - if (!socket_.is_open()) - return; - if (ec == boost::asio::error::operation_aborted) - return; - if (ec == boost::asio::error::eof) - { - JLOG(journal_.info()) << "EOF"; - setTimer(); - return stream_.async_shutdown(boost::asio::bind_executor( - strand_, - std::bind( - &ConnectAttempt::onShutdown, - shared_from_this(), - std::placeholders::_1))); - } if (ec) - return fail("onRead", ec); - processResponse(); -} - -void -ConnectAttempt::onShutdown(error_code ec) -{ - cancelTimer(); - if (!ec) { - JLOG(journal_.error()) << "onShutdown: expected error condition"; - return close(); + if (ec == boost::asio::error::eof) + { + JLOG(journal_.debug()) << "EOF"; + return shutdown(); + } + + if (ec == boost::asio::error::operation_aborted) + return tryAsyncShutdown(); + + return fail("onRead", ec); } - if (ec != boost::asio::error::eof) - return fail("onShutdown", ec); - close(); + + if (shutdown_) + return tryAsyncShutdown(); + + processResponse(); } //-------------------------------------------------------------------------- @@ -320,48 +495,69 @@ ConnectAttempt::onShutdown(error_code ec) void ConnectAttempt::processResponse() { - if (response_.result() == boost::beast::http::status::service_unavailable) - { - Json::Value json; - Json::Reader r; - std::string s; - s.reserve(boost::asio::buffer_size(response_.body().data())); - for (auto const buffer : response_.body().data()) - s.append( - static_cast(buffer.data()), - boost::asio::buffer_size(buffer)); - auto const success = r.parse(s, json); - if (success) - { - if (json.isObject() && json.isMember("peer-ips")) - { - Json::Value const& ips = json["peer-ips"]; - if (ips.isArray()) - { - std::vector eps; - eps.reserve(ips.size()); - for (auto const& v : ips) - { - if (v.isString()) - { - 
error_code ec; - auto const ep = parse_endpoint(v.asString(), ec); - if (!ec) - eps.push_back(ep); - } - } - overlay_.peerFinder().onRedirects(remote_endpoint_, eps); - } - } - } - } - if (!OverlayImpl::isPeerUpgrade(response_)) { - JLOG(journal_.info()) - << "Unable to upgrade to peer protocol: " << response_.result() - << " (" << response_.reason() << ")"; - return close(); + // A peer may respond with service_unavailable and a list of alternative + // peers to connect to, a differing status code is unexpected + if (response_.result() != + boost::beast::http::status::service_unavailable) + { + JLOG(journal_.warn()) + << "Unable to upgrade to peer protocol: " << response_.result() + << " (" << response_.reason() << ")"; + return shutdown(); + } + + // Parse response body to determine if this is a redirect or other + // service unavailable + std::string responseBody; + responseBody.reserve(boost::asio::buffer_size(response_.body().data())); + for (auto const buffer : response_.body().data()) + responseBody.append( + static_cast(buffer.data()), + boost::asio::buffer_size(buffer)); + + Json::Value json; + Json::Reader reader; + auto const isValidJson = reader.parse(responseBody, json); + + // Check if this is a redirect response (contains peer-ips field) + auto const isRedirect = + isValidJson && json.isObject() && json.isMember("peer-ips"); + + if (!isRedirect) + { + JLOG(journal_.warn()) + << "processResponse: " << remote_endpoint_ + << " failed to upgrade to peer protocol: " << response_.result() + << " (" << response_.reason() << ")"; + + return shutdown(); + } + + Json::Value const& peerIps = json["peer-ips"]; + if (!peerIps.isArray()) + return fail("processResponse: invalid peer-ips format"); + + // Extract and validate peer endpoints + std::vector redirectEndpoints; + redirectEndpoints.reserve(peerIps.size()); + + for (auto const& ipValue : peerIps) + { + if (!ipValue.isString()) + continue; + + error_code ec; + auto const endpoint = parse_endpoint(ipValue.asString(), ec); + if (!ec) + redirectEndpoints.push_back(endpoint); + } + + // Notify PeerFinder about the redirect redirectEndpoints may be empty + overlay_.peerFinder().onRedirects(remote_endpoint_, redirectEndpoints); + + return fail("processResponse: failed to connect to peer: redirected"); } // Just because our peer selected a particular protocol version doesn't @@ -381,11 +577,11 @@ ConnectAttempt::processResponse() auto const sharedValue = makeSharedValue(*stream_ptr_, journal_); if (!sharedValue) - return close(); // makeSharedValue logs + return shutdown(); // makeSharedValue logs try { - auto publicKey = verifyHandshake( + auto const publicKey = verifyHandshake( response_, *sharedValue, overlay_.setup().networkID, @@ -393,11 +589,10 @@ ConnectAttempt::processResponse() remote_endpoint_.address(), app_); - JLOG(journal_.info()) - << "Public Key: " << toBase58(TokenType::NodePublic, publicKey); - JLOG(journal_.debug()) << "Protocol: " << to_string(*negotiatedProtocol); + JLOG(journal_.info()) + << "Public Key: " << toBase58(TokenType::NodePublic, publicKey); auto const member = app_.cluster().member(publicKey); if (member) @@ -405,10 +600,21 @@ ConnectAttempt::processResponse() JLOG(journal_.info()) << "Cluster name: " << *member; } - auto const result = overlay_.peerFinder().activate( - slot_, publicKey, static_cast(member)); + auto const result = + overlay_.peerFinder().activate(slot_, publicKey, !member->empty()); if (result != PeerFinder::Result::success) - return fail("Outbound " + std::string(to_string(result))); + { 
+ std::stringstream ss; + ss << "Outbound Connect Attempt " << remote_endpoint_ << " " + << to_string(result); + return fail(ss.str()); + } + + if (!socket_.is_open()) + return; + + if (shutdown_) + return tryAsyncShutdown(); auto const peer = std::make_shared( app_, diff --git a/src/xrpld/overlay/detail/ConnectAttempt.h b/src/xrpld/overlay/detail/ConnectAttempt.h index febbe88f45..38b9482d9d 100644 --- a/src/xrpld/overlay/detail/ConnectAttempt.h +++ b/src/xrpld/overlay/detail/ConnectAttempt.h @@ -22,90 +22,258 @@ #include +#include + namespace ripple { -/** Manages an outbound connection attempt. */ +/** + * @class ConnectAttempt + * @brief Manages outbound peer connection attempts with comprehensive timeout + * handling + * + * The ConnectAttempt class handles the complete lifecycle of establishing an + * outbound connection to a peer in the XRPL network. It implements a + * sophisticated dual-timer system that provides both global timeout protection + * and per-step timeout diagnostics. + * + * The connection establishment follows these steps: + * 1. **TCP Connect**: Establish basic network connection + * 2. **TLS Handshake**: Negotiate SSL/TLS encryption + * 3. **HTTP Write**: Send peer handshake request + * 4. **HTTP Read**: Receive and validate peer response + * 5. **Complete**: Connection successfully established + * + * Uses a hybrid timeout approach: + * - **Global Timer**: Hard limit (20s) for entire connection process + * - **Step Timers**: Individual timeouts for each connection phase + * + * - All errors result in connection termination + * + * All operations are serialized using boost::asio::strand to ensure thread + * safety. The class is designed to be used exclusively within the ASIO event + * loop. + * + * @note This class should not be used directly. It is managed by OverlayImpl + * as part of the peer discovery and connection management system. + * + */ class ConnectAttempt : public OverlayImpl::Child, public std::enable_shared_from_this { private: using error_code = boost::system::error_code; - using endpoint_type = boost::asio::ip::tcp::endpoint; - using request_type = boost::beast::http::request; - using response_type = boost::beast::http::response; - using socket_type = boost::asio::ip::tcp::socket; using middle_type = boost::beast::tcp_stream; using stream_type = boost::beast::ssl_stream; using shared_context = std::shared_ptr; + /** + * @enum ConnectionStep + * @brief Represents the current phase of the connection establishment + * process + * + * Used for tracking progress and providing detailed timeout diagnostics. + * Each step has its own timeout value defined in StepTimeouts. + */ + enum class ConnectionStep { + Init, // Initial state, nothing started + TcpConnect, // Establishing TCP connection to remote peer + TlsHandshake, // Performing SSL/TLS handshake + HttpWrite, // Sending HTTP upgrade request + HttpRead, // Reading HTTP upgrade response + Complete, // Connection successfully established + ShutdownStarted // Connection shutdown has started + }; + + // A timeout for connection process, greater than all step timeouts + static constexpr std::chrono::seconds connectTimeout{25}; + + /** + * @struct StepTimeouts + * @brief Defines timeout values for each connection step + * + * These timeouts are designed to detect slow individual phases while + * allowing the global timeout to enforce the overall time limit. 
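+     *
+     * For orientation, the per-step budgets declared below sum to
+     * 8 + 8 + 3 + 3 + 2 = 24 seconds, just under the 25 second
+     * connectTimeout declared above, so the global timer acts as a
+     * backstop for the sequence as a whole rather than for any single step.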
+ */ + struct StepTimeouts + { + // TCP connection timeout + static constexpr std::chrono::seconds tcpConnect{8}; + // SSL handshake timeout + static constexpr std::chrono::seconds tlsHandshake{8}; + // HTTP write timeout + static constexpr std::chrono::seconds httpWrite{3}; + // HTTP read timeout + static constexpr std::chrono::seconds httpRead{3}; + // SSL shutdown timeout + static constexpr std::chrono::seconds tlsShutdown{2}; + }; + + // Core application and networking components Application& app_; - std::uint32_t const id_; + Peer::id_t const id_; beast::WrappedSink sink_; beast::Journal const journal_; endpoint_type remote_endpoint_; Resource::Consumer usage_; + boost::asio::strand strand_; boost::asio::basic_waitable_timer timer_; - std::unique_ptr stream_ptr_; + boost::asio::basic_waitable_timer stepTimer_; + + std::unique_ptr stream_ptr_; // SSL stream (owned) socket_type& socket_; stream_type& stream_; boost::beast::multi_buffer read_buf_; + response_type response_; std::shared_ptr slot_; request_type req_; + bool shutdown_ = false; // Shutdown has been initiated + bool ioPending_ = false; // Async I/O operation in progress + ConnectionStep currentStep_ = ConnectionStep::Init; + public: + /** + * @brief Construct a new ConnectAttempt object + * + * @param app Application context providing configuration and services + * @param io_context ASIO I/O context for async operations + * @param remote_endpoint Target peer endpoint to connect to + * @param usage Resource usage tracker for rate limiting + * @param context Shared SSL context for encryption + * @param id Unique peer identifier for this connection attempt + * @param slot PeerFinder slot representing this connection + * @param journal Logging interface for diagnostics + * @param overlay Parent overlay manager + * + * @note The constructor only initializes the object. Call run() to begin + * the actual connection attempt. + */ ConnectAttempt( Application& app, boost::asio::io_context& io_context, endpoint_type const& remote_endpoint, Resource::Consumer usage, shared_context const& context, - std::uint32_t id, + Peer::id_t id, std::shared_ptr const& slot, beast::Journal journal, OverlayImpl& overlay); ~ConnectAttempt(); + /** + * @brief Stop the connection attempt + * + * This method is thread-safe and can be called from any thread. + */ void stop() override; + /** + * @brief Begin the connection attempt + * + * This method is thread-safe and posts to the strand if needed. + */ void run(); private: + /** + * @brief Set timers for the specified connection step + * + * @param step The connection step to set timers for + * + * Sets both the step-specific timer and the global timer (if not already + * set). + */ void - close(); - void - fail(std::string const& reason); - void - fail(std::string const& name, error_code ec); - void - setTimer(); + setTimer(ConnectionStep step); + + /** + * @brief Cancel both global and step timers + * + * Used during cleanup and when connection completes successfully. + * Exceptions from timer cancellation are safely ignored. + */ void cancelTimer(); + + /** + * @brief Handle timer expiration events + * + * @param ec Error code from timer operation + * + * Determines which timer expired (global vs step) and logs appropriate + * diagnostic information before terminating the connection. 
+ */ void onTimer(error_code ec); + + // Connection phase handlers void - onConnect(error_code ec); + onConnect(error_code ec); // TCP connection completion handler void - onHandshake(error_code ec); + onHandshake(error_code ec); // TLS handshake completion handler void - onWrite(error_code ec); + onWrite(error_code ec); // HTTP write completion handler void - onRead(error_code ec); + onRead(error_code ec); // HTTP read completion handler + + // Error and cleanup handlers void - onShutdown(error_code ec); + fail(std::string const& reason); // Fail with custom reason + void + fail(std::string const& name, error_code ec); // Fail with system error + void + shutdown(); // Initiate graceful shutdown + void + tryAsyncShutdown(); // Attempt async SSL shutdown + void + onShutdown(error_code ec); // SSL shutdown completion handler + void + close(); // Force close socket + + /** + * @brief Process the HTTP upgrade response from peer + * + * Validates the peer's response, extracts protocol information, + * verifies handshake, and either creates a PeerImp or handles + * redirect responses. + */ void processResponse(); + static std::string + stepToString(ConnectionStep step) + { + switch (step) + { + case ConnectionStep::Init: + return "Init"; + case ConnectionStep::TcpConnect: + return "TcpConnect"; + case ConnectionStep::TlsHandshake: + return "TlsHandshake"; + case ConnectionStep::HttpWrite: + return "HttpWrite"; + case ConnectionStep::HttpRead: + return "HttpRead"; + case ConnectionStep::Complete: + return "Complete"; + case ConnectionStep::ShutdownStarted: + return "ShutdownStarted"; + } + return "Unknown"; + }; + template static boost::asio::ip::tcp::endpoint parse_endpoint(std::string const& s, boost::system::error_code& ec) diff --git a/src/xrpld/overlay/detail/PeerImp.cpp b/src/xrpld/overlay/detail/PeerImp.cpp index 2cd9432eb8..93371f42ab 100644 --- a/src/xrpld/overlay/detail/PeerImp.cpp +++ b/src/xrpld/overlay/detail/PeerImp.cpp @@ -44,6 +44,7 @@ #include #include +#include #include #include #include @@ -59,6 +60,10 @@ std::chrono::milliseconds constexpr peerHighLatency{300}; /** How often we PING the peer to check for latency and sendq probe */ std::chrono::seconds constexpr peerTimerInterval{60}; + +/** The timeout for a shutdown timer */ +std::chrono::seconds constexpr shutdownTimerInterval{5}; + } // namespace // TODO: Remove this exclusion once unit tests are added after the hotfix @@ -215,23 +220,17 @@ PeerImp::stop() { if (!strand_.running_in_this_thread()) return post(strand_, std::bind(&PeerImp::stop, shared_from_this())); - if (socket_.is_open()) - { - // The rationale for using different severity levels is that - // outbound connections are under our control and may be logged - // at a higher level, but inbound connections are more numerous and - // uncontrolled so to prevent log flooding the severity is reduced. - // - if (inbound_) - { - JLOG(journal_.debug()) << "Stop"; - } - else - { - JLOG(journal_.info()) << "Stop"; - } - } - close(); + + if (!socket_.is_open()) + return; + + // The rationale for using different severity levels is that + // outbound connections are under our control and may be logged + // at a higher level, but inbound connections are more numerous and + // uncontrolled so to prevent log flooding the severity is reduced. 
+    JLOG(journal_.debug()) << "stop: Stop";
+
+    shutdown();
 }
 
 //------------------------------------------------------------------------------
@@ -241,11 +240,14 @@ PeerImp::send(std::shared_ptr const& m)
 {
     if (!strand_.running_in_this_thread())
         return post(strand_, std::bind(&PeerImp::send, shared_from_this(), m));
-    if (gracefulClose_)
-        return;
-    if (detaching_)
+
+    if (!socket_.is_open())
         return;
 
+    // we are in the process of closing the connection
+    if (shutdown_)
+        return tryAsyncShutdown();
+
     auto validator = m->getValidatorKey();
     if (validator && !squelch_.expireSquelch(*validator))
     {
@@ -287,6 +289,7 @@ PeerImp::send(std::shared_ptr const& m)
     if (sendq_size != 0)
         return;
 
+    writePending_ = true;
     boost::asio::async_write(
         stream_,
         boost::asio::buffer(
@@ -573,34 +576,21 @@ PeerImp::hasRange(std::uint32_t uMin, std::uint32_t uMax)
 //------------------------------------------------------------------------------
 
 void
-PeerImp::close()
+PeerImp::fail(std::string const& name, error_code ec)
 {
     XRPL_ASSERT(
         strand_.running_in_this_thread(),
-        "ripple::PeerImp::close : strand in this thread");
-    if (socket_.is_open())
-    {
-        detaching_ = true;  // DEPRECATED
-        try
-        {
-            timer_.cancel();
-            socket_.close();
-        }
-        catch (boost::system::system_error const&)
-        {
-            // ignored
-        }
+        "ripple::PeerImp::fail : strand in this thread");
 
-        overlay_.incPeerDisconnect();
-        if (inbound_)
-        {
-            JLOG(journal_.debug()) << "Closed";
-        }
-        else
-        {
-            JLOG(journal_.info()) << "Closed";
-        }
-    }
+    if (!socket_.is_open())
+        return;
+
+    JLOG(journal_.warn()) << name << " from "
+                          << toBase58(TokenType::NodePublic, publicKey_)
+                          << " at " << remote_address_.to_string() << ": "
+                          << ec.message();
+
+    shutdown();
 }
 
 void
@@ -613,45 +603,39 @@ PeerImp::fail(std::string const& reason)
            (void(Peer::*)(std::string const&)) & PeerImp::fail,
            shared_from_this(),
            reason));
-    if (journal_.active(beast::severities::kWarning) && socket_.is_open())
+
+    if (!socket_.is_open())
+        return;
+
+    // Call to name() locks, so log only if the message will be output
+    if (journal_.active(beast::severities::kWarning))
    {
        std::string const n = name();
        JLOG(journal_.warn()) << (n.empty() ?
remote_address_.to_string() : n) << " failed: " << reason; } - close(); + + shutdown(); } void -PeerImp::fail(std::string const& name, error_code ec) +PeerImp::tryAsyncShutdown() { XRPL_ASSERT( strand_.running_in_this_thread(), - "ripple::PeerImp::fail : strand in this thread"); - if (socket_.is_open()) - { - JLOG(journal_.warn()) - << name << " from " << toBase58(TokenType::NodePublic, publicKey_) - << " at " << remote_address_.to_string() << ": " << ec.message(); - } - close(); -} + "ripple::PeerImp::tryAsyncShutdown : strand in this thread"); -void -PeerImp::gracefulClose() -{ - XRPL_ASSERT( - strand_.running_in_this_thread(), - "ripple::PeerImp::gracefulClose : strand in this thread"); - XRPL_ASSERT( - socket_.is_open(), "ripple::PeerImp::gracefulClose : socket is open"); - XRPL_ASSERT( - !gracefulClose_, - "ripple::PeerImp::gracefulClose : socket is not closing"); - gracefulClose_ = true; - if (send_queue_.size() > 0) + if (!shutdown_ || shutdownStarted_) return; - setTimer(); + + if (readPending_ || writePending_) + return; + + shutdownStarted_ = true; + + setTimer(shutdownTimerInterval); + + // gracefully shutdown the SSL socket, performing a shutdown handshake stream_.async_shutdown(bind_executor( strand_, std::bind( @@ -659,69 +643,125 @@ PeerImp::gracefulClose() } void -PeerImp::setTimer() +PeerImp::shutdown() +{ + XRPL_ASSERT( + strand_.running_in_this_thread(), + "ripple::PeerImp::shutdown: strand in this thread"); + + if (!socket_.is_open() || shutdown_) + return; + + shutdown_ = true; + + boost::beast::get_lowest_layer(stream_).cancel(); + + tryAsyncShutdown(); +} + +void +PeerImp::onShutdown(error_code ec) +{ + cancelTimer(); + if (ec) + { + // - eof: the stream was cleanly closed + // - operation_aborted: an expired timer (slow shutdown) + // - stream_truncated: the tcp connection closed (no handshake) it could + // occur if a peer does not perform a graceful disconnect + // - broken_pipe: the peer is gone + bool shouldLog = + (ec != boost::asio::error::eof && + ec != boost::asio::error::operation_aborted && + ec.message().find("application data after close notify") == + std::string::npos); + + if (shouldLog) + { + JLOG(journal_.debug()) << "onShutdown: " << ec.message(); + } + } + + close(); +} + +void +PeerImp::close() +{ + XRPL_ASSERT( + strand_.running_in_this_thread(), + "ripple::PeerImp::close : strand in this thread"); + + if (!socket_.is_open()) + return; + + cancelTimer(); + + error_code ec; + socket_.close(ec); + + overlay_.incPeerDisconnect(); + + // The rationale for using different severity levels is that + // outbound connections are under our control and may be logged + // at a higher level, but inbound connections are more numerous and + // uncontrolled so to prevent log flooding the severity is reduced. + JLOG((inbound_ ? 
journal_.debug() : journal_.info())) << "close: Closed"; +} + +//------------------------------------------------------------------------------ + +void +PeerImp::setTimer(std::chrono::seconds interval) { try { - timer_.expires_after(peerTimerInterval); + timer_.expires_after(interval); } - catch (boost::system::system_error const& e) + catch (std::exception const& ex) { - JLOG(journal_.error()) << "setTimer: " << e.code(); - return; + JLOG(journal_.error()) << "setTimer: " << ex.what(); + return shutdown(); } + timer_.async_wait(bind_executor( strand_, std::bind( &PeerImp::onTimer, shared_from_this(), std::placeholders::_1))); } -// convenience for ignoring the error code -void -PeerImp::cancelTimer() -{ - try - { - timer_.cancel(); - } - catch (boost::system::system_error const&) - { - // ignored - } -} - -//------------------------------------------------------------------------------ - -std::string -PeerImp::makePrefix(id_t id) -{ - std::stringstream ss; - ss << "[" << std::setfill('0') << std::setw(3) << id << "] "; - return ss.str(); -} - void PeerImp::onTimer(error_code const& ec) { - if (!socket_.is_open()) - return; + XRPL_ASSERT( + strand_.running_in_this_thread(), + "ripple::PeerImp::onTimer : strand in this thread"); - if (ec == boost::asio::error::operation_aborted) + if (!socket_.is_open()) return; if (ec) { + // do not initiate shutdown, timers are frequently cancelled + if (ec == boost::asio::error::operation_aborted) + return; + // This should never happen JLOG(journal_.error()) << "onTimer: " << ec.message(); return close(); } - if (large_sendq_++ >= Tuning::sendqIntervals) + // the timer expired before the shutdown completed + // force close the connection + if (shutdown_) { - fail("Large send queue"); - return; + JLOG(journal_.debug()) << "onTimer: shutdown timer expired"; + return close(); } + if (large_sendq_++ >= Tuning::sendqIntervals) + return fail("Large send queue"); + if (auto const t = tracking_.load(); !inbound_ && t != Tracking::converged) { clock_type::duration duration; @@ -737,17 +777,13 @@ PeerImp::onTimer(error_code const& ec) (duration > app_.config().MAX_UNKNOWN_TIME))) { overlay_.peerFinder().on_failure(slot_); - fail("Not useful"); - return; + return fail("Not useful"); } } // Already waiting for PONG if (lastPingSeq_) - { - fail("Ping Timeout"); - return; - } + return fail("Ping Timeout"); lastPingTime_ = clock_type::now(); lastPingSeq_ = rand_int(); @@ -758,22 +794,28 @@ PeerImp::onTimer(error_code const& ec) send(std::make_shared(message, protocol::mtPING)); - setTimer(); + setTimer(peerTimerInterval); } void -PeerImp::onShutdown(error_code ec) +PeerImp::cancelTimer() noexcept { - cancelTimer(); - // If we don't get eof then something went wrong - if (!ec) + try { - JLOG(journal_.error()) << "onShutdown: expected error condition"; - return close(); + timer_.cancel(); } - if (ec != boost::asio::error::eof) - return fail("onShutdown", ec); - close(); + catch (std::exception const& ex) + { + JLOG(journal_.error()) << "cancelTimer: " << ex.what(); + } +} + +std::string +PeerImp::makePrefix(id_t id) +{ + std::stringstream ss; + ss << "[" << std::setfill('0') << std::setw(3) << id << "] "; + return ss.str(); } //------------------------------------------------------------------------------ @@ -786,6 +828,10 @@ PeerImp::doAccept() JLOG(journal_.debug()) << "doAccept: " << remote_address_; + // a shutdown was initiated before the handshake, there is nothing to do + if (shutdown_) + return tryAsyncShutdown(); + auto const sharedValue = 
makeSharedValue(*stream_ptr_, journal_); // This shouldn't fail since we already computed @@ -793,7 +839,7 @@ PeerImp::doAccept() if (!sharedValue) return fail("makeSharedValue: Unexpected failure"); - JLOG(journal_.info()) << "Protocol: " << to_string(protocol_); + JLOG(journal_.debug()) << "Protocol: " << to_string(protocol_); JLOG(journal_.info()) << "Public Key: " << toBase58(TokenType::NodePublic, publicKey_); @@ -836,7 +882,7 @@ PeerImp::doAccept() if (!socket_.is_open()) return; if (ec == boost::asio::error::operation_aborted) - return; + return tryAsyncShutdown(); if (ec) return fail("onWriteResponse", ec); if (write_buffer->size() == bytes_transferred) @@ -865,6 +911,10 @@ PeerImp::domain() const void PeerImp::doProtocolStart() { + // a shutdown was initiated before the handshare, there is nothing to do + if (shutdown_) + return tryAsyncShutdown(); + onReadMessage(error_code(), 0); // Send all the validator lists that have been loaded @@ -896,30 +946,45 @@ PeerImp::doProtocolStart() if (auto m = overlay_.getManifestsMessage()) send(m); - setTimer(); + setTimer(peerTimerInterval); } // Called repeatedly with protocol message data void PeerImp::onReadMessage(error_code ec, std::size_t bytes_transferred) { + XRPL_ASSERT( + strand_.running_in_this_thread(), + "ripple::PeerImp::onReadMessage : strand in this thread"); + + readPending_ = false; + if (!socket_.is_open()) return; - if (ec == boost::asio::error::operation_aborted) - return; - if (ec == boost::asio::error::eof) - { - JLOG(journal_.info()) << "EOF"; - return gracefulClose(); - } + if (ec) + { + if (ec == boost::asio::error::eof) + { + JLOG(journal_.debug()) << "EOF"; + return shutdown(); + } + + if (ec == boost::asio::error::operation_aborted) + return tryAsyncShutdown(); + return fail("onReadMessage", ec); + } + // we started shutdown, no reason to process further data + if (shutdown_) + return tryAsyncShutdown(); + if (auto stream = journal_.trace()) { - if (bytes_transferred > 0) - stream << "onReadMessage: " << bytes_transferred << " bytes"; - else - stream << "onReadMessage"; + stream << "onReadMessage: " + << (bytes_transferred > 0 + ? 
to_string(bytes_transferred) + " bytes" + : ""); } metrics_.recv.add_message(bytes_transferred); @@ -941,17 +1006,29 @@ PeerImp::onReadMessage(error_code ec, std::size_t bytes_transferred) 350ms, journal_); - if (ec) - return fail("onReadMessage", ec); if (!socket_.is_open()) return; - if (gracefulClose_) - return; + + // the error_code is produced by invokeProtocolMessage + // it could be due to a bad message + if (ec) + return fail("onReadMessage", ec); + if (bytes_consumed == 0) break; + read_buffer_.consume(bytes_consumed); } + // check if a shutdown was initiated while processing messages + if (shutdown_) + return tryAsyncShutdown(); + + readPending_ = true; + + XRPL_ASSERT( + !shutdownStarted_, "ripple::PeerImp::onReadMessage : shutdown started"); + // Timeout on writes only stream_.async_read_some( read_buffer_.prepare(std::max(Tuning::readBufferBytes, hint)), @@ -967,18 +1044,29 @@ PeerImp::onReadMessage(error_code ec, std::size_t bytes_transferred) void PeerImp::onWriteMessage(error_code ec, std::size_t bytes_transferred) { + XRPL_ASSERT( + strand_.running_in_this_thread(), + "ripple::PeerImp::onWriteMessage : strand in this thread"); + + writePending_ = false; + if (!socket_.is_open()) return; - if (ec == boost::asio::error::operation_aborted) - return; + if (ec) + { + if (ec == boost::asio::error::operation_aborted) + return tryAsyncShutdown(); + return fail("onWriteMessage", ec); + } + if (auto stream = journal_.trace()) { - if (bytes_transferred > 0) - stream << "onWriteMessage: " << bytes_transferred << " bytes"; - else - stream << "onWriteMessage"; + stream << "onWriteMessage: " + << (bytes_transferred > 0 + ? to_string(bytes_transferred) + " bytes" + : ""); } metrics_.sent.add_message(bytes_transferred); @@ -987,8 +1075,17 @@ PeerImp::onWriteMessage(error_code ec, std::size_t bytes_transferred) !send_queue_.empty(), "ripple::PeerImp::onWriteMessage : non-empty send buffer"); send_queue_.pop(); + + if (shutdown_) + return tryAsyncShutdown(); + if (!send_queue_.empty()) { + writePending_ = true; + XRPL_ASSERT( + !shutdownStarted_, + "ripple::PeerImp::onWriteMessage : shutdown started"); + // Timeout on writes only return boost::asio::async_write( stream_, @@ -1002,16 +1099,6 @@ PeerImp::onWriteMessage(error_code ec, std::size_t bytes_transferred) std::placeholders::_1, std::placeholders::_2))); } - - if (gracefulClose_) - { - return stream_.async_shutdown(bind_executor( - strand_, - std::bind( - &PeerImp::onShutdown, - shared_from_this(), - std::placeholders::_1))); - } } //------------------------------------------------------------------------------ diff --git a/src/xrpld/overlay/detail/PeerImp.h b/src/xrpld/overlay/detail/PeerImp.h index 3d9a0c0b1e..c2221c136d 100644 --- a/src/xrpld/overlay/detail/PeerImp.h +++ b/src/xrpld/overlay/detail/PeerImp.h @@ -40,6 +40,7 @@ #include #include +#include #include #include #include @@ -49,6 +50,68 @@ namespace ripple { struct ValidatorBlobInfo; class SHAMap; +/** + * @class PeerImp + * @brief This class manages established peer-to-peer connections, handles + message exchange, monitors connection health, and graceful shutdown. + * + + * The PeerImp shutdown mechanism is a multi-stage process + * designed to ensure graceful connection termination while handling ongoing + * I/O operations safely. The shutdown can be initiated from multiple points + * and follows a deterministic state machine. 
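+ *
+ * As a minimal illustration (the free function below is hypothetical; only
+ * stop() is part of this class's interface), a caller outside the strand
+ * requests disconnection via stop(), which re-posts to the strand and then
+ * drives shutdown() -> tryAsyncShutdown() -> onShutdown() -> close():
+ *
+ * @code
+ * // Hypothetical caller sketch, for illustration only.
+ * void disconnect(std::shared_ptr<PeerImp> const& peer)
+ * {
+ *     peer->stop();  // safe to call from any thread
+ * }
+ * @endcode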
+ * + * The shutdown process can be triggered from several entry points: + * - **External requests**: `stop()` method called by overlay management + * - **Error conditions**: `fail(error_code)` or `fail(string)` on protocol + * violations + * - **Timer expiration**: Various timeout scenarios (ping timeout, large send + * queue) + * - **Connection health**: Peer tracking divergence or unknown state timeouts + * + * The shutdown follows this progression: + * + * Normal Operation → shutdown() → tryAsyncShutdown() → onShutdown() → close() + * ↓ ↓ ↓ ↓ + * Set shutdown_ SSL graceful Timer cancel Socket close + * Cancel timer shutdown start & cleanup & metrics + * 5s safety timer Set shutdownStarted_ update + * + * Two primary flags coordinate the shutdown process: + * - `shutdown_`: Set when shutdown is requested + * - `shutdownStarted_`: Set when SSL shutdown begins + * + * The shutdown mechanism carefully coordinates with ongoing read/write + * operations: + * + * **Read Operations (`onReadMessage`)**: + * - Checks `shutdown_` flag after processing each message batch + * - If shutdown initiated during processing, calls `tryAsyncShutdown()` + * + * **Write Operations (`onWriteMessage`)**: + * - Checks `shutdown_` flag before queuing new writes + * - Calls `tryAsyncShutdown()` when shutdown flag detected + * + * Multiple timers require coordination during shutdown: + * 1. **Peer Timer**: Regular ping/pong timer cancelled immediately in + * `shutdown()` + * 2. **Shutdown Timer**: 5-second safety timer ensures shutdown completion + * 3. **Operation Cancellation**: All pending async operations are cancelled + * + * The shutdown implements fallback mechanisms: + * - **Graceful Path**: SSL shutdown → Socket close → Cleanup + * - **Forced Path**: If SSL shutdown fails or times out, proceeds to socket + * close + * - **Safety Timer**: 5-second timeout prevents hanging shutdowns + * + * All shutdown operations are serialized through the boost::asio::strand to + * ensure thread safety. The strand guarantees that shutdown state changes + * and I/O operation callbacks are executed sequentially. + * + * @note This class requires careful coordination between async operations, + * timer management, and shutdown procedures to ensure no resource leaks + * or hanging connections in high-throughput networking scenarios. + */ class PeerImp : public Peer, public std::enable_shared_from_this, public OverlayImpl::Child @@ -79,6 +142,8 @@ private: socket_type& socket_; stream_type& stream_; boost::asio::strand strand_; + + // Multi-purpose timer for peer activity monitoring and shutdown safety waitable_timer timer_; // Updated at each stage of the connection process to reflect @@ -95,7 +160,6 @@ private: std::atomic tracking_; clock_type::time_point trackingTime_; - bool detaching_ = false; // Node public key of peer. 
PublicKey const publicKey_; std::string name_; @@ -175,7 +239,19 @@ private: http_response_type response_; boost::beast::http::fields const& headers_; std::queue> send_queue_; - bool gracefulClose_ = false; + + // Primary shutdown flag set when shutdown is requested + bool shutdown_ = false; + + // SSL shutdown coordination flag + bool shutdownStarted_ = false; + + // Indicates a read operation is currently pending + bool readPending_ = false; + + // Indicates a write operation is currently pending + bool writePending_ = false; + int large_sendq_ = 0; std::unique_ptr load_event_; // The highest sequence of each PublisherList that has @@ -425,9 +501,6 @@ public: bool isHighLatency() const override; - void - fail(std::string const& reason); - bool compressionEnabled() const override { @@ -441,32 +514,129 @@ public: } private: - void - close(); - + /** + * @brief Handles a failure associated with a specific error code. + * + * This function is called when an operation fails with an error code. It + * logs the warning message and gracefully shutdowns the connection. + * + * The function will do nothing if the connection is already closed or if a + * shutdown is already in progress. + * + * @param name The name of the operation that failed (e.g., "read", + * "write"). + * @param ec The error code associated with the failure. + * @note This function must be called from within the object's strand. + */ void fail(std::string const& name, error_code ec); + /** + * @brief Handles a failure described by a reason string. + * + * This overload is used for logical errors or protocol violations not + * associated with a specific error code. It logs a warning with the + * given reason, then initiates a graceful shutdown. + * + * The function will do nothing if the connection is already closed or if a + * shutdown is already in progress. + * + * @param reason A descriptive string explaining the reason for the failure. + * @note This function must be called from within the object's strand. + */ void - gracefulClose(); + fail(std::string const& reason); + /** @brief Initiates the peer disconnection sequence. + * + * This is the primary entry point to start closing a peer connection. It + * marks the peer for shutdown and cancels any outstanding asynchronous + * operations. This cancellation allows the graceful shutdown to proceed + * once the handlers for the cancelled operations have completed. + * + * @note This method must be called on the peer's strand. + */ void - setTimer(); + shutdown(); + /** @brief Attempts to perform a graceful SSL shutdown if conditions are + * met. + * + * This helper function checks if the peer is in a state where a graceful + * SSL shutdown can be performed (i.e., shutdown has been requested and no + * I/O operations are currently in progress). + * + * @note This method must be called on the peer's strand. + */ void - cancelTimer(); + tryAsyncShutdown(); + + /** + * @brief Handles the completion of the asynchronous SSL shutdown. + * + * This function is the callback for the `async_shutdown` operation started + * in `shutdown()`. Its first action is to cancel the timer. It + * then inspects the error code to determine the outcome. + * + * Regardless of the result, this function proceeds to call `close()` to + * ensure the underlying socket is fully closed. + * + * @param ec The error code resulting from the `async_shutdown` operation. + */ + void + onShutdown(error_code ec); + + /** + * @brief Forcibly closes the underlying socket connection. 
+ * + * This function provides the final, non-graceful shutdown of the peer + * connection. It ensures any pending timers are cancelled and then + * immediately closes the TCP socket, bypassing the SSL shutdown handshake. + * + * After closing, it notifies the overlay manager of the disconnection. + * + * @note This function must be called from within the object's strand. + */ + void + close(); + + /** + * @brief Sets and starts the peer timer. + * + * This function starts timer, which is used to detect inactivity + * and prevent stalled connections. It sets the timer to expire after the + * predefined `peerTimerInterval`. + * + * @note This function will terminate the connection in case of any errors. + */ + void + setTimer(std::chrono::seconds interval); + + /** + * @brief Handles the expiration of the peer activity timer. + * + * This callback is invoked when the timer set by `setTimer` expires. It + * watches the peer connection, checking for various timeout and health + * conditions. + * + * @param ec The error code associated with the timer's expiration. + * `operation_aborted` is expected if the timer was cancelled. + */ + void + onTimer(error_code const& ec); + + /** + * @brief Cancels any pending wait on the peer activity timer. + * + * This function is called to stop the timer. It gracefully manages any + * errors that might occur during the cancellation process. + */ + void + cancelTimer() noexcept; static std::string makePrefix(id_t id); - // Called when the timer wait completes - void - onTimer(boost::system::error_code const& ec); - - // Called when SSL shutdown completes - void - onShutdown(error_code ec); - void doAccept(); From 1020a32d76d1a91eb7ad11d1497f731edb36e05f Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Wed, 3 Sep 2025 11:12:43 +0100 Subject: [PATCH 186/244] Downgrade to boost 1.83 --- conan/profiles/default | 3 +++ conanfile.py | 2 +- src/test/server/ServerStatus_test.cpp | 2 +- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/conan/profiles/default b/conan/profiles/default index 3a7bcda1c6..0417704f8a 100644 --- a/conan/profiles/default +++ b/conan/profiles/default @@ -26,6 +26,9 @@ tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw'] {% if compiler == "apple-clang" and compiler_version >= 17 %} tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw'] {% endif %} +{% if compiler == "clang" and compiler_version == 16 %} +tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS'] +{% endif %} {% if compiler == "gcc" and compiler_version < 13 %} tools.build:cxxflags=['-Wno-restrict'] {% endif %} diff --git a/conanfile.py b/conanfile.py index ab4657277c..bff4c93aa3 100644 --- a/conanfile.py +++ b/conanfile.py @@ -104,7 +104,7 @@ class Xrpl(ConanFile): def requirements(self): # Conan 2 requires transitive headers to be specified transitive_headers_opt = {'transitive_headers': True} if conan_version.split('.')[0] == '2' else {} - self.requires('boost/1.86.0', force=True, **transitive_headers_opt) + self.requires('boost/1.83.0', force=True, **transitive_headers_opt) self.requires('date/3.0.4', **transitive_headers_opt) self.requires('lz4/1.10.0', force=True) self.requires('protobuf/3.21.12', force=True) diff --git a/src/test/server/ServerStatus_test.cpp b/src/test/server/ServerStatus_test.cpp index b27dee6e0a..bcd355e301 100644 --- a/src/test/server/ServerStatus_test.cpp +++ b/src/test/server/ServerStatus_test.cpp @@ -681,7 +681,7 @@ class ServerStatus_test : public beast::unit_test::suite, resp["Upgrade"] == 
"websocket"); BEAST_EXPECT( resp.find("Connection") != resp.end() && - resp["Connection"] == "Upgrade"); + resp["Connection"] == "upgrade"); } void From 8d01f35eb9c516aab4c93a0288f8d599370501af Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Wed, 3 Sep 2025 15:39:50 +0100 Subject: [PATCH 187/244] Set version to 2.6.1-rc1 --- src/libxrpl/protocol/BuildInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libxrpl/protocol/BuildInfo.cpp b/src/libxrpl/protocol/BuildInfo.cpp index d5077aa44d..ae15ea6dec 100644 --- a/src/libxrpl/protocol/BuildInfo.cpp +++ b/src/libxrpl/protocol/BuildInfo.cpp @@ -36,7 +36,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "2.6.0" +char const* const versionString = "2.6.1-rc1" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From 9494fc9668707b767ed0897f20fe53a33b30d726 Mon Sep 17 00:00:00 2001 From: Jingchen Date: Wed, 17 Sep 2025 14:29:15 +0100 Subject: [PATCH 188/244] chore: Use self hosted windows runners (#5780) This changes switches from the GitHub-managed Windows runners to self-hosted runners to significantly reduce build time. --- .github/scripts/strategy-matrix/windows.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/scripts/strategy-matrix/windows.json b/.github/scripts/strategy-matrix/windows.json index 5e6e536750..08b41e3f89 100644 --- a/.github/scripts/strategy-matrix/windows.json +++ b/.github/scripts/strategy-matrix/windows.json @@ -2,7 +2,7 @@ "architecture": [ { "platform": "windows/amd64", - "runner": ["windows-latest"] + "runner": ["self-hosted", "Windows", "devbox"] } ], "os": [ From 37b951859ccf490354a620b83637ed9cefa24f68 Mon Sep 17 00:00:00 2001 From: yinyiqian1 Date: Wed, 17 Sep 2025 16:43:04 -0400 Subject: [PATCH 189/244] Rename mutable flags (#5797) This is a minor change on top of #5705 --- include/xrpl/protocol/LedgerFormats.h | 16 +- include/xrpl/protocol/TxFlags.h | 54 ++--- src/test/app/MPToken_test.cpp | 199 +++++++++--------- src/test/jtx/impl/mpt.cpp | 24 +-- .../app/tx/detail/MPTokenIssuanceCreate.cpp | 2 +- .../app/tx/detail/MPTokenIssuanceSet.cpp | 26 +-- 6 files changed, 166 insertions(+), 155 deletions(-) diff --git a/include/xrpl/protocol/LedgerFormats.h b/include/xrpl/protocol/LedgerFormats.h index 711754df94..7cf92d0822 100644 --- a/include/xrpl/protocol/LedgerFormats.h +++ b/include/xrpl/protocol/LedgerFormats.h @@ -188,14 +188,14 @@ enum LedgerSpecificFlags { lsfMPTCanTransfer = 0x00000020, lsfMPTCanClawback = 0x00000040, - lsfMPTCanMutateCanLock = 0x00000002, - lsfMPTCanMutateRequireAuth = 0x00000004, - lsfMPTCanMutateCanEscrow = 0x00000008, - lsfMPTCanMutateCanTrade = 0x00000010, - lsfMPTCanMutateCanTransfer = 0x00000020, - lsfMPTCanMutateCanClawback = 0x00000040, - lsfMPTCanMutateMetadata = 0x00010000, - lsfMPTCanMutateTransferFee = 0x00020000, + lmfMPTCanMutateCanLock = 0x00000002, + lmfMPTCanMutateRequireAuth = 0x00000004, + lmfMPTCanMutateCanEscrow = 0x00000008, + lmfMPTCanMutateCanTrade = 0x00000010, + lmfMPTCanMutateCanTransfer = 0x00000020, + lmfMPTCanMutateCanClawback = 0x00000040, + lmfMPTCanMutateMetadata = 0x00010000, + lmfMPTCanMutateTransferFee = 0x00020000, // ltMPTOKEN lsfMPTAuthorized = 0x00000002, diff --git a/include/xrpl/protocol/TxFlags.h b/include/xrpl/protocol/TxFlags.h index c376180ac0..70c6833d3a 100644 --- a/include/xrpl/protocol/TxFlags.h +++ 
b/include/xrpl/protocol/TxFlags.h @@ -153,17 +153,17 @@ constexpr std::uint32_t const tfMPTokenIssuanceCreateMask = // MPTokenIssuanceCreate MutableFlags: // Indicating specific fields or flags may be changed after issuance. -constexpr std::uint32_t const tfMPTCanMutateCanLock = lsfMPTCanMutateCanLock; -constexpr std::uint32_t const tfMPTCanMutateRequireAuth = lsfMPTCanMutateRequireAuth; -constexpr std::uint32_t const tfMPTCanMutateCanEscrow = lsfMPTCanMutateCanEscrow; -constexpr std::uint32_t const tfMPTCanMutateCanTrade = lsfMPTCanMutateCanTrade; -constexpr std::uint32_t const tfMPTCanMutateCanTransfer = lsfMPTCanMutateCanTransfer; -constexpr std::uint32_t const tfMPTCanMutateCanClawback = lsfMPTCanMutateCanClawback; -constexpr std::uint32_t const tfMPTCanMutateMetadata = lsfMPTCanMutateMetadata; -constexpr std::uint32_t const tfMPTCanMutateTransferFee = lsfMPTCanMutateTransferFee; -constexpr std::uint32_t const tfMPTokenIssuanceCreateMutableMask = - ~(tfMPTCanMutateCanLock | tfMPTCanMutateRequireAuth | tfMPTCanMutateCanEscrow | tfMPTCanMutateCanTrade - | tfMPTCanMutateCanTransfer | tfMPTCanMutateCanClawback | tfMPTCanMutateMetadata | tfMPTCanMutateTransferFee); +constexpr std::uint32_t const tmfMPTCanMutateCanLock = lmfMPTCanMutateCanLock; +constexpr std::uint32_t const tmfMPTCanMutateRequireAuth = lmfMPTCanMutateRequireAuth; +constexpr std::uint32_t const tmfMPTCanMutateCanEscrow = lmfMPTCanMutateCanEscrow; +constexpr std::uint32_t const tmfMPTCanMutateCanTrade = lmfMPTCanMutateCanTrade; +constexpr std::uint32_t const tmfMPTCanMutateCanTransfer = lmfMPTCanMutateCanTransfer; +constexpr std::uint32_t const tmfMPTCanMutateCanClawback = lmfMPTCanMutateCanClawback; +constexpr std::uint32_t const tmfMPTCanMutateMetadata = lmfMPTCanMutateMetadata; +constexpr std::uint32_t const tmfMPTCanMutateTransferFee = lmfMPTCanMutateTransferFee; +constexpr std::uint32_t const tmfMPTokenIssuanceCreateMutableMask = + ~(tmfMPTCanMutateCanLock | tmfMPTCanMutateRequireAuth | tmfMPTCanMutateCanEscrow | tmfMPTCanMutateCanTrade + | tmfMPTCanMutateCanTransfer | tmfMPTCanMutateCanClawback | tmfMPTCanMutateMetadata | tmfMPTCanMutateTransferFee); // MPTokenAuthorize flags: constexpr std::uint32_t const tfMPTUnauthorize = 0x00000001; @@ -177,22 +177,22 @@ constexpr std::uint32_t const tfMPTokenIssuanceSetPermissionMask = ~(tfUniversal // MPTokenIssuanceSet MutableFlags: // Set or Clear flags. 
-constexpr std::uint32_t const tfMPTSetCanLock = 0x00000001; -constexpr std::uint32_t const tfMPTClearCanLock = 0x00000002; -constexpr std::uint32_t const tfMPTSetRequireAuth = 0x00000004; -constexpr std::uint32_t const tfMPTClearRequireAuth = 0x00000008; -constexpr std::uint32_t const tfMPTSetCanEscrow = 0x00000010; -constexpr std::uint32_t const tfMPTClearCanEscrow = 0x00000020; -constexpr std::uint32_t const tfMPTSetCanTrade = 0x00000040; -constexpr std::uint32_t const tfMPTClearCanTrade = 0x00000080; -constexpr std::uint32_t const tfMPTSetCanTransfer = 0x00000100; -constexpr std::uint32_t const tfMPTClearCanTransfer = 0x00000200; -constexpr std::uint32_t const tfMPTSetCanClawback = 0x00000400; -constexpr std::uint32_t const tfMPTClearCanClawback = 0x00000800; -constexpr std::uint32_t const tfMPTokenIssuanceSetMutableMask = ~(tfMPTSetCanLock | tfMPTClearCanLock | - tfMPTSetRequireAuth | tfMPTClearRequireAuth | tfMPTSetCanEscrow | tfMPTClearCanEscrow | - tfMPTSetCanTrade | tfMPTClearCanTrade | tfMPTSetCanTransfer | tfMPTClearCanTransfer | - tfMPTSetCanClawback | tfMPTClearCanClawback); +constexpr std::uint32_t const tmfMPTSetCanLock = 0x00000001; +constexpr std::uint32_t const tmfMPTClearCanLock = 0x00000002; +constexpr std::uint32_t const tmfMPTSetRequireAuth = 0x00000004; +constexpr std::uint32_t const tmfMPTClearRequireAuth = 0x00000008; +constexpr std::uint32_t const tmfMPTSetCanEscrow = 0x00000010; +constexpr std::uint32_t const tmfMPTClearCanEscrow = 0x00000020; +constexpr std::uint32_t const tmfMPTSetCanTrade = 0x00000040; +constexpr std::uint32_t const tmfMPTClearCanTrade = 0x00000080; +constexpr std::uint32_t const tmfMPTSetCanTransfer = 0x00000100; +constexpr std::uint32_t const tmfMPTClearCanTransfer = 0x00000200; +constexpr std::uint32_t const tmfMPTSetCanClawback = 0x00000400; +constexpr std::uint32_t const tmfMPTClearCanClawback = 0x00000800; +constexpr std::uint32_t const tmfMPTokenIssuanceSetMutableMask = ~(tmfMPTSetCanLock | tmfMPTClearCanLock | + tmfMPTSetRequireAuth | tmfMPTClearRequireAuth | tmfMPTSetCanEscrow | tmfMPTClearCanEscrow | + tmfMPTSetCanTrade | tmfMPTClearCanTrade | tmfMPTSetCanTransfer | tmfMPTClearCanTransfer | + tmfMPTSetCanClawback | tmfMPTClearCanClawback); // MPTokenIssuanceDestroy flags: constexpr std::uint32_t const tfMPTokenIssuanceDestroyMask = ~tfUniversal; diff --git a/src/test/app/MPToken_test.cpp b/src/test/app/MPToken_test.cpp index 1410370c33..e9740e67de 100644 --- a/src/test/app/MPToken_test.cpp +++ b/src/test/app/MPToken_test.cpp @@ -2871,8 +2871,8 @@ class MPToken_test : public beast::unit_test::suite MPTTester mptAlice(env, alice, {.holders = {bob}}); mptAlice.create( {.ownerCount = 1, - .mutableFlags = tfMPTCanMutateMetadata | - tfMPTCanMutateCanLock | tfMPTCanMutateTransferFee}); + .mutableFlags = tmfMPTCanMutateMetadata | + tmfMPTCanMutateCanLock | tmfMPTCanMutateTransferFee}); // Setting flags is not allowed when MutableFlags is present mptAlice.set( @@ -2906,7 +2906,7 @@ class MPToken_test : public beast::unit_test::suite .ownerCount = 1, .flags = tfMPTCanTransfer, .mutableFlags = - tfMPTCanMutateTransferFee | tfMPTCanMutateMetadata}); + tmfMPTCanMutateTransferFee | tmfMPTCanMutateMetadata}); mptAlice.set( {.account = alice, @@ -2943,15 +2943,15 @@ class MPToken_test : public beast::unit_test::suite auto const mptID = makeMptID(env.seq(alice), alice); auto const flagCombinations = { - tfMPTSetCanLock | tfMPTClearCanLock, - tfMPTSetRequireAuth | tfMPTClearRequireAuth, - tfMPTSetCanEscrow | tfMPTClearCanEscrow, - tfMPTSetCanTrade | 
tfMPTClearCanTrade, - tfMPTSetCanTransfer | tfMPTClearCanTransfer, - tfMPTSetCanClawback | tfMPTClearCanClawback, - tfMPTSetCanLock | tfMPTClearCanLock | tfMPTClearCanTrade, - tfMPTSetCanTransfer | tfMPTClearCanTransfer | - tfMPTSetCanEscrow | tfMPTClearCanClawback}; + tmfMPTSetCanLock | tmfMPTClearCanLock, + tmfMPTSetRequireAuth | tmfMPTClearRequireAuth, + tmfMPTSetCanEscrow | tmfMPTClearCanEscrow, + tmfMPTSetCanTrade | tmfMPTClearCanTrade, + tmfMPTSetCanTransfer | tmfMPTClearCanTransfer, + tmfMPTSetCanClawback | tmfMPTClearCanClawback, + tmfMPTSetCanLock | tmfMPTClearCanLock | tmfMPTClearCanTrade, + tmfMPTSetCanTransfer | tmfMPTClearCanTransfer | + tmfMPTSetCanEscrow | tmfMPTClearCanClawback}; for (auto const& mutableFlags : flagCombinations) { @@ -2971,18 +2971,18 @@ class MPToken_test : public beast::unit_test::suite mptAlice.create({.ownerCount = 1}); auto const mutableFlags = { - tfMPTSetCanLock, - tfMPTClearCanLock, - tfMPTSetRequireAuth, - tfMPTClearRequireAuth, - tfMPTSetCanEscrow, - tfMPTClearCanEscrow, - tfMPTSetCanTrade, - tfMPTClearCanTrade, - tfMPTSetCanTransfer, - tfMPTClearCanTransfer, - tfMPTSetCanClawback, - tfMPTClearCanClawback}; + tmfMPTSetCanLock, + tmfMPTClearCanLock, + tmfMPTSetRequireAuth, + tmfMPTClearRequireAuth, + tmfMPTSetCanEscrow, + tmfMPTClearCanEscrow, + tmfMPTSetCanTrade, + tmfMPTClearCanTrade, + tmfMPTSetCanTransfer, + tmfMPTClearCanTransfer, + tmfMPTSetCanClawback, + tmfMPTClearCanClawback}; for (auto const& mutableFlag : mutableFlags) { @@ -2999,7 +2999,7 @@ class MPToken_test : public beast::unit_test::suite MPTTester mptAlice(env, alice, {.holders = {bob}}); mptAlice.create( - {.ownerCount = 1, .mutableFlags = tfMPTCanMutateMetadata}); + {.ownerCount = 1, .mutableFlags = tmfMPTCanMutateMetadata}); std::string metadata(maxMPTokenMetadataLength + 1, 'a'); mptAlice.set( @@ -3025,7 +3025,7 @@ class MPToken_test : public beast::unit_test::suite auto const mptID = makeMptID(env.seq(alice), alice); mptAlice.create( - {.ownerCount = 1, .mutableFlags = tfMPTCanMutateTransferFee}); + {.ownerCount = 1, .mutableFlags = tmfMPTCanMutateTransferFee}); mptAlice.set( {.account = alice, @@ -3045,13 +3045,13 @@ class MPToken_test : public beast::unit_test::suite .ownerCount = 1, .flags = tfMPTCanTransfer, .mutableFlags = - tfMPTCanMutateTransferFee | tfMPTCanMutateCanTransfer}); + tmfMPTCanMutateTransferFee | tmfMPTCanMutateCanTransfer}); // Can not set non-zero transfer fee and clear MPTCanTransfer at the // same time mptAlice.set( {.account = alice, - .mutableFlags = tfMPTClearCanTransfer, + .mutableFlags = tmfMPTClearCanTransfer, .transferFee = 1, .err = temMALFORMED}); @@ -3060,7 +3060,7 @@ class MPToken_test : public beast::unit_test::suite // be removed. mptAlice.set( {.account = alice, - .mutableFlags = tfMPTClearCanTransfer, + .mutableFlags = tmfMPTClearCanTransfer, .transferFee = 0}); BEAST_EXPECT(!mptAlice.isTransferFeePresent()); } @@ -3073,7 +3073,7 @@ class MPToken_test : public beast::unit_test::suite mptAlice.create( {.ownerCount = 1, .mutableFlags = - tfMPTCanMutateTransferFee | tfMPTCanMutateCanTransfer}); + tmfMPTCanMutateTransferFee | tmfMPTCanMutateCanTransfer}); mptAlice.set( {.account = alice, @@ -3085,7 +3085,7 @@ class MPToken_test : public beast::unit_test::suite // fee can be set in a separate transaction. 
mptAlice.set( {.account = alice, - .mutableFlags = tfMPTSetCanTransfer, + .mutableFlags = tmfMPTSetCanTransfer, .transferFee = 100, .err = tecNO_PERMISSION}); } @@ -3116,8 +3116,8 @@ class MPToken_test : public beast::unit_test::suite mptAlice.create( {.ownerCount = 1, - .mutableFlags = tfMPTCanMutateCanTrade | - tfMPTCanMutateCanTransfer | tfMPTCanMutateMetadata}); + .mutableFlags = tmfMPTCanMutateCanTrade | + tmfMPTCanMutateCanTransfer | tmfMPTCanMutateMetadata}); // Can not mutate transfer fee mptAlice.set( @@ -3126,14 +3126,14 @@ class MPToken_test : public beast::unit_test::suite .err = tecNO_PERMISSION}); auto const invalidFlags = { - tfMPTSetCanLock, - tfMPTClearCanLock, - tfMPTSetRequireAuth, - tfMPTClearRequireAuth, - tfMPTSetCanEscrow, - tfMPTClearCanEscrow, - tfMPTSetCanClawback, - tfMPTClearCanClawback}; + tmfMPTSetCanLock, + tmfMPTClearCanLock, + tmfMPTSetRequireAuth, + tmfMPTClearRequireAuth, + tmfMPTSetCanEscrow, + tmfMPTClearCanEscrow, + tmfMPTSetCanClawback, + tmfMPTClearCanClawback}; // Can not mutate flags which are not mutable for (auto const& mutableFlag : invalidFlags) @@ -3145,15 +3145,15 @@ class MPToken_test : public beast::unit_test::suite } // Can mutate MPTCanTrade - mptAlice.set({.account = alice, .mutableFlags = tfMPTSetCanTrade}); + mptAlice.set({.account = alice, .mutableFlags = tmfMPTSetCanTrade}); mptAlice.set( - {.account = alice, .mutableFlags = tfMPTClearCanTrade}); + {.account = alice, .mutableFlags = tmfMPTClearCanTrade}); // Can mutate MPTCanTransfer mptAlice.set( - {.account = alice, .mutableFlags = tfMPTSetCanTransfer}); + {.account = alice, .mutableFlags = tmfMPTSetCanTransfer}); mptAlice.set( - {.account = alice, .mutableFlags = tfMPTClearCanTransfer}); + {.account = alice, .mutableFlags = tmfMPTClearCanTransfer}); // Can mutate metadata mptAlice.set({.account = alice, .metadata = "test"}); @@ -3176,7 +3176,7 @@ class MPToken_test : public beast::unit_test::suite mptAlice.create( {.metadata = "test", .ownerCount = 1, - .mutableFlags = tfMPTCanMutateMetadata}); + .mutableFlags = tmfMPTCanMutateMetadata}); std::vector metadatas = { "mutate metadata", @@ -3206,7 +3206,7 @@ class MPToken_test : public beast::unit_test::suite .metadata = "test", .ownerCount = 1, .flags = tfMPTCanTransfer, - .mutableFlags = tfMPTCanMutateTransferFee}); + .mutableFlags = tmfMPTCanMutateTransferFee}); for (std::uint16_t const fee : std::initializer_list{ 1, 10, 100, 200, 500, 1000, maxTransferFee}) @@ -3250,25 +3250,27 @@ class MPToken_test : public beast::unit_test::suite }; testFlagToggle( - tfMPTCanMutateCanLock, tfMPTCanLock, tfMPTClearCanLock); + tmfMPTCanMutateCanLock, tfMPTCanLock, tmfMPTClearCanLock); testFlagToggle( - tfMPTCanMutateRequireAuth, - tfMPTSetRequireAuth, - tfMPTClearRequireAuth); + tmfMPTCanMutateRequireAuth, + tmfMPTSetRequireAuth, + tmfMPTClearRequireAuth); testFlagToggle( - tfMPTCanMutateCanEscrow, - tfMPTSetCanEscrow, - tfMPTClearCanEscrow); + tmfMPTCanMutateCanEscrow, + tmfMPTSetCanEscrow, + tmfMPTClearCanEscrow); testFlagToggle( - tfMPTCanMutateCanTrade, tfMPTSetCanTrade, tfMPTClearCanTrade); + tmfMPTCanMutateCanTrade, + tmfMPTSetCanTrade, + tmfMPTClearCanTrade); testFlagToggle( - tfMPTCanMutateCanTransfer, - tfMPTSetCanTransfer, - tfMPTClearCanTransfer); + tmfMPTCanMutateCanTransfer, + tmfMPTSetCanTransfer, + tmfMPTClearCanTransfer); testFlagToggle( - tfMPTCanMutateCanClawback, - tfMPTSetCanClawback, - tfMPTClearCanClawback); + tmfMPTCanMutateCanClawback, + tmfMPTSetCanClawback, + tmfMPTClearCanClawback); } } @@ -3289,20 +3291,22 @@ class 
MPToken_test : public beast::unit_test::suite {.ownerCount = 1, .holderCount = 0, .flags = tfMPTCanLock | tfMPTCanTransfer, - .mutableFlags = tfMPTCanMutateCanLock | - tfMPTCanMutateCanTrade | tfMPTCanMutateTransferFee}); + .mutableFlags = tmfMPTCanMutateCanLock | + tmfMPTCanMutateCanTrade | tmfMPTCanMutateTransferFee}); mptAlice.authorize({.account = bob, .holderCount = 1}); // Lock bob's mptoken mptAlice.set({.account = alice, .holder = bob, .flags = tfMPTLock}); // Can mutate the mutable flags and fields - mptAlice.set({.account = alice, .mutableFlags = tfMPTClearCanLock}); - mptAlice.set({.account = alice, .mutableFlags = tfMPTSetCanLock}); - mptAlice.set({.account = alice, .mutableFlags = tfMPTClearCanLock}); - mptAlice.set({.account = alice, .mutableFlags = tfMPTSetCanTrade}); mptAlice.set( - {.account = alice, .mutableFlags = tfMPTClearCanTrade}); + {.account = alice, .mutableFlags = tmfMPTClearCanLock}); + mptAlice.set({.account = alice, .mutableFlags = tmfMPTSetCanLock}); + mptAlice.set( + {.account = alice, .mutableFlags = tmfMPTClearCanLock}); + mptAlice.set({.account = alice, .mutableFlags = tmfMPTSetCanTrade}); + mptAlice.set( + {.account = alice, .mutableFlags = tmfMPTClearCanTrade}); mptAlice.set({.account = alice, .transferFee = 200}); } @@ -3314,21 +3318,23 @@ class MPToken_test : public beast::unit_test::suite {.ownerCount = 1, .holderCount = 0, .flags = tfMPTCanLock, - .mutableFlags = tfMPTCanMutateCanLock | - tfMPTCanMutateCanClawback | tfMPTCanMutateMetadata}); + .mutableFlags = tmfMPTCanMutateCanLock | + tmfMPTCanMutateCanClawback | tmfMPTCanMutateMetadata}); mptAlice.authorize({.account = bob, .holderCount = 1}); // Lock issuance mptAlice.set({.account = alice, .flags = tfMPTLock}); // Can mutate the mutable flags and fields - mptAlice.set({.account = alice, .mutableFlags = tfMPTClearCanLock}); - mptAlice.set({.account = alice, .mutableFlags = tfMPTSetCanLock}); - mptAlice.set({.account = alice, .mutableFlags = tfMPTClearCanLock}); mptAlice.set( - {.account = alice, .mutableFlags = tfMPTSetCanClawback}); + {.account = alice, .mutableFlags = tmfMPTClearCanLock}); + mptAlice.set({.account = alice, .mutableFlags = tmfMPTSetCanLock}); mptAlice.set( - {.account = alice, .mutableFlags = tfMPTClearCanClawback}); + {.account = alice, .mutableFlags = tmfMPTClearCanLock}); + mptAlice.set( + {.account = alice, .mutableFlags = tmfMPTSetCanClawback}); + mptAlice.set( + {.account = alice, .mutableFlags = tmfMPTClearCanClawback}); mptAlice.set({.account = alice, .metadata = "mutate"}); } @@ -3340,8 +3346,8 @@ class MPToken_test : public beast::unit_test::suite {.ownerCount = 1, .holderCount = 0, .flags = tfMPTCanLock, - .mutableFlags = tfMPTCanMutateCanLock | - tfMPTCanMutateCanClawback | tfMPTCanMutateMetadata}); + .mutableFlags = tmfMPTCanMutateCanLock | + tmfMPTCanMutateCanClawback | tmfMPTCanMutateMetadata}); mptAlice.authorize({.account = bob, .holderCount = 1}); // Can lock and unlock @@ -3352,7 +3358,8 @@ class MPToken_test : public beast::unit_test::suite {.account = alice, .holder = bob, .flags = tfMPTUnlock}); // Clear lsfMPTCanLock - mptAlice.set({.account = alice, .mutableFlags = tfMPTClearCanLock}); + mptAlice.set( + {.account = alice, .mutableFlags = tmfMPTClearCanLock}); // Can not lock or unlock mptAlice.set( @@ -3375,7 +3382,7 @@ class MPToken_test : public beast::unit_test::suite .err = tecNO_PERMISSION}); // Set MPTCanLock again - mptAlice.set({.account = alice, .mutableFlags = tfMPTSetCanLock}); + mptAlice.set({.account = alice, .mutableFlags = 
tmfMPTSetCanLock}); // Can lock and unlock again mptAlice.set({.account = alice, .flags = tfMPTLock}); @@ -3400,7 +3407,7 @@ class MPToken_test : public beast::unit_test::suite mptAlice.create( {.ownerCount = 1, .flags = tfMPTRequireAuth, - .mutableFlags = tfMPTCanMutateRequireAuth}); + .mutableFlags = tmfMPTCanMutateRequireAuth}); mptAlice.authorize({.account = bob}); mptAlice.authorize({.account = alice, .holder = bob}); @@ -3416,13 +3423,14 @@ class MPToken_test : public beast::unit_test::suite mptAlice.pay(bob, alice, 100, tecNO_AUTH); // Clear RequireAuth - mptAlice.set({.account = alice, .mutableFlags = tfMPTClearRequireAuth}); + mptAlice.set( + {.account = alice, .mutableFlags = tmfMPTClearRequireAuth}); // Can pay to bob mptAlice.pay(alice, bob, 1000); // Set RequireAuth again - mptAlice.set({.account = alice, .mutableFlags = tfMPTSetRequireAuth}); + mptAlice.set({.account = alice, .mutableFlags = tmfMPTSetRequireAuth}); // Can not pay to bob since he is not authorized mptAlice.pay(bob, alice, 100, tecNO_AUTH); @@ -3452,7 +3460,7 @@ class MPToken_test : public beast::unit_test::suite {.ownerCount = 1, .holderCount = 0, .flags = tfMPTCanTransfer, - .mutableFlags = tfMPTCanMutateCanEscrow}); + .mutableFlags = tmfMPTCanMutateCanEscrow}); mptAlice.authorize({.account = carol}); mptAlice.authorize({.account = bob}); @@ -3469,14 +3477,14 @@ class MPToken_test : public beast::unit_test::suite ter(tecNO_PERMISSION)); // MPTCanEscrow is enabled now - mptAlice.set({.account = alice, .mutableFlags = tfMPTSetCanEscrow}); + mptAlice.set({.account = alice, .mutableFlags = tmfMPTSetCanEscrow}); env(escrow::create(carol, bob, MPT(3)), escrow::condition(escrow::cb1), escrow::finish_time(env.now() + 1s), fee(baseFee * 150)); // Clear MPTCanEscrow - mptAlice.set({.account = alice, .mutableFlags = tfMPTClearCanEscrow}); + mptAlice.set({.account = alice, .mutableFlags = tmfMPTClearCanEscrow}); env(escrow::create(carol, bob, MPT(3)), escrow::condition(escrow::cb1), escrow::finish_time(env.now() + 1s), @@ -3501,7 +3509,7 @@ class MPToken_test : public beast::unit_test::suite mptAlice.create( {.ownerCount = 1, .mutableFlags = - tfMPTCanMutateCanTransfer | tfMPTCanMutateTransferFee}); + tmfMPTCanMutateCanTransfer | tmfMPTCanMutateTransferFee}); mptAlice.authorize({.account = bob}); mptAlice.authorize({.account = carol}); @@ -3522,13 +3530,13 @@ class MPToken_test : public beast::unit_test::suite // MPTCanTransfer at the same time mptAlice.set( {.account = alice, - .mutableFlags = tfMPTSetCanTransfer, + .mutableFlags = tmfMPTSetCanTransfer, .transferFee = 100, .err = tecNO_PERMISSION}); // Alice sets MPTCanTransfer mptAlice.set( - {.account = alice, .mutableFlags = tfMPTSetCanTransfer}); + {.account = alice, .mutableFlags = tmfMPTSetCanTransfer}); // Can set transfer fee now BEAST_EXPECT(!mptAlice.isTransferFeePresent()); @@ -3540,7 +3548,7 @@ class MPToken_test : public beast::unit_test::suite // Alice clears MPTCanTransfer mptAlice.set( - {.account = alice, .mutableFlags = tfMPTClearCanTransfer}); + {.account = alice, .mutableFlags = tmfMPTClearCanTransfer}); // TransferFee field is removed when MPTCanTransfer is cleared BEAST_EXPECT(!mptAlice.isTransferFeePresent()); @@ -3550,7 +3558,7 @@ class MPToken_test : public beast::unit_test::suite } // Can set transfer fee to zero when MPTCanTransfer is not set, but - // tfMPTCanMutateTransferFee is set. + // tmfMPTCanMutateTransferFee is set. 
{ Env env{*this, features}; @@ -3560,13 +3568,13 @@ class MPToken_test : public beast::unit_test::suite .ownerCount = 1, .flags = tfMPTCanTransfer, .mutableFlags = - tfMPTCanMutateTransferFee | tfMPTCanMutateCanTransfer}); + tmfMPTCanMutateTransferFee | tmfMPTCanMutateCanTransfer}); BEAST_EXPECT(mptAlice.checkTransferFee(100)); // Clear MPTCanTransfer and transfer fee is removed mptAlice.set( - {.account = alice, .mutableFlags = tfMPTClearCanTransfer}); + {.account = alice, .mutableFlags = tmfMPTClearCanTransfer}); BEAST_EXPECT(!mptAlice.isTransferFeePresent()); // Can still set transfer fee to zero, although it is already zero @@ -3592,7 +3600,7 @@ class MPToken_test : public beast::unit_test::suite mptAlice.create( {.ownerCount = 1, .holderCount = 0, - .mutableFlags = tfMPTCanMutateCanClawback}); + .mutableFlags = tmfMPTCanMutateCanClawback}); // Bob creates an MPToken mptAlice.authorize({.account = bob}); @@ -3604,13 +3612,14 @@ class MPToken_test : public beast::unit_test::suite mptAlice.claw(alice, bob, 1, tecNO_PERMISSION); // Enable MPTCanClawback - mptAlice.set({.account = alice, .mutableFlags = tfMPTSetCanClawback}); + mptAlice.set({.account = alice, .mutableFlags = tmfMPTSetCanClawback}); // Can clawback now mptAlice.claw(alice, bob, 1); // Clear MPTCanClawback - mptAlice.set({.account = alice, .mutableFlags = tfMPTClearCanClawback}); + mptAlice.set( + {.account = alice, .mutableFlags = tmfMPTClearCanClawback}); // Can not clawback mptAlice.claw(alice, bob, 1, tecNO_PERMISSION); diff --git a/src/test/jtx/impl/mpt.cpp b/src/test/jtx/impl/mpt.cpp index f35b1b1ebb..f2f51492e3 100644 --- a/src/test/jtx/impl/mpt.cpp +++ b/src/test/jtx/impl/mpt.cpp @@ -265,34 +265,34 @@ MPTTester::set(MPTSet const& arg) if (arg.mutableFlags) { - if (*arg.mutableFlags & tfMPTSetCanLock) + if (*arg.mutableFlags & tmfMPTSetCanLock) flags |= lsfMPTCanLock; - else if (*arg.mutableFlags & tfMPTClearCanLock) + else if (*arg.mutableFlags & tmfMPTClearCanLock) flags &= ~lsfMPTCanLock; - if (*arg.mutableFlags & tfMPTSetRequireAuth) + if (*arg.mutableFlags & tmfMPTSetRequireAuth) flags |= lsfMPTRequireAuth; - else if (*arg.mutableFlags & tfMPTClearRequireAuth) + else if (*arg.mutableFlags & tmfMPTClearRequireAuth) flags &= ~lsfMPTRequireAuth; - if (*arg.mutableFlags & tfMPTSetCanEscrow) + if (*arg.mutableFlags & tmfMPTSetCanEscrow) flags |= lsfMPTCanEscrow; - else if (*arg.mutableFlags & tfMPTClearCanEscrow) + else if (*arg.mutableFlags & tmfMPTClearCanEscrow) flags &= ~lsfMPTCanEscrow; - if (*arg.mutableFlags & tfMPTSetCanClawback) + if (*arg.mutableFlags & tmfMPTSetCanClawback) flags |= lsfMPTCanClawback; - else if (*arg.mutableFlags & tfMPTClearCanClawback) + else if (*arg.mutableFlags & tmfMPTClearCanClawback) flags &= ~lsfMPTCanClawback; - if (*arg.mutableFlags & tfMPTSetCanTrade) + if (*arg.mutableFlags & tmfMPTSetCanTrade) flags |= lsfMPTCanTrade; - else if (*arg.mutableFlags & tfMPTClearCanTrade) + else if (*arg.mutableFlags & tmfMPTClearCanTrade) flags &= ~lsfMPTCanTrade; - if (*arg.mutableFlags & tfMPTSetCanTransfer) + if (*arg.mutableFlags & tmfMPTSetCanTransfer) flags |= lsfMPTCanTransfer; - else if (*arg.mutableFlags & tfMPTClearCanTransfer) + else if (*arg.mutableFlags & tmfMPTClearCanTransfer) flags &= ~lsfMPTCanTransfer; } } diff --git a/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.cpp b/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.cpp index 6a6e598f42..c195e45c1d 100644 --- a/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.cpp +++ b/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.cpp @@ -44,7 +44,7 
@@ MPTokenIssuanceCreate::preflight(PreflightContext const& ctx) return ret; if (auto const mutableFlags = ctx.tx[~sfMutableFlags]; mutableFlags && - (!*mutableFlags || *mutableFlags & tfMPTokenIssuanceCreateMutableMask)) + (!*mutableFlags || *mutableFlags & tmfMPTokenIssuanceCreateMutableMask)) return temINVALID_FLAG; if (ctx.tx.getFlags() & tfMPTokenIssuanceCreateMask) diff --git a/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp b/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp index 83b771c705..37c563460a 100644 --- a/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp +++ b/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp @@ -37,12 +37,14 @@ struct MPTMutabilityFlags }; static constexpr std::array mptMutabilityFlags = { - {{tfMPTSetCanLock, tfMPTClearCanLock, lsfMPTCanMutateCanLock}, - {tfMPTSetRequireAuth, tfMPTClearRequireAuth, lsfMPTCanMutateRequireAuth}, - {tfMPTSetCanEscrow, tfMPTClearCanEscrow, lsfMPTCanMutateCanEscrow}, - {tfMPTSetCanTrade, tfMPTClearCanTrade, lsfMPTCanMutateCanTrade}, - {tfMPTSetCanTransfer, tfMPTClearCanTransfer, lsfMPTCanMutateCanTransfer}, - {tfMPTSetCanClawback, tfMPTClearCanClawback, lsfMPTCanMutateCanClawback}}}; + {{tmfMPTSetCanLock, tmfMPTClearCanLock, lmfMPTCanMutateCanLock}, + {tmfMPTSetRequireAuth, tmfMPTClearRequireAuth, lmfMPTCanMutateRequireAuth}, + {tmfMPTSetCanEscrow, tmfMPTClearCanEscrow, lmfMPTCanMutateCanEscrow}, + {tmfMPTSetCanTrade, tmfMPTClearCanTrade, lmfMPTCanMutateCanTrade}, + {tmfMPTSetCanTransfer, tmfMPTClearCanTransfer, lmfMPTCanMutateCanTransfer}, + {tmfMPTSetCanClawback, + tmfMPTClearCanClawback, + lmfMPTCanMutateCanClawback}}}; NotTEC MPTokenIssuanceSet::preflight(PreflightContext const& ctx) @@ -110,7 +112,7 @@ MPTokenIssuanceSet::preflight(PreflightContext const& ctx) if (mutableFlags) { if (!*mutableFlags || - (*mutableFlags & tfMPTokenIssuanceSetMutableMask)) + (*mutableFlags & tmfMPTokenIssuanceSetMutableMask)) return temINVALID_FLAG; // Can not set and clear the same flag @@ -126,7 +128,7 @@ MPTokenIssuanceSet::preflight(PreflightContext const& ctx) // Trying to set a non-zero TransferFee and clear MPTCanTransfer // in the same transaction is not allowed. if (transferFee.value_or(0) && - (*mutableFlags & tfMPTClearCanTransfer)) + (*mutableFlags & tmfMPTClearCanTransfer)) return temMALFORMED; } } @@ -241,7 +243,7 @@ MPTokenIssuanceSet::preclaim(PreclaimContext const& ctx) return tecNO_PERMISSION; } - if (!isMutableFlag(lsfMPTCanMutateMetadata) && + if (!isMutableFlag(lmfMPTCanMutateMetadata) && ctx.tx.isFieldPresent(sfMPTokenMetadata)) return tecNO_PERMISSION; @@ -249,12 +251,12 @@ MPTokenIssuanceSet::preclaim(PreclaimContext const& ctx) { // A non-zero TransferFee is only valid if the lsfMPTCanTransfer flag // was previously enabled (at issuance or via a prior mutation). Setting - // it by tfMPTSetCanTransfer in the current transaction does not meet + // it by tmfMPTSetCanTransfer in the current transaction does not meet // this requirement. if (fee > 0u && !sleMptIssuance->isFlag(lsfMPTCanTransfer)) return tecNO_PERMISSION; - if (!isMutableFlag(lsfMPTCanMutateTransferFee)) + if (!isMutableFlag(lmfMPTCanMutateTransferFee)) return tecNO_PERMISSION; } @@ -296,7 +298,7 @@ MPTokenIssuanceSet::doApply() flagsOut &= ~f.canMutateFlag; } - if (mutableFlags & tfMPTClearCanTransfer) + if (mutableFlags & tmfMPTClearCanTransfer) { // If the lsfMPTCanTransfer flag is being cleared, then also clear // the TransferFee field. 
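The hunks above consistently rename the MPT mutability flags from the generic tf prefix to tmf on the transaction side and from lsf to lmf for the issuance-level can-mutate bits, with the mptMutabilityFlags table tying each set/clear pair to its guard bit. The sketch below is a simplified, self-contained C++ rendering of that relationship, not code from the patch: the constant values, the extra ledgerFlag field, and the bool error signalling are illustrative stand-ins for the transactor's real definitions.

    #include <array>
    #include <cstdint>

    // Illustrative values only; the real tmf*/lmf*/lsf* constants live in the
    // XRPL protocol headers and differ from these.
    constexpr std::uint32_t tmfSetCanLock = 0x0001;
    constexpr std::uint32_t tmfClearCanLock = 0x0002;
    constexpr std::uint32_t lmfCanMutateCanLock = 0x0001;
    constexpr std::uint32_t lsfCanLock = 0x0001;

    struct MutabilityEntry
    {
        std::uint32_t setFlag;        // tmf* bit requesting "set"
        std::uint32_t clearFlag;      // tmf* bit requesting "clear"
        std::uint32_t canMutateFlag;  // lmf* bit the issuance must carry
        std::uint32_t ledgerFlag;     // lsf* bit that is actually toggled
    };

    constexpr std::array<MutabilityEntry, 1> mutabilityTable{
        {{tmfSetCanLock, tmfClearCanLock, lmfCanMutateCanLock, lsfCanLock}}};

    // Toggle the requested flags, refusing any capability the issuance did not
    // declare mutable (the real transactor returns tecNO_PERMISSION instead).
    bool
    applyMutableFlags(
        std::uint32_t txMutableFlags,
        std::uint32_t issuanceMutableFlags,
        std::uint32_t& ledgerFlags)
    {
        for (auto const& e : mutabilityTable)
        {
            if (!(txMutableFlags & (e.setFlag | e.clearFlag)))
                continue;
            if (!(issuanceMutableFlags & e.canMutateFlag))
                return false;
            if (txMutableFlags & e.setFlag)
                ledgerFlags |= e.ledgerFlag;
            else
                ledgerFlags &= ~e.ledgerFlag;
        }
        return true;
    }

In the actual transactor the table carries one row per mutable capability and the affected lsf bit is handled alongside other doApply logic; the shape above is only the minimum needed to show the set/clear/guard relationship.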
From 510314d34465324575719ce528bdda185581464b Mon Sep 17 00:00:00 2001 From: Mayukha Vadari Date: Wed, 17 Sep 2025 17:34:47 -0400 Subject: [PATCH 190/244] fix(amendment): Add missing fields for keylets to ledger objects (#5646) This change adds a fix amendment (`fixIncludeKeyletFields`) that adds: * `sfSequence` to `Escrow` and `PayChannel` * `sfOwner` to `SignerList` * `sfOracleDocumentID` to `Oracle` This ensures that all ledger entries hold all the information needed to determine their keylet. --- include/xrpl/protocol/Indexes.h | 2 + include/xrpl/protocol/detail/features.macro | 1 + .../xrpl/protocol/detail/ledger_entries.macro | 6 +- src/test/app/Escrow_test.cpp | 9 ++ src/test/app/MultiSign_test.cpp | 115 +++++++++++------- src/test/app/Oracle_test.cpp | 23 +++- src/test/app/PayChan_test.cpp | 9 ++ src/test/jtx/impl/Oracle.cpp | 4 +- src/xrpld/app/tx/detail/Escrow.cpp | 5 + src/xrpld/app/tx/detail/PayChan.cpp | 4 + src/xrpld/app/tx/detail/SetOracle.cpp | 9 ++ src/xrpld/app/tx/detail/SetSignerList.cpp | 8 +- 12 files changed, 144 insertions(+), 51 deletions(-) diff --git a/include/xrpl/protocol/Indexes.h b/include/xrpl/protocol/Indexes.h index 3e3f2843c1..79be15d906 100644 --- a/include/xrpl/protocol/Indexes.h +++ b/include/xrpl/protocol/Indexes.h @@ -287,9 +287,11 @@ delegate(AccountID const& account, AccountID const& authorizedAccount) noexcept; Keylet bridge(STXChainBridge const& bridge, STXChainBridge::ChainType chainType); +// `seq` is stored as `sfXChainClaimID` in the object Keylet xChainClaimID(STXChainBridge const& bridge, std::uint64_t seq); +// `seq` is stored as `sfXChainAccountCreateCount` in the object Keylet xChainCreateAccountClaimID(STXChainBridge const& bridge, std::uint64_t seq); diff --git a/include/xrpl/protocol/detail/features.macro b/include/xrpl/protocol/detail/features.macro index a9f5d95624..9dc40dc8e5 100644 --- a/include/xrpl/protocol/detail/features.macro +++ b/include/xrpl/protocol/detail/features.macro @@ -32,6 +32,7 @@ // If you add an amendment here, then do not forget to increment `numFeatures` // in include/xrpl/protocol/Feature.h. +XRPL_FIX (IncludeKeyletFields, Supported::no, VoteBehavior::DefaultNo) XRPL_FEATURE(DynamicMPT, Supported::no, VoteBehavior::DefaultNo) XRPL_FIX (TokenEscrowV1, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (DelegateV1_1, Supported::no, VoteBehavior::DefaultNo) diff --git a/include/xrpl/protocol/detail/ledger_entries.macro b/include/xrpl/protocol/detail/ledger_entries.macro index 1066986223..f76188095e 100644 --- a/include/xrpl/protocol/detail/ledger_entries.macro +++ b/include/xrpl/protocol/detail/ledger_entries.macro @@ -120,6 +120,7 @@ LEDGER_ENTRY(ltNFTOKEN_PAGE, 0x0050, NFTokenPage, nft_page, ({ // All fields are soeREQUIRED because there is always a SignerEntries. // If there are no SignerEntries the node is deleted. LEDGER_ENTRY(ltSIGNER_LIST, 0x0053, SignerList, signer_list, ({ + {sfOwner, soeOPTIONAL}, {sfOwnerNode, soeREQUIRED}, {sfSignerQuorum, soeREQUIRED}, {sfSignerEntries, soeREQUIRED}, @@ -188,7 +189,7 @@ LEDGER_ENTRY(ltDIR_NODE, 0x0064, DirectoryNode, directory, ({ {sfNFTokenID, soeOPTIONAL}, {sfPreviousTxnID, soeOPTIONAL}, {sfPreviousTxnLgrSeq, soeOPTIONAL}, - {sfDomainID, soeOPTIONAL} + {sfDomainID, soeOPTIONAL} // order book directories })) /** The ledger object which lists details about amendments on the network. 
@@ -343,6 +344,7 @@ LEDGER_ENTRY(ltXCHAIN_OWNED_CREATE_ACCOUNT_CLAIM_ID, 0x0074, XChainOwnedCreateAc */ LEDGER_ENTRY(ltESCROW, 0x0075, Escrow, escrow, ({ {sfAccount, soeREQUIRED}, + {sfSequence, soeOPTIONAL}, {sfDestination, soeREQUIRED}, {sfAmount, soeREQUIRED}, {sfCondition, soeOPTIONAL}, @@ -365,6 +367,7 @@ LEDGER_ENTRY(ltESCROW, 0x0075, Escrow, escrow, ({ LEDGER_ENTRY(ltPAYCHAN, 0x0078, PayChannel, payment_channel, ({ {sfAccount, soeREQUIRED}, {sfDestination, soeREQUIRED}, + {sfSequence, soeOPTIONAL}, {sfAmount, soeREQUIRED}, {sfBalance, soeREQUIRED}, {sfPublicKey, soeREQUIRED}, @@ -433,6 +436,7 @@ LEDGER_ENTRY(ltMPTOKEN, 0x007f, MPToken, mptoken, ({ */ LEDGER_ENTRY(ltORACLE, 0x0080, Oracle, oracle, ({ {sfOwner, soeREQUIRED}, + {sfOracleDocumentID, soeOPTIONAL}, {sfProvider, soeREQUIRED}, {sfPriceDataSeries, soeREQUIRED}, {sfAssetClass, soeREQUIRED}, diff --git a/src/test/app/Escrow_test.cpp b/src/test/app/Escrow_test.cpp index 3eaf0f13ea..19b8612ef4 100644 --- a/src/test/app/Escrow_test.cpp +++ b/src/test/app/Escrow_test.cpp @@ -253,6 +253,14 @@ struct Escrow_test : public beast::unit_test::suite BEAST_EXPECT(sle); BEAST_EXPECT((*sle)[sfSourceTag] == 1); BEAST_EXPECT((*sle)[sfDestinationTag] == 2); + if (features[fixIncludeKeyletFields]) + { + BEAST_EXPECT((*sle)[sfSequence] == seq); + } + else + { + BEAST_EXPECT(!sle->isFieldPresent(sfSequence)); + } } void @@ -1718,6 +1726,7 @@ public: FeatureBitset const all{testable_amendments()}; testWithFeats(all); testWithFeats(all - featureTokenEscrow); + testTags(all - fixIncludeKeyletFields); } }; diff --git a/src/test/app/MultiSign_test.cpp b/src/test/app/MultiSign_test.cpp index 571ec33417..776e163cd4 100644 --- a/src/test/app/MultiSign_test.cpp +++ b/src/test/app/MultiSign_test.cpp @@ -63,7 +63,7 @@ class MultiSign_test : public beast::unit_test::suite public: void - test_noReserve(FeatureBitset features) + testNoReserve(FeatureBitset features) { testcase("No Reserve"); @@ -133,7 +133,7 @@ public: } void - test_signerListSet(FeatureBitset features) + testSignerListSet(FeatureBitset features) { testcase("SignerListSet"); @@ -215,7 +215,7 @@ public: } void - test_phantomSigners(FeatureBitset features) + testPhantomSigners(FeatureBitset features) { testcase("Phantom Signers"); @@ -282,7 +282,7 @@ public: } void - test_fee(FeatureBitset features) + testFee(FeatureBitset features) { testcase("Fee"); @@ -346,7 +346,7 @@ public: } void - test_misorderedSigners(FeatureBitset features) + testMisorderedSigners(FeatureBitset features) { testcase("Misordered Signers"); @@ -374,7 +374,7 @@ public: } void - test_masterSigners(FeatureBitset features) + testMasterSigners(FeatureBitset features) { testcase("Master Signers"); @@ -429,7 +429,7 @@ public: } void - test_regularSigners(FeatureBitset features) + testRegularSigners(FeatureBitset features) { testcase("Regular Signers"); @@ -494,7 +494,7 @@ public: } void - test_regularSignersUsingSubmitMulti(FeatureBitset features) + testRegularSignersUsingSubmitMulti(FeatureBitset features) { testcase("Regular Signers Using submit_multisigned"); @@ -734,7 +734,7 @@ public: } void - test_heterogeneousSigners(FeatureBitset features) + testHeterogeneousSigners(FeatureBitset features) { testcase("Heterogenious Signers"); @@ -881,7 +881,7 @@ public: // We want to always leave an account signable. Make sure the that we // disallow removing the last way a transaction may be signed. 
void - test_keyDisable(FeatureBitset features) + testKeyDisable(FeatureBitset features) { testcase("Key Disable"); @@ -963,7 +963,7 @@ public: // Verify that the first regular key can be made for free using the // master key, but not when multisigning. void - test_regKey(FeatureBitset features) + testRegKey(FeatureBitset features) { testcase("Regular Key"); @@ -1000,7 +1000,7 @@ public: // See if every kind of transaction can be successfully multi-signed. void - test_txTypes(FeatureBitset features) + testTxTypes(FeatureBitset features) { testcase("Transaction Types"); @@ -1089,7 +1089,7 @@ public: } void - test_badSignatureText(FeatureBitset features) + testBadSignatureText(FeatureBitset features) { testcase("Bad Signature Text"); @@ -1285,7 +1285,7 @@ public: } void - test_noMultiSigners(FeatureBitset features) + testNoMultiSigners(FeatureBitset features) { testcase("No Multisigners"); @@ -1304,7 +1304,7 @@ public: } void - test_multisigningMultisigner(FeatureBitset features) + testMultisigningMultisigner(FeatureBitset features) { testcase("Multisigning multisigner"); @@ -1381,7 +1381,7 @@ public: } void - test_signForHash(FeatureBitset features) + testSignForHash(FeatureBitset features) { testcase("sign_for Hash"); @@ -1464,7 +1464,7 @@ public: } void - test_amendmentTransition() + testAmendmentTransition() { testcase("Amendment Transition"); @@ -1559,7 +1559,7 @@ public: } void - test_signersWithTickets(FeatureBitset features) + testSignersWithTickets(FeatureBitset features) { testcase("Signers With Tickets"); @@ -1600,7 +1600,7 @@ public: } void - test_signersWithTags(FeatureBitset features) + testSignersWithTags(FeatureBitset features) { if (!features[featureExpandedSignerList]) return; @@ -1680,7 +1680,7 @@ public: } void - test_signerListSetFlags(FeatureBitset features) + testSignerListSetFlags(FeatureBitset features) { using namespace test::jtx; @@ -1702,27 +1702,57 @@ public: env.close(); } + void + testSignerListObject(FeatureBitset features) + { + testcase("SignerList Object"); + + // Verify that the SignerList object is created correctly. + using namespace jtx; + Env env{*this, features}; + Account const alice{"alice", KeyType::ed25519}; + env.fund(XRP(1000), alice); + env.close(); + + // Attach phantom signers to alice. + env(signers(alice, 1, {{bogie, 1}, {demon, 1}})); + env.close(); + + // Verify that the SignerList object was created correctly. 
+ auto const& sle = env.le(keylet::signers(alice.id())); + BEAST_EXPECT(sle); + BEAST_EXPECT(sle->getFieldArray(sfSignerEntries).size() == 2); + if (features[fixIncludeKeyletFields]) + { + BEAST_EXPECT((*sle)[sfOwner] == alice.id()); + } + else + { + BEAST_EXPECT(!sle->isFieldPresent(sfOwner)); + } + } + void testAll(FeatureBitset features) { - test_noReserve(features); - test_signerListSet(features); - test_phantomSigners(features); - test_fee(features); - test_misorderedSigners(features); - test_masterSigners(features); - test_regularSigners(features); - test_regularSignersUsingSubmitMulti(features); - test_heterogeneousSigners(features); - test_keyDisable(features); - test_regKey(features); - test_txTypes(features); - test_badSignatureText(features); - test_noMultiSigners(features); - test_multisigningMultisigner(features); - test_signForHash(features); - test_signersWithTickets(features); - test_signersWithTags(features); + testNoReserve(features); + testSignerListSet(features); + testPhantomSigners(features); + testFee(features); + testMisorderedSigners(features); + testMasterSigners(features); + testRegularSigners(features); + testRegularSignersUsingSubmitMulti(features); + testHeterogeneousSigners(features); + testKeyDisable(features); + testRegKey(features); + testTxTypes(features); + testBadSignatureText(features); + testNoMultiSigners(features); + testMultisigningMultisigner(features); + testSignForHash(features); + testSignersWithTickets(features); + testSignersWithTags(features); } void @@ -1739,10 +1769,13 @@ public: testAll(all - featureExpandedSignerList); testAll(all); - test_signerListSetFlags(all - fixInvalidTxFlags); - test_signerListSetFlags(all); + testSignerListSetFlags(all - fixInvalidTxFlags); + testSignerListSetFlags(all); - test_amendmentTransition(); + testSignerListObject(all - fixIncludeKeyletFields); + testSignerListObject(all); + + testAmendmentTransition(); } }; diff --git a/src/test/app/Oracle_test.cpp b/src/test/app/Oracle_test.cpp index fdd7ad941e..f0cde41394 100644 --- a/src/test/app/Oracle_test.cpp +++ b/src/test/app/Oracle_test.cpp @@ -398,7 +398,7 @@ private: } void - testCreate() + testCreate(FeatureBitset features) { testcase("Create"); using namespace jtx; @@ -413,18 +413,30 @@ private: env, {.owner = owner, .series = series, .fee = baseFee}); BEAST_EXPECT(oracle.exists()); BEAST_EXPECT(ownerCount(env, owner) == (count + adj)); + auto const entry = oracle.ledgerEntry(); + BEAST_EXPECT(entry[jss::node][jss::Owner] == owner.human()); + if (features[fixIncludeKeyletFields]) + { + BEAST_EXPECT( + entry[jss::node][jss::OracleDocumentID] == + oracle.documentID()); + } + else + { + BEAST_EXPECT(!entry[jss::node].isMember(jss::OracleDocumentID)); + } BEAST_EXPECT(oracle.expectLastUpdateTime(946694810)); }; { // owner count is adjusted by 1 - Env env(*this); + Env env(*this, features); test(env, {{"XRP", "USD", 740, 1}}, 1); } { // owner count is adjusted by 2 - Env env(*this); + Env env(*this, features); test( env, {{"XRP", "USD", 740, 1}, @@ -438,7 +450,7 @@ private: { // Different owner creates a new object - Env env(*this); + Env env(*this, features); auto const baseFee = static_cast(env.current()->fees().base.drops()); Account const some("some"); @@ -864,7 +876,8 @@ public: auto const all = testable_amendments(); testInvalidSet(); testInvalidDelete(); - testCreate(); + testCreate(all); + testCreate(all - fixIncludeKeyletFields); testDelete(); testUpdate(); testAmendment(); diff --git a/src/test/app/PayChan_test.cpp b/src/test/app/PayChan_test.cpp index 
3a5d3d6ff5..3d0557fd5c 100644 --- a/src/test/app/PayChan_test.cpp +++ b/src/test/app/PayChan_test.cpp @@ -1852,6 +1852,14 @@ struct PayChan_test : public beast::unit_test::suite BEAST_EXPECT(ownerDirCount(*env.current(), alice) == 1); BEAST_EXPECT(!inOwnerDir(*env.current(), bob, chanSle)); BEAST_EXPECT(ownerDirCount(*env.current(), bob) == 0); + if (features[fixIncludeKeyletFields]) + { + BEAST_EXPECT((*chanSle)[sfSequence] == env.seq(alice) - 1); + } + else + { + BEAST_EXPECT(!chanSle->isFieldPresent(sfSequence)); + } // close the channel env(claim(bob, chan), txflags(tfClose)); BEAST_EXPECT(!channelExists(*env.current(), chan)); @@ -2348,6 +2356,7 @@ public: testWithFeats(all - disallowIncoming); testWithFeats(all); testDepositAuthCreds(); + testMetaAndOwnership(all - fixIncludeKeyletFields); } }; diff --git a/src/test/jtx/impl/Oracle.cpp b/src/test/jtx/impl/Oracle.cpp index 721a1c299d..97a31cbb0c 100644 --- a/src/test/jtx/impl/Oracle.cpp +++ b/src/test/jtx/impl/Oracle.cpp @@ -317,10 +317,10 @@ Oracle::ledgerEntry( if (jr.isObject()) { + if (jr.isMember(jss::error)) + return jr; if (jr.isMember(jss::result) && jr[jss::result].isMember(jss::status)) return jr[jss::result]; - else if (jr.isMember(jss::error)) - return jr; } return Json::nullValue; } diff --git a/src/xrpld/app/tx/detail/Escrow.cpp b/src/xrpld/app/tx/detail/Escrow.cpp index 3b05aa0007..3c15278efc 100644 --- a/src/xrpld/app/tx/detail/Escrow.cpp +++ b/src/xrpld/app/tx/detail/Escrow.cpp @@ -538,6 +538,11 @@ EscrowCreate::doApply() (*slep)[~sfFinishAfter] = ctx_.tx[~sfFinishAfter]; (*slep)[~sfDestinationTag] = ctx_.tx[~sfDestinationTag]; + if (ctx_.view().rules().enabled(fixIncludeKeyletFields)) + { + (*slep)[sfSequence] = ctx_.tx.getSeqValue(); + } + if (ctx_.view().rules().enabled(featureTokenEscrow) && !isXRP(amount)) { auto const xferRate = transferRate(ctx_.view(), amount); diff --git a/src/xrpld/app/tx/detail/PayChan.cpp b/src/xrpld/app/tx/detail/PayChan.cpp index d9e53ac75c..12a9d0cb75 100644 --- a/src/xrpld/app/tx/detail/PayChan.cpp +++ b/src/xrpld/app/tx/detail/PayChan.cpp @@ -286,6 +286,10 @@ PayChanCreate::doApply() (*slep)[~sfCancelAfter] = ctx_.tx[~sfCancelAfter]; (*slep)[~sfSourceTag] = ctx_.tx[~sfSourceTag]; (*slep)[~sfDestinationTag] = ctx_.tx[~sfDestinationTag]; + if (ctx_.view().rules().enabled(fixIncludeKeyletFields)) + { + (*slep)[sfSequence] = ctx_.tx.getSeqValue(); + } ctx_.view().insert(slep); diff --git a/src/xrpld/app/tx/detail/SetOracle.cpp b/src/xrpld/app/tx/detail/SetOracle.cpp index d598507cb7..ba1d4a2e47 100644 --- a/src/xrpld/app/tx/detail/SetOracle.cpp +++ b/src/xrpld/app/tx/detail/SetOracle.cpp @@ -271,6 +271,11 @@ SetOracle::doApply() if (ctx_.tx.isFieldPresent(sfURI)) sle->setFieldVL(sfURI, ctx_.tx[sfURI]); sle->setFieldU32(sfLastUpdateTime, ctx_.tx[sfLastUpdateTime]); + if (!sle->isFieldPresent(sfOracleDocumentID) && + ctx_.view().rules().enabled(fixIncludeKeyletFields)) + { + (*sle)[sfOracleDocumentID] = ctx_.tx[sfOracleDocumentID]; + } auto const newCount = pairs.size() > 5 ? 
2 : 1; auto const adjust = newCount - oldCount; @@ -285,6 +290,10 @@ SetOracle::doApply() sle = std::make_shared(oracleID); sle->setAccountID(sfOwner, ctx_.tx.getAccountID(sfAccount)); + if (ctx_.view().rules().enabled(fixIncludeKeyletFields)) + { + (*sle)[sfOracleDocumentID] = ctx_.tx[sfOracleDocumentID]; + } sle->setFieldVL(sfProvider, ctx_.tx[sfProvider]); if (ctx_.tx.isFieldPresent(sfURI)) sle->setFieldVL(sfURI, ctx_.tx[sfURI]); diff --git a/src/xrpld/app/tx/detail/SetSignerList.cpp b/src/xrpld/app/tx/detail/SetSignerList.cpp index 4a1ee703a0..b52130e2fa 100644 --- a/src/xrpld/app/tx/detail/SetSignerList.cpp +++ b/src/xrpld/app/tx/detail/SetSignerList.cpp @@ -37,7 +37,7 @@ namespace ripple { // We're prepared for there to be multiple signer lists in the future, // but we don't need them yet. So for the time being we're manually // setting the sfSignerListID to zero in all cases. -static std::uint32_t const defaultSignerListID_ = 0; +static std::uint32_t const DEFAULT_SIGNER_LIST_ID = 0; std::tuple< NotTEC, @@ -424,8 +424,12 @@ SetSignerList::writeSignersToSLE( std::uint32_t flags) const { // Assign the quorum, default SignerListID, and flags. + if (ctx_.view().rules().enabled(fixIncludeKeyletFields)) + { + ledgerEntry->setAccountID(sfOwner, account_); + } ledgerEntry->setFieldU32(sfSignerQuorum, quorum_); - ledgerEntry->setFieldU32(sfSignerListID, defaultSignerListID_); + ledgerEntry->setFieldU32(sfSignerListID, DEFAULT_SIGNER_LIST_ID); if (flags) // Only set flags if they are non-default (default is zero). ledgerEntry->setFieldU32(sfFlags, flags); From e66558a883e23f45ff1d2299eaa9e900606b2132 Mon Sep 17 00:00:00 2001 From: Bart Date: Wed, 17 Sep 2025 18:55:00 -0400 Subject: [PATCH 191/244] chore: Limits CI build and test parallelism to reduce resource contention (#5799) GitHub runners have a limit on how many concurrent jobs they can actually process (even though they will try to run them all at the same time), and similarly the Conan remote cannot handle hundreds of concurrent requests. Previously, the Conan dependency uploading was already limited to max 10 jobs running in parallel, and this change makes the same change to the build+test workflow. --- .github/workflows/build-test.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml index 69ff986f98..634ed42690 100644 --- a/.github/workflows/build-test.yml +++ b/.github/workflows/build-test.yml @@ -61,6 +61,7 @@ jobs: strategy: fail-fast: false matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }} + max-parallel: 10 runs-on: ${{ matrix.architecture.runner }} container: ${{ inputs.os == 'linux' && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version) || null }} steps: From 1af1048c581a509f067e6458d617fdbaf4a05601 Mon Sep 17 00:00:00 2001 From: Bart Date: Wed, 17 Sep 2025 19:17:48 -0400 Subject: [PATCH 192/244] chore: Build and test all configs for daily scheduled run (#5801) This change re-enables building and testing all configurations, but only for the daily scheduled run. Previously all configurations were run for each merge into the develop branch, but that overwhelmed both the GitHub runners and the Conan remote, and thus they were limited to just a subset of configurations. Now that the number of jobs is limited via `max-parallel: 10`, we should be able to safely enable building all configurations again. 
However, building them all once a day instead of for each PR merge should be sufficient. --- .github/workflows/on-trigger.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/on-trigger.yml b/.github/workflows/on-trigger.yml index 7c17621d67..06abbd3f17 100644 --- a/.github/workflows/on-trigger.yml +++ b/.github/workflows/on-trigger.yml @@ -80,6 +80,6 @@ jobs: os: [linux, macos, windows] with: os: ${{ matrix.os }} - strategy_matrix: "minimal" + strategy_matrix: ${{ github.event_name == 'schedule' && 'all' || 'minimal' }} secrets: codecov_token: ${{ secrets.CODECOV_TOKEN }} From 617a895af5e43e80fd129dcd265d510057815635 Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Thu, 18 Sep 2025 11:30:34 +0100 Subject: [PATCH 193/244] chore: Add unit tests dir to code coverage excludes (#5803) This change excludes unit test code from code coverage reporting. --- .codecov.yml | 1 + cmake/RippledCov.cmake | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.codecov.yml b/.codecov.yml index d28d7c80df..cd52e2604d 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -33,5 +33,6 @@ slack_app: false ignore: - "src/test/" + - "src/tests/" - "include/xrpl/beast/test/" - "include/xrpl/beast/unit_test/" diff --git a/cmake/RippledCov.cmake b/cmake/RippledCov.cmake index 847915a51a..6cbe2ff921 100644 --- a/cmake/RippledCov.cmake +++ b/cmake/RippledCov.cmake @@ -33,7 +33,7 @@ setup_target_for_coverage_gcovr( FORMAT ${coverage_format} EXECUTABLE rippled EXECUTABLE_ARGS --unittest$<$:=${coverage_test}> --unittest-jobs ${coverage_test_parallelism} --quiet --unittest-log - EXCLUDE "src/test" "include/xrpl/beast/test" "include/xrpl/beast/unit_test" "${CMAKE_BINARY_DIR}/pb-xrpl.libpb" + EXCLUDE "src/test" "src/tests" "include/xrpl/beast/test" "include/xrpl/beast/unit_test" "${CMAKE_BINARY_DIR}/pb-xrpl.libpb" DEPENDENCIES rippled ) From dc8b37a52448b005153c13a7f046ad494128cf94 Mon Sep 17 00:00:00 2001 From: Jingchen Date: Thu, 18 Sep 2025 16:12:24 +0100 Subject: [PATCH 194/244] refactor: Modularise ledger (#5493) This change moves the ledger code to libxrpl. 
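For code that consumes these headers, the visible effect of the move is the include path, plus (for targets built outside the main binary) a link against the new xrpl.libxrpl.ledger module added in the RippledCore.cmake hunk below. A minimal consumer sketch follows, assuming only the public read-only view API; keylet::account() and ReadView::read() are the pre-existing interfaces, and only their header locations change here.

    // Before this patch the view headers lived in the server tree:
    //   #include <xrpld/ledger/ReadView.h>
    // After it they are public libxrpl headers:
    #include <xrpl/ledger/ReadView.h>
    #include <xrpl/protocol/AccountID.h>
    #include <xrpl/protocol/Indexes.h>

    // Returns true if the account root exists in the given view.
    bool
    accountExists(ripple::ReadView const& view, ripple::AccountID const& id)
    {
        return view.read(ripple::keylet::account(id)) != nullptr;
    }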
--- .../scripts/levelization/results/loops.txt | 3 --- .../scripts/levelization/results/ordering.txt | 20 +++++++++++-------- cmake/RippledCore.cmake | 7 +++++++ cmake/RippledInstall.cmake | 1 + .../xrpld => include/xrpl}/ledger/ApplyView.h | 5 ++--- .../xrpl}/ledger/ApplyViewImpl.h | 5 ++--- {src/xrpld => include/xrpl}/ledger/BookDirs.h | 3 +-- .../xrpl}/ledger/CachedSLEs.h | 0 .../xrpl}/ledger/CachedView.h | 5 ++--- .../xrpl/ledger}/CredentialHelpers.h | 5 ++--- {src/xrpld => include/xrpl}/ledger/Dir.h | 3 +-- {src/xrpld => include/xrpl}/ledger/OpenView.h | 7 +++---- .../xrpl}/ledger/PaymentSandbox.h | 7 +++---- {src/xrpld => include/xrpl}/ledger/RawView.h | 3 +-- {src/xrpld => include/xrpl}/ledger/ReadView.h | 5 ++--- {src/xrpld => include/xrpl}/ledger/Sandbox.h | 4 ++-- {src/xrpld => include/xrpl}/ledger/View.h | 7 +++---- .../xrpl}/ledger/detail/ApplyStateTable.h | 7 +++---- .../xrpl}/ledger/detail/ApplyViewBase.h | 7 +++---- .../xrpl}/ledger/detail/RawStateTable.h | 4 ++-- .../xrpl}/ledger/detail/ReadViewFwdRange.h | 0 .../xrpl}/ledger/detail/ReadViewFwdRange.ipp | 0 .../ledger}/ApplyStateTable.cpp | 3 +-- .../detail => libxrpl/ledger}/ApplyView.cpp | 3 +-- .../ledger}/ApplyViewBase.cpp | 2 +- .../ledger}/ApplyViewImpl.cpp | 2 +- .../detail => libxrpl/ledger}/BookDirs.cpp | 5 ++--- .../detail => libxrpl/ledger}/CachedView.cpp | 3 +-- .../ledger}/CredentialHelpers.cpp | 5 ++--- .../ledger/detail => libxrpl/ledger}/Dir.cpp | 2 +- .../detail => libxrpl/ledger}/OpenView.cpp | 3 +-- .../ledger}/PaymentSandbox.cpp | 6 ++---- .../ledger}/RawStateTable.cpp | 3 +-- .../detail => libxrpl/ledger}/ReadView.cpp | 2 +- .../ledger/detail => libxrpl/ledger}/View.cpp | 7 +++---- src/test/app/AMMExtended_test.cpp | 2 +- src/test/app/Credentials_test.cpp | 5 ++--- src/test/app/EscrowToken_test.cpp | 4 ++-- src/test/app/Escrow_test.cpp | 2 +- src/test/app/Flow_test.cpp | 4 ++-- src/test/app/LedgerHistory_test.cpp | 2 +- src/test/app/LoadFeeTrack_test.cpp | 2 +- src/test/app/PayChan_test.cpp | 2 +- src/test/app/PayStrand_test.cpp | 2 +- src/test/app/PermissionedDEX_test.cpp | 2 +- src/test/app/TheoreticalQuality_test.cpp | 2 +- src/test/app/Vault_test.cpp | 5 ++--- src/test/consensus/NegativeUNL_test.cpp | 2 +- src/test/jtx/owners.h | 3 +-- src/test/ledger/BookDirs_test.cpp | 3 +-- src/test/ledger/Directory_test.cpp | 5 ++--- src/test/ledger/PaymentSandbox_test.cpp | 7 +++---- src/test/ledger/SkipList_test.cpp | 2 +- src/test/ledger/View_test.cpp | 8 ++++---- src/xrpld/app/consensus/RCLCxLedger.h | 2 +- src/xrpld/app/ledger/BuildLedger.h | 3 +-- src/xrpld/app/ledger/Ledger.h | 4 ++-- src/xrpld/app/ledger/LocalTxs.h | 3 ++- src/xrpld/app/ledger/OpenLedger.h | 4 ++-- src/xrpld/app/ledger/detail/OpenLedger.cpp | 2 +- src/xrpld/app/misc/AMMUtils.h | 3 +-- src/xrpld/app/misc/FeeVote.h | 2 +- src/xrpld/app/misc/NetworkOPs.h | 2 +- src/xrpld/app/misc/PermissionedDEXHelpers.cpp | 3 ++- src/xrpld/app/misc/PermissionedDEXHelpers.h | 2 +- src/xrpld/app/misc/TxQ.h | 4 ++-- src/xrpld/app/misc/detail/AMMUtils.cpp | 2 +- src/xrpld/app/misc/detail/LoadFeeTrack.cpp | 2 +- src/xrpld/app/paths/AMMLiquidity.h | 4 ++-- src/xrpld/app/paths/AMMOffer.h | 5 ++--- src/xrpld/app/paths/Credit.cpp | 3 +-- src/xrpld/app/paths/Credit.h | 3 +-- src/xrpld/app/paths/Pathfinder.cpp | 2 +- src/xrpld/app/paths/RippleCalc.cpp | 2 +- src/xrpld/app/paths/RippleCalc.h | 3 +-- src/xrpld/app/paths/TrustLine.h | 3 +-- src/xrpld/app/paths/detail/BookStep.cpp | 2 +- src/xrpld/app/paths/detail/DirectStep.cpp | 2 +- 
src/xrpld/app/paths/detail/FlowDebugInfo.h | 2 +- src/xrpld/app/paths/detail/PaySteps.cpp | 2 +- src/xrpld/app/paths/detail/StepChecks.h | 5 ++--- .../app/paths/detail/XRPEndpointStep.cpp | 2 +- src/xrpld/app/tx/apply.h | 2 +- src/xrpld/app/tx/applySteps.h | 3 +-- src/xrpld/app/tx/detail/AMMBid.cpp | 4 ++-- src/xrpld/app/tx/detail/AMMClawback.cpp | 4 ++-- src/xrpld/app/tx/detail/AMMCreate.cpp | 4 ++-- src/xrpld/app/tx/detail/AMMDelete.cpp | 2 +- src/xrpld/app/tx/detail/AMMDeposit.cpp | 4 ++-- src/xrpld/app/tx/detail/AMMVote.cpp | 2 +- src/xrpld/app/tx/detail/AMMWithdraw.cpp | 2 +- src/xrpld/app/tx/detail/AMMWithdraw.h | 3 ++- src/xrpld/app/tx/detail/ApplyContext.h | 2 +- src/xrpld/app/tx/detail/Batch.cpp | 4 ++-- src/xrpld/app/tx/detail/BookTip.h | 3 +-- src/xrpld/app/tx/detail/CancelCheck.cpp | 2 +- src/xrpld/app/tx/detail/CancelOffer.cpp | 2 +- src/xrpld/app/tx/detail/Change.cpp | 2 +- src/xrpld/app/tx/detail/Clawback.cpp | 2 +- src/xrpld/app/tx/detail/CreateCheck.cpp | 2 +- src/xrpld/app/tx/detail/CreateOffer.cpp | 2 +- src/xrpld/app/tx/detail/Credentials.cpp | 6 +++--- src/xrpld/app/tx/detail/DID.cpp | 4 ++-- src/xrpld/app/tx/detail/DelegateSet.cpp | 2 +- src/xrpld/app/tx/detail/DeleteAccount.cpp | 4 ++-- src/xrpld/app/tx/detail/DeleteOracle.cpp | 2 +- src/xrpld/app/tx/detail/DepositPreauth.cpp | 4 ++-- src/xrpld/app/tx/detail/Escrow.cpp | 6 +++--- src/xrpld/app/tx/detail/InvariantCheck.cpp | 6 +++--- src/xrpld/app/tx/detail/LedgerStateFix.cpp | 2 +- src/xrpld/app/tx/detail/MPTokenAuthorize.cpp | 2 +- .../app/tx/detail/MPTokenIssuanceCreate.cpp | 2 +- .../app/tx/detail/MPTokenIssuanceDestroy.cpp | 2 +- .../app/tx/detail/NFTokenAcceptOffer.cpp | 2 +- .../app/tx/detail/NFTokenCancelOffer.cpp | 2 +- .../app/tx/detail/NFTokenCreateOffer.cpp | 2 +- src/xrpld/app/tx/detail/NFTokenMint.cpp | 2 +- src/xrpld/app/tx/detail/NFTokenUtils.cpp | 4 ++-- src/xrpld/app/tx/detail/NFTokenUtils.h | 2 +- src/xrpld/app/tx/detail/Offer.h | 3 +-- src/xrpld/app/tx/detail/OfferStream.cpp | 2 +- src/xrpld/app/tx/detail/OfferStream.h | 2 +- src/xrpld/app/tx/detail/PayChan.cpp | 6 +++--- src/xrpld/app/tx/detail/Payment.cpp | 4 ++-- .../tx/detail/PermissionedDomainDelete.cpp | 2 +- .../app/tx/detail/PermissionedDomainSet.cpp | 4 ++-- src/xrpld/app/tx/detail/SetAccount.cpp | 2 +- src/xrpld/app/tx/detail/SetOracle.cpp | 4 ++-- src/xrpld/app/tx/detail/SetSignerList.cpp | 2 +- src/xrpld/app/tx/detail/SetTrust.cpp | 2 +- src/xrpld/app/tx/detail/Taker.h | 0 src/xrpld/app/tx/detail/Transactor.cpp | 4 ++-- src/xrpld/app/tx/detail/VaultClawback.cpp | 2 +- src/xrpld/app/tx/detail/VaultCreate.cpp | 2 +- src/xrpld/app/tx/detail/VaultDelete.cpp | 2 +- src/xrpld/app/tx/detail/VaultDeposit.cpp | 4 ++-- src/xrpld/app/tx/detail/VaultSet.cpp | 2 +- src/xrpld/app/tx/detail/VaultWithdraw.cpp | 4 ++-- src/xrpld/app/tx/detail/XChainBridge.cpp | 6 +++--- src/xrpld/rpc/detail/RPCHelpers.cpp | 2 +- src/xrpld/rpc/handlers/AMMInfo.cpp | 2 +- src/xrpld/rpc/handlers/AccountChannels.cpp | 4 ++-- .../rpc/handlers/AccountCurrenciesHandler.cpp | 2 +- src/xrpld/rpc/handlers/AccountInfo.cpp | 2 +- src/xrpld/rpc/handlers/AccountLines.cpp | 2 +- src/xrpld/rpc/handlers/AccountObjects.cpp | 2 +- src/xrpld/rpc/handlers/AccountOffers.cpp | 4 ++-- src/xrpld/rpc/handlers/AccountTx.cpp | 2 +- src/xrpld/rpc/handlers/BookOffers.cpp | 2 +- src/xrpld/rpc/handlers/DepositAuthorized.cpp | 4 ++-- src/xrpld/rpc/handlers/GatewayBalances.cpp | 2 +- src/xrpld/rpc/handlers/GetAggregatePrice.cpp | 2 +- src/xrpld/rpc/handlers/LedgerData.cpp | 2 +- 
src/xrpld/rpc/handlers/LedgerEntry.cpp | 4 ++-- src/xrpld/rpc/handlers/LedgerHandler.h | 2 +- src/xrpld/rpc/handlers/LedgerHeader.cpp | 2 +- src/xrpld/rpc/handlers/NFTOffers.cpp | 4 ++-- src/xrpld/rpc/handlers/NoRippleCheck.cpp | 2 +- src/xrpld/rpc/handlers/PayChanClaim.cpp | 2 +- src/xrpld/rpc/handlers/Subscribe.cpp | 2 +- src/xrpld/rpc/handlers/TransactionEntry.cpp | 2 +- 161 files changed, 244 insertions(+), 272 deletions(-) rename {src/xrpld => include/xrpl}/ledger/ApplyView.h (99%) rename {src/xrpld => include/xrpl}/ledger/ApplyViewImpl.h (97%) rename {src/xrpld => include/xrpl}/ledger/BookDirs.h (98%) rename {src/xrpld => include/xrpl}/ledger/CachedSLEs.h (100%) rename {src/xrpld => include/xrpl}/ledger/CachedView.h (98%) rename {src/xrpld/app/misc => include/xrpl/ledger}/CredentialHelpers.h (98%) rename {src/xrpld => include/xrpl}/ledger/Dir.h (98%) rename {src/xrpld => include/xrpl}/ledger/OpenView.h (98%) rename {src/xrpld => include/xrpl}/ledger/PaymentSandbox.h (98%) rename {src/xrpld => include/xrpl}/ledger/RawView.h (98%) rename {src/xrpld => include/xrpl}/ledger/ReadView.h (98%) rename {src/xrpld => include/xrpl}/ledger/Sandbox.h (95%) rename {src/xrpld => include/xrpl}/ledger/View.h (99%) rename {src/xrpld => include/xrpl}/ledger/detail/ApplyStateTable.h (97%) rename {src/xrpld => include/xrpl}/ledger/detail/ApplyViewBase.h (96%) rename {src/xrpld => include/xrpl}/ledger/detail/RawStateTable.h (98%) rename {src/xrpld => include/xrpl}/ledger/detail/ReadViewFwdRange.h (100%) rename {src/xrpld => include/xrpl}/ledger/detail/ReadViewFwdRange.ipp (100%) rename src/{xrpld/ledger/detail => libxrpl/ledger}/ApplyStateTable.cpp (99%) rename src/{xrpld/ledger/detail => libxrpl/ledger}/ApplyView.cpp (99%) rename src/{xrpld/ledger/detail => libxrpl/ledger}/ApplyViewBase.cpp (98%) rename src/{xrpld/ledger/detail => libxrpl/ledger}/ApplyViewImpl.cpp (97%) rename src/{xrpld/ledger/detail => libxrpl/ledger}/BookDirs.cpp (98%) rename src/{xrpld/ledger/detail => libxrpl/ledger}/CachedView.cpp (98%) rename src/{xrpld/app/misc => libxrpl/ledger}/CredentialHelpers.cpp (99%) rename src/{xrpld/ledger/detail => libxrpl/ledger}/Dir.cpp (99%) rename src/{xrpld/ledger/detail => libxrpl/ledger}/OpenView.cpp (99%) rename src/{xrpld/ledger/detail => libxrpl/ledger}/PaymentSandbox.cpp (99%) rename src/{xrpld/ledger/detail => libxrpl/ledger}/RawStateTable.cpp (99%) rename src/{xrpld/ledger/detail => libxrpl/ledger}/ReadView.cpp (98%) rename src/{xrpld/ledger/detail => libxrpl/ledger}/View.cpp (99%) create mode 100644 src/xrpld/app/tx/detail/Taker.h diff --git a/.github/scripts/levelization/results/loops.txt b/.github/scripts/levelization/results/loops.txt index 0bbd65a9e4..e998e962d4 100644 --- a/.github/scripts/levelization/results/loops.txt +++ b/.github/scripts/levelization/results/loops.txt @@ -7,9 +7,6 @@ Loop: test.jtx test.unit_test Loop: xrpld.app xrpld.core xrpld.app > xrpld.core -Loop: xrpld.app xrpld.ledger - xrpld.app > xrpld.ledger - Loop: xrpld.app xrpld.overlay xrpld.overlay > xrpld.app diff --git a/.github/scripts/levelization/results/ordering.txt b/.github/scripts/levelization/results/ordering.txt index bf2d1db693..13de36e2a5 100644 --- a/.github/scripts/levelization/results/ordering.txt +++ b/.github/scripts/levelization/results/ordering.txt @@ -2,6 +2,10 @@ libxrpl.basics > xrpl.basics libxrpl.crypto > xrpl.basics libxrpl.json > xrpl.basics libxrpl.json > xrpl.json +libxrpl.ledger > xrpl.basics +libxrpl.ledger > xrpl.json +libxrpl.ledger > xrpl.ledger +libxrpl.ledger > xrpl.protocol 
libxrpl.net > xrpl.basics libxrpl.net > xrpl.net libxrpl.protocol > xrpl.basics @@ -21,11 +25,11 @@ test.app > test.unit_test test.app > xrpl.basics test.app > xrpld.app test.app > xrpld.core -test.app > xrpld.ledger test.app > xrpld.nodestore test.app > xrpld.overlay test.app > xrpld.rpc test.app > xrpl.json +test.app > xrpl.ledger test.app > xrpl.protocol test.app > xrpl.resource test.basics > test.jtx @@ -44,8 +48,8 @@ test.consensus > test.unit_test test.consensus > xrpl.basics test.consensus > xrpld.app test.consensus > xrpld.consensus -test.consensus > xrpld.ledger test.consensus > xrpl.json +test.consensus > xrpl.ledger test.core > test.jtx test.core > test.toplevel test.core > test.unit_test @@ -63,9 +67,9 @@ test.json > xrpl.json test.jtx > xrpl.basics test.jtx > xrpld.app test.jtx > xrpld.core -test.jtx > xrpld.ledger test.jtx > xrpld.rpc test.jtx > xrpl.json +test.jtx > xrpl.ledger test.jtx > xrpl.net test.jtx > xrpl.protocol test.jtx > xrpl.resource @@ -75,7 +79,7 @@ test.ledger > test.toplevel test.ledger > xrpl.basics test.ledger > xrpld.app test.ledger > xrpld.core -test.ledger > xrpld.ledger +test.ledger > xrpl.ledger test.ledger > xrpl.protocol test.nodestore > test.jtx test.nodestore > test.toplevel @@ -135,6 +139,8 @@ test.toplevel > xrpl.json test.unit_test > xrpl.basics tests.libxrpl > xrpl.basics xrpl.json > xrpl.basics +xrpl.ledger > xrpl.basics +xrpl.ledger > xrpl.protocol xrpl.net > xrpl.basics xrpl.protocol > xrpl.basics xrpl.protocol > xrpl.json @@ -151,6 +157,7 @@ xrpld.app > xrpld.consensus xrpld.app > xrpld.nodestore xrpld.app > xrpld.perflog xrpld.app > xrpl.json +xrpld.app > xrpl.ledger xrpld.app > xrpl.net xrpld.app > xrpl.protocol xrpld.app > xrpl.resource @@ -163,9 +170,6 @@ xrpld.core > xrpl.basics xrpld.core > xrpl.json xrpld.core > xrpl.net xrpld.core > xrpl.protocol -xrpld.ledger > xrpl.basics -xrpld.ledger > xrpl.json -xrpld.ledger > xrpl.protocol xrpld.nodestore > xrpl.basics xrpld.nodestore > xrpld.core xrpld.nodestore > xrpld.unity @@ -186,9 +190,9 @@ xrpld.perflog > xrpl.basics xrpld.perflog > xrpl.json xrpld.rpc > xrpl.basics xrpld.rpc > xrpld.core -xrpld.rpc > xrpld.ledger xrpld.rpc > xrpld.nodestore xrpld.rpc > xrpl.json +xrpld.rpc > xrpl.ledger xrpld.rpc > xrpl.net xrpld.rpc > xrpl.protocol xrpld.rpc > xrpl.resource diff --git a/cmake/RippledCore.cmake b/cmake/RippledCore.cmake index 7d3561675a..481b6e3cea 100644 --- a/cmake/RippledCore.cmake +++ b/cmake/RippledCore.cmake @@ -111,6 +111,12 @@ target_link_libraries(xrpl.libxrpl.net PUBLIC add_module(xrpl server) target_link_libraries(xrpl.libxrpl.server PUBLIC xrpl.libxrpl.protocol) +add_module(xrpl ledger) +target_link_libraries(xrpl.libxrpl.ledger PUBLIC + xrpl.libxrpl.basics + xrpl.libxrpl.json + xrpl.libxrpl.protocol +) add_library(xrpl.libxrpl) set_target_properties(xrpl.libxrpl PROPERTIES OUTPUT_NAME xrpl) @@ -131,6 +137,7 @@ target_link_modules(xrpl PUBLIC resource server net + ledger ) # All headers in libxrpl are in modules. 
diff --git a/cmake/RippledInstall.cmake b/cmake/RippledInstall.cmake index f32781f596..95c25a212f 100644 --- a/cmake/RippledInstall.cmake +++ b/cmake/RippledInstall.cmake @@ -18,6 +18,7 @@ install ( xrpl.libxrpl.json xrpl.libxrpl.protocol xrpl.libxrpl.resource + xrpl.libxrpl.ledger xrpl.libxrpl.server xrpl.libxrpl.net xrpl.libxrpl diff --git a/src/xrpld/ledger/ApplyView.h b/include/xrpl/ledger/ApplyView.h similarity index 99% rename from src/xrpld/ledger/ApplyView.h rename to include/xrpl/ledger/ApplyView.h index 1e4a5a112a..d8b9028d7c 100644 --- a/src/xrpld/ledger/ApplyView.h +++ b/include/xrpl/ledger/ApplyView.h @@ -20,11 +20,10 @@ #ifndef RIPPLE_LEDGER_APPLYVIEW_H_INCLUDED #define RIPPLE_LEDGER_APPLYVIEW_H_INCLUDED -#include -#include - #include #include +#include +#include namespace ripple { diff --git a/src/xrpld/ledger/ApplyViewImpl.h b/include/xrpl/ledger/ApplyViewImpl.h similarity index 97% rename from src/xrpld/ledger/ApplyViewImpl.h rename to include/xrpl/ledger/ApplyViewImpl.h index d170cf71ff..eadcd8acb5 100644 --- a/src/xrpld/ledger/ApplyViewImpl.h +++ b/include/xrpl/ledger/ApplyViewImpl.h @@ -20,9 +20,8 @@ #ifndef RIPPLE_LEDGER_APPLYVIEWIMPL_H_INCLUDED #define RIPPLE_LEDGER_APPLYVIEWIMPL_H_INCLUDED -#include -#include - +#include +#include #include #include diff --git a/src/xrpld/ledger/BookDirs.h b/include/xrpl/ledger/BookDirs.h similarity index 98% rename from src/xrpld/ledger/BookDirs.h rename to include/xrpl/ledger/BookDirs.h index dc58905c5a..95cd41e044 100644 --- a/src/xrpld/ledger/BookDirs.h +++ b/include/xrpl/ledger/BookDirs.h @@ -20,9 +20,8 @@ #ifndef RIPPLE_LEDGER_BOOK_DIRS_H_INCLUDED #define RIPPLE_LEDGER_BOOK_DIRS_H_INCLUDED -#include - #include +#include namespace ripple { diff --git a/src/xrpld/ledger/CachedSLEs.h b/include/xrpl/ledger/CachedSLEs.h similarity index 100% rename from src/xrpld/ledger/CachedSLEs.h rename to include/xrpl/ledger/CachedSLEs.h diff --git a/src/xrpld/ledger/CachedView.h b/include/xrpl/ledger/CachedView.h similarity index 98% rename from src/xrpld/ledger/CachedView.h rename to include/xrpl/ledger/CachedView.h index ae59312f98..3e1fb9cc72 100644 --- a/src/xrpld/ledger/CachedView.h +++ b/include/xrpl/ledger/CachedView.h @@ -20,10 +20,9 @@ #ifndef RIPPLE_LEDGER_CACHEDVIEW_H_INCLUDED #define RIPPLE_LEDGER_CACHEDVIEW_H_INCLUDED -#include -#include - #include +#include +#include #include #include diff --git a/src/xrpld/app/misc/CredentialHelpers.h b/include/xrpl/ledger/CredentialHelpers.h similarity index 98% rename from src/xrpld/app/misc/CredentialHelpers.h rename to include/xrpl/ledger/CredentialHelpers.h index 84938180ce..cb6b3c98ee 100644 --- a/src/xrpld/app/misc/CredentialHelpers.h +++ b/include/xrpl/ledger/CredentialHelpers.h @@ -20,12 +20,11 @@ #ifndef RIPPLE_APP_MISC_CREDENTIALHELPERS_H_INCLUDED #define RIPPLE_APP_MISC_CREDENTIALHELPERS_H_INCLUDED -#include -#include - #include #include #include +#include +#include #include #include #include diff --git a/src/xrpld/ledger/Dir.h b/include/xrpl/ledger/Dir.h similarity index 98% rename from src/xrpld/ledger/Dir.h rename to include/xrpl/ledger/Dir.h index d3a52558fd..0e9e5e998f 100644 --- a/src/xrpld/ledger/Dir.h +++ b/include/xrpl/ledger/Dir.h @@ -20,8 +20,7 @@ #ifndef RIPPLE_LEDGER_DIR_H_INCLUDED #define RIPPLE_LEDGER_DIR_H_INCLUDED -#include - +#include #include namespace ripple { diff --git a/src/xrpld/ledger/OpenView.h b/include/xrpl/ledger/OpenView.h similarity index 98% rename from src/xrpld/ledger/OpenView.h rename to include/xrpl/ledger/OpenView.h index a1fa195a69..e856e03764 
100644 --- a/src/xrpld/ledger/OpenView.h +++ b/include/xrpl/ledger/OpenView.h @@ -20,10 +20,9 @@ #ifndef RIPPLE_LEDGER_OPENVIEW_H_INCLUDED #define RIPPLE_LEDGER_OPENVIEW_H_INCLUDED -#include -#include -#include - +#include +#include +#include #include #include diff --git a/src/xrpld/ledger/PaymentSandbox.h b/include/xrpl/ledger/PaymentSandbox.h similarity index 98% rename from src/xrpld/ledger/PaymentSandbox.h rename to include/xrpl/ledger/PaymentSandbox.h index 2cd31ea490..c5bf28d1f9 100644 --- a/src/xrpld/ledger/PaymentSandbox.h +++ b/include/xrpl/ledger/PaymentSandbox.h @@ -20,10 +20,9 @@ #ifndef RIPPLE_LEDGER_PAYMENTSANDBOX_H_INCLUDED #define RIPPLE_LEDGER_PAYMENTSANDBOX_H_INCLUDED -#include -#include -#include - +#include +#include +#include #include #include diff --git a/src/xrpld/ledger/RawView.h b/include/xrpl/ledger/RawView.h similarity index 98% rename from src/xrpld/ledger/RawView.h rename to include/xrpl/ledger/RawView.h index fb6dcffe06..c22fd40132 100644 --- a/src/xrpld/ledger/RawView.h +++ b/include/xrpl/ledger/RawView.h @@ -20,8 +20,7 @@ #ifndef RIPPLE_LEDGER_RAWVIEW_H_INCLUDED #define RIPPLE_LEDGER_RAWVIEW_H_INCLUDED -#include - +#include #include #include diff --git a/src/xrpld/ledger/ReadView.h b/include/xrpl/ledger/ReadView.h similarity index 98% rename from src/xrpld/ledger/ReadView.h rename to include/xrpl/ledger/ReadView.h index 4c1986be4e..2c87dbbf6a 100644 --- a/src/xrpld/ledger/ReadView.h +++ b/include/xrpl/ledger/ReadView.h @@ -20,10 +20,9 @@ #ifndef RIPPLE_LEDGER_READVIEW_H_INCLUDED #define RIPPLE_LEDGER_READVIEW_H_INCLUDED -#include - #include #include +#include #include #include #include @@ -280,6 +279,6 @@ makeRulesGivenLedger( } // namespace ripple -#include +#include #endif diff --git a/src/xrpld/ledger/Sandbox.h b/include/xrpl/ledger/Sandbox.h similarity index 95% rename from src/xrpld/ledger/Sandbox.h rename to include/xrpl/ledger/Sandbox.h index 22b8dbecfb..495da120c6 100644 --- a/src/xrpld/ledger/Sandbox.h +++ b/include/xrpl/ledger/Sandbox.h @@ -20,8 +20,8 @@ #ifndef RIPPLE_LEDGER_SANDBOX_H_INCLUDED #define RIPPLE_LEDGER_SANDBOX_H_INCLUDED -#include -#include +#include +#include namespace ripple { diff --git a/src/xrpld/ledger/View.h b/include/xrpl/ledger/View.h similarity index 99% rename from src/xrpld/ledger/View.h rename to include/xrpl/ledger/View.h index faad633e00..3d67e25a22 100644 --- a/src/xrpld/ledger/View.h +++ b/include/xrpl/ledger/View.h @@ -20,11 +20,10 @@ #ifndef RIPPLE_LEDGER_VIEW_H_INCLUDED #define RIPPLE_LEDGER_VIEW_H_INCLUDED -#include -#include -#include - #include +#include +#include +#include #include #include #include diff --git a/src/xrpld/ledger/detail/ApplyStateTable.h b/include/xrpl/ledger/detail/ApplyStateTable.h similarity index 97% rename from src/xrpld/ledger/detail/ApplyStateTable.h rename to include/xrpl/ledger/detail/ApplyStateTable.h index 5a2e0bcf54..01ab1e07ab 100644 --- a/src/xrpld/ledger/detail/ApplyStateTable.h +++ b/include/xrpl/ledger/detail/ApplyStateTable.h @@ -20,11 +20,10 @@ #ifndef RIPPLE_LEDGER_APPLYSTATETABLE_H_INCLUDED #define RIPPLE_LEDGER_APPLYSTATETABLE_H_INCLUDED -#include -#include -#include - #include +#include +#include +#include #include #include #include diff --git a/src/xrpld/ledger/detail/ApplyViewBase.h b/include/xrpl/ledger/detail/ApplyViewBase.h similarity index 96% rename from src/xrpld/ledger/detail/ApplyViewBase.h rename to include/xrpl/ledger/detail/ApplyViewBase.h index f9c9c80af0..af86fc23b4 100644 --- a/src/xrpld/ledger/detail/ApplyViewBase.h +++ 
b/include/xrpl/ledger/detail/ApplyViewBase.h @@ -20,10 +20,9 @@ #ifndef RIPPLE_LEDGER_APPLYVIEWBASE_H_INCLUDED #define RIPPLE_LEDGER_APPLYVIEWBASE_H_INCLUDED -#include -#include -#include - +#include +#include +#include #include namespace ripple { diff --git a/src/xrpld/ledger/detail/RawStateTable.h b/include/xrpl/ledger/detail/RawStateTable.h similarity index 98% rename from src/xrpld/ledger/detail/RawStateTable.h rename to include/xrpl/ledger/detail/RawStateTable.h index 37597aa678..53a6f52208 100644 --- a/src/xrpld/ledger/detail/RawStateTable.h +++ b/include/xrpl/ledger/detail/RawStateTable.h @@ -20,8 +20,8 @@ #ifndef RIPPLE_LEDGER_RAWSTATETABLE_H_INCLUDED #define RIPPLE_LEDGER_RAWSTATETABLE_H_INCLUDED -#include -#include +#include +#include #include #include diff --git a/src/xrpld/ledger/detail/ReadViewFwdRange.h b/include/xrpl/ledger/detail/ReadViewFwdRange.h similarity index 100% rename from src/xrpld/ledger/detail/ReadViewFwdRange.h rename to include/xrpl/ledger/detail/ReadViewFwdRange.h diff --git a/src/xrpld/ledger/detail/ReadViewFwdRange.ipp b/include/xrpl/ledger/detail/ReadViewFwdRange.ipp similarity index 100% rename from src/xrpld/ledger/detail/ReadViewFwdRange.ipp rename to include/xrpl/ledger/detail/ReadViewFwdRange.ipp diff --git a/src/xrpld/ledger/detail/ApplyStateTable.cpp b/src/libxrpl/ledger/ApplyStateTable.cpp similarity index 99% rename from src/xrpld/ledger/detail/ApplyStateTable.cpp rename to src/libxrpl/ledger/ApplyStateTable.cpp index 2a740093d9..7b041939d4 100644 --- a/src/xrpld/ledger/detail/ApplyStateTable.cpp +++ b/src/libxrpl/ledger/ApplyStateTable.cpp @@ -17,11 +17,10 @@ */ //============================================================================== -#include - #include #include #include +#include #include #include diff --git a/src/xrpld/ledger/detail/ApplyView.cpp b/src/libxrpl/ledger/ApplyView.cpp similarity index 99% rename from src/xrpld/ledger/detail/ApplyView.cpp rename to src/libxrpl/ledger/ApplyView.cpp index 3191b47cbb..8a0fd51bd8 100644 --- a/src/xrpld/ledger/detail/ApplyView.cpp +++ b/src/libxrpl/ledger/ApplyView.cpp @@ -17,10 +17,9 @@ */ //============================================================================== -#include - #include #include +#include #include namespace ripple { diff --git a/src/xrpld/ledger/detail/ApplyViewBase.cpp b/src/libxrpl/ledger/ApplyViewBase.cpp similarity index 98% rename from src/xrpld/ledger/detail/ApplyViewBase.cpp rename to src/libxrpl/ledger/ApplyViewBase.cpp index 1d93eae7aa..dd0e043474 100644 --- a/src/xrpld/ledger/detail/ApplyViewBase.cpp +++ b/src/libxrpl/ledger/ApplyViewBase.cpp @@ -17,7 +17,7 @@ */ //============================================================================== -#include +#include namespace ripple { namespace detail { diff --git a/src/xrpld/ledger/detail/ApplyViewImpl.cpp b/src/libxrpl/ledger/ApplyViewImpl.cpp similarity index 97% rename from src/xrpld/ledger/detail/ApplyViewImpl.cpp rename to src/libxrpl/ledger/ApplyViewImpl.cpp index 3fd9478b54..9429bcee6e 100644 --- a/src/xrpld/ledger/detail/ApplyViewImpl.cpp +++ b/src/libxrpl/ledger/ApplyViewImpl.cpp @@ -17,7 +17,7 @@ */ //============================================================================== -#include +#include namespace ripple { diff --git a/src/xrpld/ledger/detail/BookDirs.cpp b/src/libxrpl/ledger/BookDirs.cpp similarity index 98% rename from src/xrpld/ledger/detail/BookDirs.cpp rename to src/libxrpl/ledger/BookDirs.cpp index 41a14945a6..f777d23aca 100644 --- a/src/xrpld/ledger/detail/BookDirs.cpp +++ 
b/src/libxrpl/ledger/BookDirs.cpp @@ -18,9 +18,8 @@ */ //============================================================================== -#include -#include - +#include +#include #include namespace ripple { diff --git a/src/xrpld/ledger/detail/CachedView.cpp b/src/libxrpl/ledger/CachedView.cpp similarity index 98% rename from src/xrpld/ledger/detail/CachedView.cpp rename to src/libxrpl/ledger/CachedView.cpp index 365d63e400..0bec9094d2 100644 --- a/src/xrpld/ledger/detail/CachedView.cpp +++ b/src/libxrpl/ledger/CachedView.cpp @@ -17,9 +17,8 @@ */ //============================================================================== -#include - #include +#include namespace ripple { namespace detail { diff --git a/src/xrpld/app/misc/CredentialHelpers.cpp b/src/libxrpl/ledger/CredentialHelpers.cpp similarity index 99% rename from src/xrpld/app/misc/CredentialHelpers.cpp rename to src/libxrpl/ledger/CredentialHelpers.cpp index 6d1f9f78c5..965d6f6911 100644 --- a/src/xrpld/app/misc/CredentialHelpers.cpp +++ b/src/libxrpl/ledger/CredentialHelpers.cpp @@ -17,9 +17,8 @@ */ //============================================================================== -#include -#include - +#include +#include #include #include diff --git a/src/xrpld/ledger/detail/Dir.cpp b/src/libxrpl/ledger/Dir.cpp similarity index 99% rename from src/xrpld/ledger/detail/Dir.cpp rename to src/libxrpl/ledger/Dir.cpp index 01d4487276..ea57369495 100644 --- a/src/xrpld/ledger/detail/Dir.cpp +++ b/src/libxrpl/ledger/Dir.cpp @@ -17,7 +17,7 @@ */ //============================================================================== -#include +#include namespace ripple { diff --git a/src/xrpld/ledger/detail/OpenView.cpp b/src/libxrpl/ledger/OpenView.cpp similarity index 99% rename from src/xrpld/ledger/detail/OpenView.cpp rename to src/libxrpl/ledger/OpenView.cpp index 73e502a5e2..c40434b38d 100644 --- a/src/xrpld/ledger/detail/OpenView.cpp +++ b/src/libxrpl/ledger/OpenView.cpp @@ -17,9 +17,8 @@ */ //============================================================================== -#include - #include +#include namespace ripple { diff --git a/src/xrpld/ledger/detail/PaymentSandbox.cpp b/src/libxrpl/ledger/PaymentSandbox.cpp similarity index 99% rename from src/xrpld/ledger/detail/PaymentSandbox.cpp rename to src/libxrpl/ledger/PaymentSandbox.cpp index 3eab845472..ba59b573fa 100644 --- a/src/xrpld/ledger/detail/PaymentSandbox.cpp +++ b/src/libxrpl/ledger/PaymentSandbox.cpp @@ -17,11 +17,9 @@ */ //============================================================================== -#include -#include -#include - #include +#include +#include #include namespace ripple { diff --git a/src/xrpld/ledger/detail/RawStateTable.cpp b/src/libxrpl/ledger/RawStateTable.cpp similarity index 99% rename from src/xrpld/ledger/detail/RawStateTable.cpp rename to src/libxrpl/ledger/RawStateTable.cpp index f19eed8297..e6b1467581 100644 --- a/src/xrpld/ledger/detail/RawStateTable.cpp +++ b/src/libxrpl/ledger/RawStateTable.cpp @@ -17,9 +17,8 @@ */ //============================================================================== -#include - #include +#include namespace ripple { namespace detail { diff --git a/src/xrpld/ledger/detail/ReadView.cpp b/src/libxrpl/ledger/ReadView.cpp similarity index 98% rename from src/xrpld/ledger/detail/ReadView.cpp rename to src/libxrpl/ledger/ReadView.cpp index 69a4b5d6a9..449a5a9cec 100644 --- a/src/xrpld/ledger/detail/ReadView.cpp +++ b/src/libxrpl/ledger/ReadView.cpp @@ -17,7 +17,7 @@ */ 
//============================================================================== -#include +#include namespace ripple { diff --git a/src/xrpld/ledger/detail/View.cpp b/src/libxrpl/ledger/View.cpp similarity index 99% rename from src/xrpld/ledger/detail/View.cpp rename to src/libxrpl/ledger/View.cpp index 473efa58fb..45aded0030 100644 --- a/src/xrpld/ledger/detail/View.cpp +++ b/src/libxrpl/ledger/View.cpp @@ -17,14 +17,13 @@ */ //============================================================================== -#include -#include -#include - #include #include #include #include +#include +#include +#include #include #include #include diff --git a/src/test/app/AMMExtended_test.cpp b/src/test/app/AMMExtended_test.cpp index 893e9e4f75..cb937038fe 100644 --- a/src/test/app/AMMExtended_test.cpp +++ b/src/test/app/AMMExtended_test.cpp @@ -29,8 +29,8 @@ #include #include #include -#include +#include #include #include diff --git a/src/test/app/Credentials_test.cpp b/src/test/app/Credentials_test.cpp index 54826cbb12..23aa7ad952 100644 --- a/src/test/app/Credentials_test.cpp +++ b/src/test/app/Credentials_test.cpp @@ -19,10 +19,9 @@ #include -#include -#include - #include +#include +#include #include #include #include diff --git a/src/test/app/EscrowToken_test.cpp b/src/test/app/EscrowToken_test.cpp index 28c9a5b167..9c1868134f 100644 --- a/src/test/app/EscrowToken_test.cpp +++ b/src/test/app/EscrowToken_test.cpp @@ -20,9 +20,9 @@ #include #include -#include -#include +#include +#include #include #include #include diff --git a/src/test/app/Escrow_test.cpp b/src/test/app/Escrow_test.cpp index 19b8612ef4..cea3a835a6 100644 --- a/src/test/app/Escrow_test.cpp +++ b/src/test/app/Escrow_test.cpp @@ -20,8 +20,8 @@ #include #include -#include +#include #include #include #include diff --git a/src/test/app/Flow_test.cpp b/src/test/app/Flow_test.cpp index 0f40d70b57..23095b0145 100644 --- a/src/test/app/Flow_test.cpp +++ b/src/test/app/Flow_test.cpp @@ -23,10 +23,10 @@ #include #include #include -#include -#include #include +#include +#include #include namespace ripple { diff --git a/src/test/app/LedgerHistory_test.cpp b/src/test/app/LedgerHistory_test.cpp index 7b1910bd4d..1d440f6420 100644 --- a/src/test/app/LedgerHistory_test.cpp +++ b/src/test/app/LedgerHistory_test.cpp @@ -23,10 +23,10 @@ #include #include #include -#include #include #include +#include #include #include diff --git a/src/test/app/LoadFeeTrack_test.cpp b/src/test/app/LoadFeeTrack_test.cpp index 8a88e0273f..80110b073d 100644 --- a/src/test/app/LoadFeeTrack_test.cpp +++ b/src/test/app/LoadFeeTrack_test.cpp @@ -19,9 +19,9 @@ #include #include -#include #include +#include namespace ripple { diff --git a/src/test/app/PayChan_test.cpp b/src/test/app/PayChan_test.cpp index 3d0557fd5c..fe9b70cf7f 100644 --- a/src/test/app/PayChan_test.cpp +++ b/src/test/app/PayChan_test.cpp @@ -19,10 +19,10 @@ #include -#include #include #include +#include #include #include #include diff --git a/src/test/app/PayStrand_test.cpp b/src/test/app/PayStrand_test.cpp index 936fe403d4..16a6861bfd 100644 --- a/src/test/app/PayStrand_test.cpp +++ b/src/test/app/PayStrand_test.cpp @@ -22,10 +22,10 @@ #include #include #include -#include #include #include +#include #include #include diff --git a/src/test/app/PermissionedDEX_test.cpp b/src/test/app/PermissionedDEX_test.cpp index 3fd3a35f45..80c75a2daf 100644 --- a/src/test/app/PermissionedDEX_test.cpp +++ b/src/test/app/PermissionedDEX_test.cpp @@ -22,11 +22,11 @@ #include #include -#include #include #include #include +#include 
#include #include #include diff --git a/src/test/app/TheoreticalQuality_test.cpp b/src/test/app/TheoreticalQuality_test.cpp index a8713ec69a..814e6f7136 100644 --- a/src/test/app/TheoreticalQuality_test.cpp +++ b/src/test/app/TheoreticalQuality_test.cpp @@ -24,10 +24,10 @@ #include #include #include -#include #include #include +#include #include #include diff --git a/src/test/app/Vault_test.cpp b/src/test/app/Vault_test.cpp index 2216ff6421..3cd52eaad3 100644 --- a/src/test/app/Vault_test.cpp +++ b/src/test/app/Vault_test.cpp @@ -21,14 +21,13 @@ #include #include -#include -#include - #include #include #include #include #include +#include +#include #include #include #include diff --git a/src/test/consensus/NegativeUNL_test.cpp b/src/test/consensus/NegativeUNL_test.cpp index b56b834726..cc38ea5ab6 100644 --- a/src/test/consensus/NegativeUNL_test.cpp +++ b/src/test/consensus/NegativeUNL_test.cpp @@ -24,9 +24,9 @@ #include #include #include -#include #include +#include namespace ripple { namespace test { diff --git a/src/test/jtx/owners.h b/src/test/jtx/owners.h index fc904f9e87..9b6f6a6df5 100644 --- a/src/test/jtx/owners.h +++ b/src/test/jtx/owners.h @@ -22,8 +22,7 @@ #include -#include - +#include #include #include diff --git a/src/test/ledger/BookDirs_test.cpp b/src/test/ledger/BookDirs_test.cpp index 52b618e9a0..48ee92a6fd 100644 --- a/src/test/ledger/BookDirs_test.cpp +++ b/src/test/ledger/BookDirs_test.cpp @@ -17,8 +17,7 @@ #include -#include - +#include #include namespace ripple { diff --git a/src/test/ledger/Directory_test.cpp b/src/test/ledger/Directory_test.cpp index 9e8d40e0cc..fe8f04523f 100644 --- a/src/test/ledger/Directory_test.cpp +++ b/src/test/ledger/Directory_test.cpp @@ -17,10 +17,9 @@ #include -#include -#include - #include +#include +#include #include #include #include diff --git a/src/test/ledger/PaymentSandbox_test.cpp b/src/test/ledger/PaymentSandbox_test.cpp index 26b06a0034..db7fbed019 100644 --- a/src/test/ledger/PaymentSandbox_test.cpp +++ b/src/test/ledger/PaymentSandbox_test.cpp @@ -19,10 +19,9 @@ #include -#include -#include -#include - +#include +#include +#include #include #include diff --git a/src/test/ledger/SkipList_test.cpp b/src/test/ledger/SkipList_test.cpp index c2088d77cf..e0f4049c73 100644 --- a/src/test/ledger/SkipList_test.cpp +++ b/src/test/ledger/SkipList_test.cpp @@ -20,9 +20,9 @@ #include #include -#include #include +#include namespace ripple { namespace test { diff --git a/src/test/ledger/View_test.cpp b/src/test/ledger/View_test.cpp index 4af3e37ce2..17d3244aa2 100644 --- a/src/test/ledger/View_test.cpp +++ b/src/test/ledger/View_test.cpp @@ -21,11 +21,11 @@ #include #include -#include -#include -#include -#include +#include +#include +#include +#include #include #include diff --git a/src/xrpld/app/consensus/RCLCxLedger.h b/src/xrpld/app/consensus/RCLCxLedger.h index cd14c30a94..f9df1fe41a 100644 --- a/src/xrpld/app/consensus/RCLCxLedger.h +++ b/src/xrpld/app/consensus/RCLCxLedger.h @@ -22,8 +22,8 @@ #include #include -#include +#include #include namespace ripple { diff --git a/src/xrpld/app/ledger/BuildLedger.h b/src/xrpld/app/ledger/BuildLedger.h index 2ec571773c..980fe82ed8 100644 --- a/src/xrpld/app/ledger/BuildLedger.h +++ b/src/xrpld/app/ledger/BuildLedger.h @@ -20,10 +20,9 @@ #ifndef RIPPLE_APP_LEDGER_BUILD_LEDGER_H_INCLUDED #define RIPPLE_APP_LEDGER_BUILD_LEDGER_H_INCLUDED -#include - #include #include +#include namespace ripple { diff --git a/src/xrpld/app/ledger/Ledger.h b/src/xrpld/app/ledger/Ledger.h index 81c26526e5..552f59ff19 
100644 --- a/src/xrpld/app/ledger/Ledger.h +++ b/src/xrpld/app/ledger/Ledger.h @@ -22,12 +22,12 @@ #include #include -#include -#include #include #include #include +#include +#include #include #include #include diff --git a/src/xrpld/app/ledger/LocalTxs.h b/src/xrpld/app/ledger/LocalTxs.h index 391bb4f7ef..2d202a5b60 100644 --- a/src/xrpld/app/ledger/LocalTxs.h +++ b/src/xrpld/app/ledger/LocalTxs.h @@ -21,7 +21,8 @@ #define RIPPLE_APP_LEDGER_LOCALTXS_H_INCLUDED #include -#include + +#include #include diff --git a/src/xrpld/app/ledger/OpenLedger.h b/src/xrpld/app/ledger/OpenLedger.h index 9fe56ff488..9383a53575 100644 --- a/src/xrpld/app/ledger/OpenLedger.h +++ b/src/xrpld/app/ledger/OpenLedger.h @@ -23,13 +23,13 @@ #include #include #include -#include -#include #include #include #include #include +#include +#include #include diff --git a/src/xrpld/app/ledger/detail/OpenLedger.cpp b/src/xrpld/app/ledger/detail/OpenLedger.cpp index 2c98caaa6d..5bba544e31 100644 --- a/src/xrpld/app/ledger/detail/OpenLedger.cpp +++ b/src/xrpld/app/ledger/detail/OpenLedger.cpp @@ -22,10 +22,10 @@ #include #include #include -#include #include #include +#include #include #include diff --git a/src/xrpld/app/misc/AMMUtils.h b/src/xrpld/app/misc/AMMUtils.h index 2a9f82ae60..d89085b116 100644 --- a/src/xrpld/app/misc/AMMUtils.h +++ b/src/xrpld/app/misc/AMMUtils.h @@ -20,10 +20,9 @@ #ifndef RIPPLE_APP_MISC_AMMUTILS_H_INCLUDED #define RIPPLE_APP_MISC_AMMUTILS_H_INCLUDED -#include - #include #include +#include #include #include #include diff --git a/src/xrpld/app/misc/FeeVote.h b/src/xrpld/app/misc/FeeVote.h index 35f723aa02..543456785a 100644 --- a/src/xrpld/app/misc/FeeVote.h +++ b/src/xrpld/app/misc/FeeVote.h @@ -20,9 +20,9 @@ #ifndef RIPPLE_APP_MISC_FEEVOTE_H_INCLUDED #define RIPPLE_APP_MISC_FEEVOTE_H_INCLUDED -#include #include +#include #include namespace ripple { diff --git a/src/xrpld/app/misc/NetworkOPs.h b/src/xrpld/app/misc/NetworkOPs.h index 9587d63b3a..bec0ad2341 100644 --- a/src/xrpld/app/misc/NetworkOPs.h +++ b/src/xrpld/app/misc/NetworkOPs.h @@ -23,9 +23,9 @@ #include #include #include -#include #include +#include #include #include diff --git a/src/xrpld/app/misc/PermissionedDEXHelpers.cpp b/src/xrpld/app/misc/PermissionedDEXHelpers.cpp index 4251ac1519..279ef8d7e9 100644 --- a/src/xrpld/app/misc/PermissionedDEXHelpers.cpp +++ b/src/xrpld/app/misc/PermissionedDEXHelpers.cpp @@ -17,9 +17,10 @@ */ //============================================================================== -#include #include +#include + namespace ripple { namespace permissioned_dex { diff --git a/src/xrpld/app/misc/PermissionedDEXHelpers.h b/src/xrpld/app/misc/PermissionedDEXHelpers.h index 1b3a0323fd..409b32f4ba 100644 --- a/src/xrpld/app/misc/PermissionedDEXHelpers.h +++ b/src/xrpld/app/misc/PermissionedDEXHelpers.h @@ -18,7 +18,7 @@ //============================================================================== #pragma once -#include +#include namespace ripple { namespace permissioned_dex { diff --git a/src/xrpld/app/misc/TxQ.h b/src/xrpld/app/misc/TxQ.h index f6ac2c6861..6afd83b0d0 100644 --- a/src/xrpld/app/misc/TxQ.h +++ b/src/xrpld/app/misc/TxQ.h @@ -21,9 +21,9 @@ #define RIPPLE_TXQ_H_INCLUDED #include -#include -#include +#include +#include #include #include #include diff --git a/src/xrpld/app/misc/detail/AMMUtils.cpp b/src/xrpld/app/misc/detail/AMMUtils.cpp index b56ce2748e..94cdb04287 100644 --- a/src/xrpld/app/misc/detail/AMMUtils.cpp +++ b/src/xrpld/app/misc/detail/AMMUtils.cpp @@ -19,9 +19,9 @@ #include #include -#include 
#include +#include #include #include diff --git a/src/xrpld/app/misc/detail/LoadFeeTrack.cpp b/src/xrpld/app/misc/detail/LoadFeeTrack.cpp index 96e7555401..776e9fa50b 100644 --- a/src/xrpld/app/misc/detail/LoadFeeTrack.cpp +++ b/src/xrpld/app/misc/detail/LoadFeeTrack.cpp @@ -18,11 +18,11 @@ //============================================================================== #include -#include #include #include #include +#include #include #include diff --git a/src/xrpld/app/paths/AMMLiquidity.h b/src/xrpld/app/paths/AMMLiquidity.h index ee745b4a8a..cb1db93705 100644 --- a/src/xrpld/app/paths/AMMLiquidity.h +++ b/src/xrpld/app/paths/AMMLiquidity.h @@ -23,10 +23,10 @@ #include #include #include -#include -#include #include +#include +#include #include namespace ripple { diff --git a/src/xrpld/app/paths/AMMOffer.h b/src/xrpld/app/paths/AMMOffer.h index 9241ba2057..3c218c0f5e 100644 --- a/src/xrpld/app/paths/AMMOffer.h +++ b/src/xrpld/app/paths/AMMOffer.h @@ -20,9 +20,8 @@ #ifndef RIPPLE_APP_AMMOFFER_H_INCLUDED #define RIPPLE_APP_AMMOFFER_H_INCLUDED -#include -#include - +#include +#include #include #include diff --git a/src/xrpld/app/paths/Credit.cpp b/src/xrpld/app/paths/Credit.cpp index ca721e4edf..f975385b28 100644 --- a/src/xrpld/app/paths/Credit.cpp +++ b/src/xrpld/app/paths/Credit.cpp @@ -17,8 +17,7 @@ */ //============================================================================== -#include - +#include #include #include #include diff --git a/src/xrpld/app/paths/Credit.h b/src/xrpld/app/paths/Credit.h index 0ea2d687dd..12ea27a734 100644 --- a/src/xrpld/app/paths/Credit.h +++ b/src/xrpld/app/paths/Credit.h @@ -20,8 +20,7 @@ #ifndef RIPPLE_APP_PATHS_CREDIT_H_INCLUDED #define RIPPLE_APP_PATHS_CREDIT_H_INCLUDED -#include - +#include #include #include diff --git a/src/xrpld/app/paths/Pathfinder.cpp b/src/xrpld/app/paths/Pathfinder.cpp index 74a33ec917..41a3697888 100644 --- a/src/xrpld/app/paths/Pathfinder.cpp +++ b/src/xrpld/app/paths/Pathfinder.cpp @@ -24,11 +24,11 @@ #include #include #include -#include #include #include #include +#include #include diff --git a/src/xrpld/app/paths/RippleCalc.cpp b/src/xrpld/app/paths/RippleCalc.cpp index 9c438bdfa9..237f0b8f9c 100644 --- a/src/xrpld/app/paths/RippleCalc.cpp +++ b/src/xrpld/app/paths/RippleCalc.cpp @@ -20,9 +20,9 @@ #include #include #include -#include #include +#include #include namespace ripple { diff --git a/src/xrpld/app/paths/RippleCalc.h b/src/xrpld/app/paths/RippleCalc.h index 09de7334e8..527c26f2ca 100644 --- a/src/xrpld/app/paths/RippleCalc.h +++ b/src/xrpld/app/paths/RippleCalc.h @@ -20,9 +20,8 @@ #ifndef RIPPLE_APP_PATHS_RIPPLECALC_H_INCLUDED #define RIPPLE_APP_PATHS_RIPPLECALC_H_INCLUDED -#include - #include +#include #include #include diff --git a/src/xrpld/app/paths/TrustLine.h b/src/xrpld/app/paths/TrustLine.h index efbe281f5e..5cff89b177 100644 --- a/src/xrpld/app/paths/TrustLine.h +++ b/src/xrpld/app/paths/TrustLine.h @@ -20,9 +20,8 @@ #ifndef RIPPLE_APP_PATHS_RIPPLESTATE_H_INCLUDED #define RIPPLE_APP_PATHS_RIPPLESTATE_H_INCLUDED -#include - #include +#include #include #include #include diff --git a/src/xrpld/app/paths/detail/BookStep.cpp b/src/xrpld/app/paths/detail/BookStep.cpp index 554d2525f5..97cf87c046 100644 --- a/src/xrpld/app/paths/detail/BookStep.cpp +++ b/src/xrpld/app/paths/detail/BookStep.cpp @@ -23,11 +23,11 @@ #include #include #include -#include #include #include #include +#include #include #include #include diff --git a/src/xrpld/app/paths/detail/DirectStep.cpp b/src/xrpld/app/paths/detail/DirectStep.cpp 
index 5e62a289a3..03d207e008 100644 --- a/src/xrpld/app/paths/detail/DirectStep.cpp +++ b/src/xrpld/app/paths/detail/DirectStep.cpp @@ -20,9 +20,9 @@ #include #include #include -#include #include +#include #include #include #include diff --git a/src/xrpld/app/paths/detail/FlowDebugInfo.h b/src/xrpld/app/paths/detail/FlowDebugInfo.h index e28b34f5d1..eec1d7c5a6 100644 --- a/src/xrpld/app/paths/detail/FlowDebugInfo.h +++ b/src/xrpld/app/paths/detail/FlowDebugInfo.h @@ -21,8 +21,8 @@ #define RIPPLE_PATH_IMPL_FLOWDEBUGINFO_H_INCLUDED #include -#include +#include #include #include diff --git a/src/xrpld/app/paths/detail/PaySteps.cpp b/src/xrpld/app/paths/detail/PaySteps.cpp index aa9e21e182..6eb38eee83 100644 --- a/src/xrpld/app/paths/detail/PaySteps.cpp +++ b/src/xrpld/app/paths/detail/PaySteps.cpp @@ -18,10 +18,10 @@ //============================================================================== #include -#include #include #include +#include #include #include diff --git a/src/xrpld/app/paths/detail/StepChecks.h b/src/xrpld/app/paths/detail/StepChecks.h index 4acafd1b9a..5ca2a463cb 100644 --- a/src/xrpld/app/paths/detail/StepChecks.h +++ b/src/xrpld/app/paths/detail/StepChecks.h @@ -20,11 +20,10 @@ #ifndef RIPPLE_APP_PATHS_IMPL_STEP_CHECKS_H_INCLUDED #define RIPPLE_APP_PATHS_IMPL_STEP_CHECKS_H_INCLUDED -#include -#include - #include #include +#include +#include #include #include diff --git a/src/xrpld/app/paths/detail/XRPEndpointStep.cpp b/src/xrpld/app/paths/detail/XRPEndpointStep.cpp index 7fdfb3749d..9cbcb0c84d 100644 --- a/src/xrpld/app/paths/detail/XRPEndpointStep.cpp +++ b/src/xrpld/app/paths/detail/XRPEndpointStep.cpp @@ -21,9 +21,9 @@ #include #include #include -#include #include +#include #include #include #include diff --git a/src/xrpld/app/tx/apply.h b/src/xrpld/app/tx/apply.h index 101f9a946d..7f43d1a744 100644 --- a/src/xrpld/app/tx/apply.h +++ b/src/xrpld/app/tx/apply.h @@ -22,9 +22,9 @@ #include #include -#include #include +#include #include #include diff --git a/src/xrpld/app/tx/applySteps.h b/src/xrpld/app/tx/applySteps.h index ec7180e263..a543ac37de 100644 --- a/src/xrpld/app/tx/applySteps.h +++ b/src/xrpld/app/tx/applySteps.h @@ -20,9 +20,8 @@ #ifndef RIPPLE_TX_APPLYSTEPS_H_INCLUDED #define RIPPLE_TX_APPLYSTEPS_H_INCLUDED -#include - #include +#include namespace ripple { diff --git a/src/xrpld/app/tx/detail/AMMBid.cpp b/src/xrpld/app/tx/detail/AMMBid.cpp index 86a80431b4..806c075c4f 100644 --- a/src/xrpld/app/tx/detail/AMMBid.cpp +++ b/src/xrpld/app/tx/detail/AMMBid.cpp @@ -20,9 +20,9 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/AMMClawback.cpp b/src/xrpld/app/tx/detail/AMMClawback.cpp index 07c5151727..634b948a64 100644 --- a/src/xrpld/app/tx/detail/AMMClawback.cpp +++ b/src/xrpld/app/tx/detail/AMMClawback.cpp @@ -21,9 +21,9 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/AMMCreate.cpp b/src/xrpld/app/tx/detail/AMMCreate.cpp index f0ccc6f298..03c972f1cd 100644 --- a/src/xrpld/app/tx/detail/AMMCreate.cpp +++ b/src/xrpld/app/tx/detail/AMMCreate.cpp @@ -21,9 +21,9 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/AMMDelete.cpp b/src/xrpld/app/tx/detail/AMMDelete.cpp index 28d56eab98..004e0b2229 100644 --- a/src/xrpld/app/tx/detail/AMMDelete.cpp +++ b/src/xrpld/app/tx/detail/AMMDelete.cpp @@ -19,8 +19,8 @@ #include 
#include -#include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/AMMDeposit.cpp b/src/xrpld/app/tx/detail/AMMDeposit.cpp index 0dafa0da6c..614d788c71 100644 --- a/src/xrpld/app/tx/detail/AMMDeposit.cpp +++ b/src/xrpld/app/tx/detail/AMMDeposit.cpp @@ -20,9 +20,9 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/AMMVote.cpp b/src/xrpld/app/tx/detail/AMMVote.cpp index 84d0905a22..6fbff86056 100644 --- a/src/xrpld/app/tx/detail/AMMVote.cpp +++ b/src/xrpld/app/tx/detail/AMMVote.cpp @@ -19,8 +19,8 @@ #include #include -#include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/AMMWithdraw.cpp b/src/xrpld/app/tx/detail/AMMWithdraw.cpp index 2ad1a19df5..9bc36efc81 100644 --- a/src/xrpld/app/tx/detail/AMMWithdraw.cpp +++ b/src/xrpld/app/tx/detail/AMMWithdraw.cpp @@ -20,9 +20,9 @@ #include #include #include -#include #include +#include #include #include diff --git a/src/xrpld/app/tx/detail/AMMWithdraw.h b/src/xrpld/app/tx/detail/AMMWithdraw.h index 1de91fd787..e9a597bdb7 100644 --- a/src/xrpld/app/tx/detail/AMMWithdraw.h +++ b/src/xrpld/app/tx/detail/AMMWithdraw.h @@ -21,7 +21,8 @@ #define RIPPLE_TX_AMMWITHDRAW_H_INCLUDED #include -#include + +#include namespace ripple { diff --git a/src/xrpld/app/tx/detail/ApplyContext.h b/src/xrpld/app/tx/detail/ApplyContext.h index 720d0aeea3..0344771a43 100644 --- a/src/xrpld/app/tx/detail/ApplyContext.h +++ b/src/xrpld/app/tx/detail/ApplyContext.h @@ -22,9 +22,9 @@ #include #include -#include #include +#include #include #include diff --git a/src/xrpld/app/tx/detail/Batch.cpp b/src/xrpld/app/tx/detail/Batch.cpp index 40991ea99a..86d6e8a8f4 100644 --- a/src/xrpld/app/tx/detail/Batch.cpp +++ b/src/xrpld/app/tx/detail/Batch.cpp @@ -19,10 +19,10 @@ #include #include -#include -#include #include +#include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/BookTip.h b/src/xrpld/app/tx/detail/BookTip.h index f215cdb620..0a9c49a4e9 100644 --- a/src/xrpld/app/tx/detail/BookTip.h +++ b/src/xrpld/app/tx/detail/BookTip.h @@ -20,8 +20,7 @@ #ifndef RIPPLE_APP_BOOK_BOOKTIP_H_INCLUDED #define RIPPLE_APP_BOOK_BOOKTIP_H_INCLUDED -#include - +#include #include #include diff --git a/src/xrpld/app/tx/detail/CancelCheck.cpp b/src/xrpld/app/tx/detail/CancelCheck.cpp index cfa3bd10e2..39d0d23096 100644 --- a/src/xrpld/app/tx/detail/CancelCheck.cpp +++ b/src/xrpld/app/tx/detail/CancelCheck.cpp @@ -19,9 +19,9 @@ #include #include -#include #include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/CancelOffer.cpp b/src/xrpld/app/tx/detail/CancelOffer.cpp index 004ae1e8b9..e0a5c7baa7 100644 --- a/src/xrpld/app/tx/detail/CancelOffer.cpp +++ b/src/xrpld/app/tx/detail/CancelOffer.cpp @@ -18,9 +18,9 @@ //============================================================================== #include -#include #include +#include #include namespace ripple { diff --git a/src/xrpld/app/tx/detail/Change.cpp b/src/xrpld/app/tx/detail/Change.cpp index 1392d84c08..de30ed5f62 100644 --- a/src/xrpld/app/tx/detail/Change.cpp +++ b/src/xrpld/app/tx/detail/Change.cpp @@ -22,9 +22,9 @@ #include #include #include -#include #include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/Clawback.cpp b/src/xrpld/app/tx/detail/Clawback.cpp index 41ab1256fb..08cf4baef0 100644 --- a/src/xrpld/app/tx/detail/Clawback.cpp +++ b/src/xrpld/app/tx/detail/Clawback.cpp @@ -18,8 +18,8 @@ 
//============================================================================== #include -#include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/CreateCheck.cpp b/src/xrpld/app/tx/detail/CreateCheck.cpp index 9baceef944..4dbfd1f81d 100644 --- a/src/xrpld/app/tx/detail/CreateCheck.cpp +++ b/src/xrpld/app/tx/detail/CreateCheck.cpp @@ -18,9 +18,9 @@ //============================================================================== #include -#include #include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/CreateOffer.cpp b/src/xrpld/app/tx/detail/CreateOffer.cpp index 3cfae92cbd..6185e52183 100644 --- a/src/xrpld/app/tx/detail/CreateOffer.cpp +++ b/src/xrpld/app/tx/detail/CreateOffer.cpp @@ -21,10 +21,10 @@ #include #include #include -#include #include #include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/Credentials.cpp b/src/xrpld/app/tx/detail/Credentials.cpp index 73c397cf37..b30ae200b7 100644 --- a/src/xrpld/app/tx/detail/Credentials.cpp +++ b/src/xrpld/app/tx/detail/Credentials.cpp @@ -17,12 +17,12 @@ */ //============================================================================== -#include #include -#include -#include #include +#include +#include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/DID.cpp b/src/xrpld/app/tx/detail/DID.cpp index 31ce7c8770..8c4a220844 100644 --- a/src/xrpld/app/tx/detail/DID.cpp +++ b/src/xrpld/app/tx/detail/DID.cpp @@ -18,10 +18,10 @@ //============================================================================== #include -#include -#include #include +#include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/DelegateSet.cpp b/src/xrpld/app/tx/detail/DelegateSet.cpp index ddeb01b399..53052fd75b 100644 --- a/src/xrpld/app/tx/detail/DelegateSet.cpp +++ b/src/xrpld/app/tx/detail/DelegateSet.cpp @@ -18,9 +18,9 @@ //============================================================================== #include -#include #include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/DeleteAccount.cpp b/src/xrpld/app/tx/detail/DeleteAccount.cpp index 4311aa79a8..deb1743991 100644 --- a/src/xrpld/app/tx/detail/DeleteAccount.cpp +++ b/src/xrpld/app/tx/detail/DeleteAccount.cpp @@ -17,7 +17,6 @@ */ //============================================================================== -#include #include #include #include @@ -25,11 +24,12 @@ #include #include #include -#include #include #include #include +#include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/DeleteOracle.cpp b/src/xrpld/app/tx/detail/DeleteOracle.cpp index 78e3d55230..ac195d100c 100644 --- a/src/xrpld/app/tx/detail/DeleteOracle.cpp +++ b/src/xrpld/app/tx/detail/DeleteOracle.cpp @@ -18,8 +18,8 @@ //============================================================================== #include -#include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/DepositPreauth.cpp b/src/xrpld/app/tx/detail/DepositPreauth.cpp index f10f09b38f..0e8c5c05d2 100644 --- a/src/xrpld/app/tx/detail/DepositPreauth.cpp +++ b/src/xrpld/app/tx/detail/DepositPreauth.cpp @@ -17,11 +17,11 @@ */ //============================================================================== -#include #include -#include #include +#include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/Escrow.cpp b/src/xrpld/app/tx/detail/Escrow.cpp index 3c15278efc..ace7437098 100644 --- a/src/xrpld/app/tx/detail/Escrow.cpp +++ 
b/src/xrpld/app/tx/detail/Escrow.cpp @@ -17,17 +17,17 @@ */ //============================================================================== -#include #include #include #include #include #include -#include -#include #include #include +#include +#include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/InvariantCheck.cpp b/src/xrpld/app/tx/detail/InvariantCheck.cpp index da0dfc117f..f20a49366b 100644 --- a/src/xrpld/app/tx/detail/InvariantCheck.cpp +++ b/src/xrpld/app/tx/detail/InvariantCheck.cpp @@ -19,14 +19,14 @@ #include #include -#include #include #include #include -#include -#include #include +#include +#include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/LedgerStateFix.cpp b/src/xrpld/app/tx/detail/LedgerStateFix.cpp index 008d9b6ae7..b861f1d0ef 100644 --- a/src/xrpld/app/tx/detail/LedgerStateFix.cpp +++ b/src/xrpld/app/tx/detail/LedgerStateFix.cpp @@ -19,8 +19,8 @@ #include #include -#include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/MPTokenAuthorize.cpp b/src/xrpld/app/tx/detail/MPTokenAuthorize.cpp index 77fe19a287..1c6d153ec5 100644 --- a/src/xrpld/app/tx/detail/MPTokenAuthorize.cpp +++ b/src/xrpld/app/tx/detail/MPTokenAuthorize.cpp @@ -18,8 +18,8 @@ //============================================================================== #include -#include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.cpp b/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.cpp index c195e45c1d..478ef17bb0 100644 --- a/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.cpp +++ b/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.cpp @@ -18,8 +18,8 @@ //============================================================================== #include -#include +#include #include #include diff --git a/src/xrpld/app/tx/detail/MPTokenIssuanceDestroy.cpp b/src/xrpld/app/tx/detail/MPTokenIssuanceDestroy.cpp index e2b87dbd79..2c330ba8f7 100644 --- a/src/xrpld/app/tx/detail/MPTokenIssuanceDestroy.cpp +++ b/src/xrpld/app/tx/detail/MPTokenIssuanceDestroy.cpp @@ -18,8 +18,8 @@ //============================================================================== #include -#include +#include #include #include diff --git a/src/xrpld/app/tx/detail/NFTokenAcceptOffer.cpp b/src/xrpld/app/tx/detail/NFTokenAcceptOffer.cpp index ab74e5ac39..0cf6a86a37 100644 --- a/src/xrpld/app/tx/detail/NFTokenAcceptOffer.cpp +++ b/src/xrpld/app/tx/detail/NFTokenAcceptOffer.cpp @@ -19,8 +19,8 @@ #include #include -#include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/NFTokenCancelOffer.cpp b/src/xrpld/app/tx/detail/NFTokenCancelOffer.cpp index f6072bc953..3d0bf04a1b 100644 --- a/src/xrpld/app/tx/detail/NFTokenCancelOffer.cpp +++ b/src/xrpld/app/tx/detail/NFTokenCancelOffer.cpp @@ -19,8 +19,8 @@ #include #include -#include +#include #include #include diff --git a/src/xrpld/app/tx/detail/NFTokenCreateOffer.cpp b/src/xrpld/app/tx/detail/NFTokenCreateOffer.cpp index 8e1a026415..f9cc8c1fc8 100644 --- a/src/xrpld/app/tx/detail/NFTokenCreateOffer.cpp +++ b/src/xrpld/app/tx/detail/NFTokenCreateOffer.cpp @@ -19,8 +19,8 @@ #include #include -#include +#include #include #include diff --git a/src/xrpld/app/tx/detail/NFTokenMint.cpp b/src/xrpld/app/tx/detail/NFTokenMint.cpp index 42b551b3a4..4c07a6e499 100644 --- a/src/xrpld/app/tx/detail/NFTokenMint.cpp +++ b/src/xrpld/app/tx/detail/NFTokenMint.cpp @@ -18,9 +18,9 @@ //============================================================================== #include -#include 
#include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/NFTokenUtils.cpp b/src/xrpld/app/tx/detail/NFTokenUtils.cpp index 4866a3b385..ad3e6f4d35 100644 --- a/src/xrpld/app/tx/detail/NFTokenUtils.cpp +++ b/src/xrpld/app/tx/detail/NFTokenUtils.cpp @@ -18,10 +18,10 @@ //============================================================================== #include -#include -#include #include +#include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/NFTokenUtils.h b/src/xrpld/app/tx/detail/NFTokenUtils.h index 7ee0541984..6d33d4ec1a 100644 --- a/src/xrpld/app/tx/detail/NFTokenUtils.h +++ b/src/xrpld/app/tx/detail/NFTokenUtils.h @@ -21,9 +21,9 @@ #define RIPPLE_TX_IMPL_DETAILS_NFTOKENUTILS_H_INCLUDED #include -#include #include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/Offer.h b/src/xrpld/app/tx/detail/Offer.h index d6ff4c7699..c214bea23f 100644 --- a/src/xrpld/app/tx/detail/Offer.h +++ b/src/xrpld/app/tx/detail/Offer.h @@ -20,10 +20,9 @@ #ifndef RIPPLE_APP_BOOK_OFFER_H_INCLUDED #define RIPPLE_APP_BOOK_OFFER_H_INCLUDED -#include - #include #include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/OfferStream.cpp b/src/xrpld/app/tx/detail/OfferStream.cpp index 55993f5c5f..8e1215f5c8 100644 --- a/src/xrpld/app/tx/detail/OfferStream.cpp +++ b/src/xrpld/app/tx/detail/OfferStream.cpp @@ -19,9 +19,9 @@ #include #include -#include #include +#include #include #include diff --git a/src/xrpld/app/tx/detail/OfferStream.h b/src/xrpld/app/tx/detail/OfferStream.h index cf6c1c3d2d..6470f876a6 100644 --- a/src/xrpld/app/tx/detail/OfferStream.h +++ b/src/xrpld/app/tx/detail/OfferStream.h @@ -22,11 +22,11 @@ #include #include -#include #include #include #include +#include #include diff --git a/src/xrpld/app/tx/detail/PayChan.cpp b/src/xrpld/app/tx/detail/PayChan.cpp index 12a9d0cb75..bdfe0d5c95 100644 --- a/src/xrpld/app/tx/detail/PayChan.cpp +++ b/src/xrpld/app/tx/detail/PayChan.cpp @@ -17,13 +17,13 @@ */ //============================================================================== -#include #include -#include -#include #include #include +#include +#include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/Payment.cpp b/src/xrpld/app/tx/detail/Payment.cpp index 784330b203..e622d54498 100644 --- a/src/xrpld/app/tx/detail/Payment.cpp +++ b/src/xrpld/app/tx/detail/Payment.cpp @@ -17,14 +17,14 @@ */ //============================================================================== -#include #include #include #include #include -#include #include +#include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/PermissionedDomainDelete.cpp b/src/xrpld/app/tx/detail/PermissionedDomainDelete.cpp index 64c498b68b..76224ba6b3 100644 --- a/src/xrpld/app/tx/detail/PermissionedDomainDelete.cpp +++ b/src/xrpld/app/tx/detail/PermissionedDomainDelete.cpp @@ -18,8 +18,8 @@ //============================================================================== #include -#include +#include #include namespace ripple { diff --git a/src/xrpld/app/tx/detail/PermissionedDomainSet.cpp b/src/xrpld/app/tx/detail/PermissionedDomainSet.cpp index 6e2df2a082..cc25809aa1 100644 --- a/src/xrpld/app/tx/detail/PermissionedDomainSet.cpp +++ b/src/xrpld/app/tx/detail/PermissionedDomainSet.cpp @@ -17,10 +17,10 @@ */ //============================================================================== -#include #include -#include +#include +#include #include #include diff --git a/src/xrpld/app/tx/detail/SetAccount.cpp 
b/src/xrpld/app/tx/detail/SetAccount.cpp index ec618981c1..dc84c7cc7e 100644 --- a/src/xrpld/app/tx/detail/SetAccount.cpp +++ b/src/xrpld/app/tx/detail/SetAccount.cpp @@ -20,9 +20,9 @@ #include #include #include -#include #include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/SetOracle.cpp b/src/xrpld/app/tx/detail/SetOracle.cpp index ba1d4a2e47..78ff8e2953 100644 --- a/src/xrpld/app/tx/detail/SetOracle.cpp +++ b/src/xrpld/app/tx/detail/SetOracle.cpp @@ -18,9 +18,9 @@ //============================================================================== #include -#include -#include +#include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/SetSignerList.cpp b/src/xrpld/app/tx/detail/SetSignerList.cpp index b52130e2fa..60f92cf87b 100644 --- a/src/xrpld/app/tx/detail/SetSignerList.cpp +++ b/src/xrpld/app/tx/detail/SetSignerList.cpp @@ -19,9 +19,9 @@ #include #include -#include #include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/SetTrust.cpp b/src/xrpld/app/tx/detail/SetTrust.cpp index d3b39aaf11..87f1721b29 100644 --- a/src/xrpld/app/tx/detail/SetTrust.cpp +++ b/src/xrpld/app/tx/detail/SetTrust.cpp @@ -19,9 +19,9 @@ #include #include -#include #include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/Taker.h b/src/xrpld/app/tx/detail/Taker.h new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/xrpld/app/tx/detail/Transactor.cpp b/src/xrpld/app/tx/detail/Transactor.cpp index 0db0484842..8f881d7252 100644 --- a/src/xrpld/app/tx/detail/Transactor.cpp +++ b/src/xrpld/app/tx/detail/Transactor.cpp @@ -18,7 +18,6 @@ //============================================================================== #include -#include #include #include #include @@ -26,11 +25,12 @@ #include #include #include -#include #include #include #include +#include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/VaultClawback.cpp b/src/xrpld/app/tx/detail/VaultClawback.cpp index 87740da179..061aacdbb8 100644 --- a/src/xrpld/app/tx/detail/VaultClawback.cpp +++ b/src/xrpld/app/tx/detail/VaultClawback.cpp @@ -18,9 +18,9 @@ //============================================================================== #include -#include #include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/VaultCreate.cpp b/src/xrpld/app/tx/detail/VaultCreate.cpp index 0b5cdd4fc0..855275bf4e 100644 --- a/src/xrpld/app/tx/detail/VaultCreate.cpp +++ b/src/xrpld/app/tx/detail/VaultCreate.cpp @@ -20,8 +20,8 @@ #include #include #include -#include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/VaultDelete.cpp b/src/xrpld/app/tx/detail/VaultDelete.cpp index d4b74ae1d5..5e4e16a99b 100644 --- a/src/xrpld/app/tx/detail/VaultDelete.cpp +++ b/src/xrpld/app/tx/detail/VaultDelete.cpp @@ -18,8 +18,8 @@ //============================================================================== #include -#include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/VaultDeposit.cpp b/src/xrpld/app/tx/detail/VaultDeposit.cpp index 5cdcb43e20..3d346d63a2 100644 --- a/src/xrpld/app/tx/detail/VaultDeposit.cpp +++ b/src/xrpld/app/tx/detail/VaultDeposit.cpp @@ -17,11 +17,11 @@ */ //============================================================================== -#include #include #include -#include +#include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/VaultSet.cpp b/src/xrpld/app/tx/detail/VaultSet.cpp index 4750f89be2..5a519f81cf 100644 --- 
a/src/xrpld/app/tx/detail/VaultSet.cpp +++ b/src/xrpld/app/tx/detail/VaultSet.cpp @@ -18,8 +18,8 @@ //============================================================================== #include -#include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/VaultWithdraw.cpp b/src/xrpld/app/tx/detail/VaultWithdraw.cpp index 0ceaabbfde..63cc22fe48 100644 --- a/src/xrpld/app/tx/detail/VaultWithdraw.cpp +++ b/src/xrpld/app/tx/detail/VaultWithdraw.cpp @@ -17,10 +17,10 @@ */ //============================================================================== -#include #include -#include +#include +#include #include #include #include diff --git a/src/xrpld/app/tx/detail/XChainBridge.cpp b/src/xrpld/app/tx/detail/XChainBridge.cpp index 6ca049ee66..92e3c7f625 100644 --- a/src/xrpld/app/tx/detail/XChainBridge.cpp +++ b/src/xrpld/app/tx/detail/XChainBridge.cpp @@ -21,15 +21,15 @@ #include #include #include -#include -#include -#include #include #include #include #include #include +#include +#include +#include #include #include #include diff --git a/src/xrpld/rpc/detail/RPCHelpers.cpp b/src/xrpld/rpc/detail/RPCHelpers.cpp index 52a69eb79e..4b28d44253 100644 --- a/src/xrpld/rpc/detail/RPCHelpers.cpp +++ b/src/xrpld/rpc/detail/RPCHelpers.cpp @@ -24,11 +24,11 @@ #include #include #include -#include #include #include #include +#include #include #include #include diff --git a/src/xrpld/rpc/handlers/AMMInfo.cpp b/src/xrpld/rpc/handlers/AMMInfo.cpp index b312264f90..7c1dd3ba58 100644 --- a/src/xrpld/rpc/handlers/AMMInfo.cpp +++ b/src/xrpld/rpc/handlers/AMMInfo.cpp @@ -19,11 +19,11 @@ #include #include -#include #include #include #include +#include #include #include diff --git a/src/xrpld/rpc/handlers/AccountChannels.cpp b/src/xrpld/rpc/handlers/AccountChannels.cpp index 5d810d61a0..1b0046ab64 100644 --- a/src/xrpld/rpc/handlers/AccountChannels.cpp +++ b/src/xrpld/rpc/handlers/AccountChannels.cpp @@ -17,12 +17,12 @@ */ //============================================================================== -#include -#include #include #include #include +#include +#include #include #include #include diff --git a/src/xrpld/rpc/handlers/AccountCurrenciesHandler.cpp b/src/xrpld/rpc/handlers/AccountCurrenciesHandler.cpp index 2d08561ec9..59a13f5893 100644 --- a/src/xrpld/rpc/handlers/AccountCurrenciesHandler.cpp +++ b/src/xrpld/rpc/handlers/AccountCurrenciesHandler.cpp @@ -18,10 +18,10 @@ //============================================================================== #include -#include #include #include +#include #include #include #include diff --git a/src/xrpld/rpc/handlers/AccountInfo.cpp b/src/xrpld/rpc/handlers/AccountInfo.cpp index 3432021690..0a36993d65 100644 --- a/src/xrpld/rpc/handlers/AccountInfo.cpp +++ b/src/xrpld/rpc/handlers/AccountInfo.cpp @@ -19,12 +19,12 @@ #include #include -#include #include #include #include #include +#include #include #include #include diff --git a/src/xrpld/rpc/handlers/AccountLines.cpp b/src/xrpld/rpc/handlers/AccountLines.cpp index e921eee386..893ca9a190 100644 --- a/src/xrpld/rpc/handlers/AccountLines.cpp +++ b/src/xrpld/rpc/handlers/AccountLines.cpp @@ -18,11 +18,11 @@ //============================================================================== #include -#include #include #include #include +#include #include #include #include diff --git a/src/xrpld/rpc/handlers/AccountObjects.cpp b/src/xrpld/rpc/handlers/AccountObjects.cpp index 2b2496a1dd..acd99c205e 100644 --- a/src/xrpld/rpc/handlers/AccountObjects.cpp +++ b/src/xrpld/rpc/handlers/AccountObjects.cpp @@ -18,11 
+18,11 @@ //============================================================================== #include -#include #include #include #include +#include #include #include #include diff --git a/src/xrpld/rpc/handlers/AccountOffers.cpp b/src/xrpld/rpc/handlers/AccountOffers.cpp index bc575d2d86..e65b39b35b 100644 --- a/src/xrpld/rpc/handlers/AccountOffers.cpp +++ b/src/xrpld/rpc/handlers/AccountOffers.cpp @@ -17,13 +17,13 @@ */ //============================================================================== -#include -#include #include #include #include #include +#include +#include #include #include #include diff --git a/src/xrpld/rpc/handlers/AccountTx.cpp b/src/xrpld/rpc/handlers/AccountTx.cpp index d5df40303b..6b1dccdba9 100644 --- a/src/xrpld/rpc/handlers/AccountTx.cpp +++ b/src/xrpld/rpc/handlers/AccountTx.cpp @@ -22,13 +22,13 @@ #include #include #include -#include #include #include #include #include #include +#include #include #include #include diff --git a/src/xrpld/rpc/handlers/BookOffers.cpp b/src/xrpld/rpc/handlers/BookOffers.cpp index df4712209c..6506474163 100644 --- a/src/xrpld/rpc/handlers/BookOffers.cpp +++ b/src/xrpld/rpc/handlers/BookOffers.cpp @@ -19,12 +19,12 @@ #include #include -#include #include #include #include #include +#include #include #include #include diff --git a/src/xrpld/rpc/handlers/DepositAuthorized.cpp b/src/xrpld/rpc/handlers/DepositAuthorized.cpp index 1bb480544d..c184a7f845 100644 --- a/src/xrpld/rpc/handlers/DepositAuthorized.cpp +++ b/src/xrpld/rpc/handlers/DepositAuthorized.cpp @@ -17,11 +17,11 @@ */ //============================================================================== -#include -#include #include #include +#include +#include #include #include #include diff --git a/src/xrpld/rpc/handlers/GatewayBalances.cpp b/src/xrpld/rpc/handlers/GatewayBalances.cpp index ca9e370c81..a4542f682f 100644 --- a/src/xrpld/rpc/handlers/GatewayBalances.cpp +++ b/src/xrpld/rpc/handlers/GatewayBalances.cpp @@ -19,10 +19,10 @@ #include #include -#include #include #include +#include #include #include #include diff --git a/src/xrpld/rpc/handlers/GetAggregatePrice.cpp b/src/xrpld/rpc/handlers/GetAggregatePrice.cpp index 33a88ba78f..2227143d70 100644 --- a/src/xrpld/rpc/handlers/GetAggregatePrice.cpp +++ b/src/xrpld/rpc/handlers/GetAggregatePrice.cpp @@ -19,11 +19,11 @@ #include #include -#include #include #include #include +#include #include #include diff --git a/src/xrpld/rpc/handlers/LedgerData.cpp b/src/xrpld/rpc/handlers/LedgerData.cpp index 7bd50cc1e5..e6c579a4b5 100644 --- a/src/xrpld/rpc/handlers/LedgerData.cpp +++ b/src/xrpld/rpc/handlers/LedgerData.cpp @@ -18,13 +18,13 @@ //============================================================================== #include -#include #include #include #include #include #include +#include #include #include #include diff --git a/src/xrpld/rpc/handlers/LedgerEntry.cpp b/src/xrpld/rpc/handlers/LedgerEntry.cpp index 61a7e2fb2c..cead16c04d 100644 --- a/src/xrpld/rpc/handlers/LedgerEntry.cpp +++ b/src/xrpld/rpc/handlers/LedgerEntry.cpp @@ -17,8 +17,6 @@ */ //============================================================================== -#include -#include #include #include #include @@ -28,6 +26,8 @@ #include #include #include +#include +#include #include #include #include diff --git a/src/xrpld/rpc/handlers/LedgerHandler.h b/src/xrpld/rpc/handlers/LedgerHandler.h index a573589cbc..4cbf984505 100644 --- a/src/xrpld/rpc/handlers/LedgerHandler.h +++ b/src/xrpld/rpc/handlers/LedgerHandler.h @@ -23,7 +23,6 @@ #include #include #include 
-#include #include #include #include @@ -31,6 +30,7 @@ #include #include +#include #include namespace Json { diff --git a/src/xrpld/rpc/handlers/LedgerHeader.cpp b/src/xrpld/rpc/handlers/LedgerHeader.cpp index 1815c8db56..a407dc4318 100644 --- a/src/xrpld/rpc/handlers/LedgerHeader.cpp +++ b/src/xrpld/rpc/handlers/LedgerHeader.cpp @@ -18,10 +18,10 @@ //============================================================================== #include -#include #include #include +#include #include namespace ripple { diff --git a/src/xrpld/rpc/handlers/NFTOffers.cpp b/src/xrpld/rpc/handlers/NFTOffers.cpp index 52a5c69ab0..598abd2c3f 100644 --- a/src/xrpld/rpc/handlers/NFTOffers.cpp +++ b/src/xrpld/rpc/handlers/NFTOffers.cpp @@ -17,13 +17,13 @@ */ //============================================================================== -#include -#include #include #include #include #include +#include +#include #include #include #include diff --git a/src/xrpld/rpc/handlers/NoRippleCheck.cpp b/src/xrpld/rpc/handlers/NoRippleCheck.cpp index a6007e9eab..abe51aa206 100644 --- a/src/xrpld/rpc/handlers/NoRippleCheck.cpp +++ b/src/xrpld/rpc/handlers/NoRippleCheck.cpp @@ -20,11 +20,11 @@ #include #include #include -#include #include #include #include +#include #include #include #include diff --git a/src/xrpld/rpc/handlers/PayChanClaim.cpp b/src/xrpld/rpc/handlers/PayChanClaim.cpp index b62f5e54e5..6945d2a051 100644 --- a/src/xrpld/rpc/handlers/PayChanClaim.cpp +++ b/src/xrpld/rpc/handlers/PayChanClaim.cpp @@ -18,11 +18,11 @@ //============================================================================== #include -#include #include #include #include +#include #include #include #include diff --git a/src/xrpld/rpc/handlers/Subscribe.cpp b/src/xrpld/rpc/handlers/Subscribe.cpp index 1696754e9c..be08769875 100644 --- a/src/xrpld/rpc/handlers/Subscribe.cpp +++ b/src/xrpld/rpc/handlers/Subscribe.cpp @@ -20,13 +20,13 @@ #include #include #include -#include #include #include #include #include #include +#include #include #include #include diff --git a/src/xrpld/rpc/handlers/TransactionEntry.cpp b/src/xrpld/rpc/handlers/TransactionEntry.cpp index c94e95063b..02ff329354 100644 --- a/src/xrpld/rpc/handlers/TransactionEntry.cpp +++ b/src/xrpld/rpc/handlers/TransactionEntry.cpp @@ -19,10 +19,10 @@ #include #include -#include #include #include +#include #include namespace ripple { From 2ae65d2fdbc2b8b6c364665486ac1e77b61e4402 Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Thu, 18 Sep 2025 18:04:12 +0100 Subject: [PATCH 195/244] Mark PermissionDelegation as unsupported --- include/xrpl/protocol/detail/features.macro | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/xrpl/protocol/detail/features.macro b/include/xrpl/protocol/detail/features.macro index e2725d1fc0..679c55b96c 100644 --- a/include/xrpl/protocol/detail/features.macro +++ b/include/xrpl/protocol/detail/features.macro @@ -41,7 +41,7 @@ XRPL_FIX (AMMv1_3, Supported::yes, VoteBehavior::DefaultNo XRPL_FEATURE(PermissionedDEX, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(Batch, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(SingleAssetVault, Supported::no, VoteBehavior::DefaultNo) -XRPL_FEATURE(PermissionDelegation, Supported::yes, VoteBehavior::DefaultNo) +XRPL_FEATURE(PermissionDelegation, Supported::no, VoteBehavior::DefaultNo) XRPL_FIX (PayChanCancelAfter, Supported::yes, VoteBehavior::DefaultNo) // Check flags in Credential transactions XRPL_FIX (InvalidTxFlags, Supported::yes, VoteBehavior::DefaultNo) From 
c46888f8f7d8ed06a30407863e07ede8a6a94425 Mon Sep 17 00:00:00 2001
From: Bronek Kozicki
Date: Thu, 18 Sep 2025 18:08:57 +0100
Subject: [PATCH 196/244] Set version to 2.6.1-rc2

---
 src/libxrpl/protocol/BuildInfo.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/libxrpl/protocol/BuildInfo.cpp b/src/libxrpl/protocol/BuildInfo.cpp
index ae15ea6dec..ed9dd68da1 100644
--- a/src/libxrpl/protocol/BuildInfo.cpp
+++ b/src/libxrpl/protocol/BuildInfo.cpp
@@ -36,7 +36,7 @@ namespace BuildInfo {
 // and follow the format described at http://semver.org/
 //------------------------------------------------------------------------------
 // clang-format off
-char const* const versionString = "2.6.1-rc1"
+char const* const versionString = "2.6.1-rc2"
 // clang-format on

 #if defined(DEBUG) || defined(SANITIZER)

From 3cbdf818a73efc039b1143cd099e735e12ae140d Mon Sep 17 00:00:00 2001
From: Ed Hennis
Date: Thu, 18 Sep 2025 13:55:49 -0400
Subject: [PATCH 197/244] Miscellaneous refactors and updates (#5590)

- Added a new Invariant: `ValidPseudoAccounts`, which checks that all pseudo-accounts behave consistently through creation and updates, and that no "real" accounts look like pseudo-accounts (which means they don't have a 0 sequence).
- `to_short_string(base_uint)`. Like `to_string`, but only returns the first 8 characters. (Similar to how a git commit ID can be abbreviated.) Used as a wrapped sink to prefix most transaction-related messages. More can be added later.
- `XRPL_ASSERT_PARTS`. Convenience wrapper for `XRPL_ASSERT`, which takes the `function` and `description` as separate parameters.
- `SField::sMD_PseudoAccount`. Metadata option for `SField` definitions to indicate that the field, if set in an `AccountRoot`, marks the account as a pseudo-account. Removes the need for hard-coded field lists all over the place. Added the flag to `AMMID` and `VaultID`.
- Added functionality to the `SField` ctor to detect both code and name collisions using asserts, and to require all SFields to have a name.
- Convenience type aliases `STLedgerEntry::const_pointer` and `STLedgerEntry::const_ref`. (`SLE` is an alias to `STLedgerEntry`.)
- Generalized `feeunit.h` (`TaggedFee`) into `unit.h` (`ValueUnit`) and added new "BIPS"-related tags for future use. Also refactored the type restrictions to use Concepts.
- Restructured `transactions.macro` to do two big things:
  1. Include the `#include` directives for transactor header files directly in the macro file. Removes the need to update `applySteps.cpp` and the resulting conflicts.
  2. Added a `privileges` parameter to the `TRANSACTION` macro, which specifies some of the operations a transaction is allowed to do. These `privileges` are enforced by invariant checks. Again, removed the need to update scattered lists of transaction types in various checks.
- Unit tests:
  1. Moved more helper functions into `TestHelpers.h` and `.cpp`.
  2. Cleaned up the namespaces to prevent / mitigate random collisions and ambiguous symbols, particularly in unity builds.
  3. Generalized `Env::balance` to add support for `MPTIssue` and `Asset`.
  4. Added a set of helper classes to simplify `Env` transaction parameter classes: `JTxField`, `JTxFieldWrapper`, and a bunch of classes derived or aliased from it. For an example of how awesome it is, check the changes in `src/test/jtx/escrow.h` to see how much simpler the definitions are for `finish_time`, `cancel_time`, `condition`, and `fulfillment`.
  5. Generalized several of the amount-related helper classes to understand `Asset`s.
  6.
`env.balance` for an MPT issuer will return a negative number (or 0) for consistency with IOUs. --- include/xrpl/basics/base_uint.h | 10 + include/xrpl/basics/safe_cast.h | 7 +- include/xrpl/beast/utility/instrumentation.h | 5 + include/xrpl/ledger/View.h | 16 + include/xrpl/protocol/FeeUnits.h | 565 ------------------ include/xrpl/protocol/LedgerFormats.h | 2 +- include/xrpl/protocol/Protocol.h | 1 - include/xrpl/protocol/SField.h | 14 +- include/xrpl/protocol/STLedgerEntry.h | 9 +- include/xrpl/protocol/STObject.h | 2 +- include/xrpl/protocol/STValidation.h | 2 +- include/xrpl/protocol/TxFlags.h | 5 +- include/xrpl/protocol/Units.h | 555 +++++++++++++++++ include/xrpl/protocol/XRPAmount.h | 4 +- include/xrpl/protocol/detail/sfields.macro | 6 +- .../xrpl/protocol/detail/transactions.macro | 237 +++++++- include/xrpl/protocol/jss.h | 6 +- src/libxrpl/ledger/View.cpp | 109 +++- src/libxrpl/protocol/SField.cpp | 35 +- src/libxrpl/protocol/TxFormats.cpp | 3 +- src/test/app/AMMCalc_test.cpp | 2 +- src/test/app/AMMExtended_test.cpp | 194 +++--- src/test/app/AMM_test.cpp | 199 +++--- src/test/app/Credentials_test.cpp | 9 - src/test/app/DID_test.cpp | 8 - src/test/app/EscrowToken_test.cpp | 16 +- src/test/app/Invariants_test.cpp | 132 +++- src/test/app/LPTokenTransfer_test.cpp | 16 +- src/test/app/ValidatorSite_test.cpp | 2 +- src/test/app/Vault_test.cpp | 40 +- src/test/basics/FileUtilities_test.cpp | 2 +- .../{FeeUnits_test.cpp => Units_test.cpp} | 32 +- src/test/basics/base_uint_test.cpp | 5 + src/test/core/Config_test.cpp | 6 +- src/test/csf/Digraph.h | 6 +- src/test/jtx/Env.h | 3 + src/test/jtx/TestHelpers.h | 277 ++++++++- src/test/jtx/amount.h | 84 ++- src/test/jtx/balance.h | 8 +- src/test/jtx/escrow.h | 81 +-- src/test/jtx/fee.h | 5 + src/test/jtx/flags.h | 32 +- src/test/jtx/impl/Env.cpp | 12 +- src/test/jtx/impl/TestHelpers.cpp | 30 +- src/test/jtx/impl/amount.cpp | 8 +- src/test/jtx/impl/balance.cpp | 55 +- src/test/jtx/impl/fee.cpp | 9 +- src/test/jtx/impl/owners.cpp | 8 +- src/test/jtx/owners.h | 7 +- src/test/jtx/require.h | 7 +- src/test/jtx/tags.h | 10 + src/test/nodestore/import_test.cpp | 3 +- src/test/unit_test/FileDirGuard.h | 2 - src/test/unit_test/multi_runner.cpp | 6 +- src/test/unit_test/multi_runner.h | 3 +- src/xrpld/app/main/Application.cpp | 4 +- src/xrpld/app/misc/detail/LoadFeeTrack.cpp | 3 +- src/xrpld/app/tx/detail/DeleteAccount.cpp | 2 +- src/xrpld/app/tx/detail/Escrow.cpp | 4 +- src/xrpld/app/tx/detail/InvariantCheck.cpp | 328 +++++++--- src/xrpld/app/tx/detail/InvariantCheck.h | 31 +- src/xrpld/app/tx/detail/Transactor.cpp | 5 +- src/xrpld/app/tx/detail/Transactor.h | 2 + src/xrpld/app/tx/detail/applySteps.cpp | 65 +- src/xrpld/rpc/detail/RPCHelpers.cpp | 2 +- 65 files changed, 2148 insertions(+), 1210 deletions(-) delete mode 100644 include/xrpl/protocol/FeeUnits.h create mode 100644 include/xrpl/protocol/Units.h rename src/test/basics/{FeeUnits_test.cpp => Units_test.cpp} (92%) diff --git a/include/xrpl/basics/base_uint.h b/include/xrpl/basics/base_uint.h index d36bf74c54..b1a4622cc4 100644 --- a/include/xrpl/basics/base_uint.h +++ b/include/xrpl/basics/base_uint.h @@ -632,6 +632,16 @@ to_string(base_uint const& a) return strHex(a.cbegin(), a.cend()); } +template +inline std::string +to_short_string(base_uint const& a) +{ + static_assert( + base_uint::bytes > 4, + "For 4 bytes or less, use a native type"); + return strHex(a.cbegin(), a.cbegin() + 4) + "..."; +} + template inline std::ostream& operator<<(std::ostream& out, base_uint const& u) diff --git 
a/include/xrpl/basics/safe_cast.h b/include/xrpl/basics/safe_cast.h index f193f1793f..a750a3b6fc 100644 --- a/include/xrpl/basics/safe_cast.h +++ b/include/xrpl/basics/safe_cast.h @@ -28,9 +28,8 @@ namespace ripple { // the destination can hold all values of the source. This is particularly // handy when the source or destination is an enumeration type. -template -static constexpr bool is_safetocasttovalue_v = - (std::is_integral_v && std::is_integral_v) && +template +concept SafeToCast = (std::is_integral_v && std::is_integral_v) && (std::is_signed::value || std::is_unsigned::value) && (std::is_signed::value != std::is_signed::value ? sizeof(Dest) > sizeof(Src) @@ -78,7 +77,7 @@ inline constexpr std:: unsafe_cast(Src s) noexcept { static_assert( - !is_safetocasttovalue_v, + !SafeToCast, "Only unsafe if casting signed to unsigned or " "destination is too small"); return static_cast(s); diff --git a/include/xrpl/beast/utility/instrumentation.h b/include/xrpl/beast/utility/instrumentation.h index 72c48959a0..3594855eef 100644 --- a/include/xrpl/beast/utility/instrumentation.h +++ b/include/xrpl/beast/utility/instrumentation.h @@ -39,11 +39,16 @@ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. #endif #define XRPL_ASSERT ALWAYS_OR_UNREACHABLE +#define XRPL_ASSERT_PARTS(cond, function, description, ...) \ + XRPL_ASSERT(cond, function " : " description) // How to use the instrumentation macros: // // * XRPL_ASSERT if cond must be true but the line might not be reached during // fuzzing. Same like `assert` in normal use. +// * XRPL_ASSERT_PARTS is for convenience, and works like XRPL_ASSERT, but +// splits the message param into "function" and "description", then joins +// them with " : " before passing to XRPL_ASSERT. // * ALWAYS if cond must be true _and_ the line must be reached during fuzzing. // Same like `assert` in normal use. // * REACHABLE if the line must be reached during fuzzing diff --git a/include/xrpl/ledger/View.h b/include/xrpl/ledger/View.h index 3d67e25a22..9698b4fda3 100644 --- a/include/xrpl/ledger/View.h +++ b/include/xrpl/ledger/View.h @@ -561,12 +561,28 @@ createPseudoAccount( [[nodiscard]] bool isPseudoAccount(std::shared_ptr sleAcct); +// Returns the list of fields that define an ACCOUNT_ROOT as a pseudo-account if +// set +// Pseudo-account designator fields MUST be maintained by including the +// SField::sMD_PseudoAccount flag in the SField definition. (Don't forget to +// "| SField::sMD_Default"!) The fields do NOT need to be amendment-gated, +// since a non-active amendment will not set any field, by definition. +// Specific properties of a pseudo-account are NOT checked here, that's what +// InvariantCheck is for. 
+[[nodiscard]] std::vector const& +getPseudoAccountFields(); + [[nodiscard]] inline bool isPseudoAccount(ReadView const& view, AccountID accountId) { return isPseudoAccount(view.read(keylet::account(accountId))); } +[[nodiscard]] TER +canAddHolding(ReadView const& view, Asset const& asset); + +/// Any transactors that call addEmptyHolding() in doApply must call +/// canAddHolding() in preflight with the same View and Asset [[nodiscard]] TER addEmptyHolding( ApplyView& view, diff --git a/include/xrpl/protocol/FeeUnits.h b/include/xrpl/protocol/FeeUnits.h deleted file mode 100644 index 31a1886b7f..0000000000 --- a/include/xrpl/protocol/FeeUnits.h +++ /dev/null @@ -1,565 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2019 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ - -#ifndef BASICS_FEES_H_INCLUDED -#define BASICS_FEES_H_INCLUDED - -#include -#include -#include -#include - -#include -#include - -#include -#include -#include - -namespace ripple { - -namespace feeunit { - -/** "drops" are the smallest divisible amount of XRP. This is what most - of the code uses. */ -struct dropTag; -/** "fee units" calculations are a not-really-unitless value that is used - to express the cost of a given transaction vs. a reference transaction. - They are primarily used by the Transactor classes. */ -struct feeunitTag; -/** "fee levels" are used by the transaction queue to compare the relative - cost of transactions that require different levels of effort to process. - See also: src/ripple/app/misc/FeeEscalation.md#fee-level */ -struct feelevelTag; -/** unitless values are plain scalars wrapped in a TaggedFee. They are - used for calculations in this header. */ -struct unitlessTag; - -template -using enable_if_unit_t = typename std::enable_if_t< - std::is_class_v && std::is_object_v && - std::is_object_v>; - -/** `is_usable_unit_v` is checked to ensure that only values with - known valid type tags can be used (sometimes transparently) in - non-fee contexts. At the time of implementation, this includes - all known tags, but more may be added in the future, and they - should not be added automatically unless determined to be - appropriate. 
-*/ -template > -constexpr bool is_usable_unit_v = - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v; - -template -class TaggedFee : private boost::totally_ordered>, - private boost::additive>, - private boost::equality_comparable, T>, - private boost::dividable, T>, - private boost::modable, T>, - private boost::unit_steppable> -{ -public: - using unit_type = UnitTag; - using value_type = T; - -private: - value_type fee_; - -protected: - template - static constexpr bool is_compatible_v = - std::is_arithmetic_v && std::is_arithmetic_v && - std::is_convertible_v; - - template > - static constexpr bool is_compatiblefee_v = - is_compatible_v && - std::is_same_v; - - template - using enable_if_compatible_t = - typename std::enable_if_t>; - - template - using enable_if_compatiblefee_t = - typename std::enable_if_t>; - -public: - TaggedFee() = default; - constexpr TaggedFee(TaggedFee const& other) = default; - constexpr TaggedFee& - operator=(TaggedFee const& other) = default; - - constexpr explicit TaggedFee(beast::Zero) : fee_(0) - { - } - - constexpr TaggedFee& - operator=(beast::Zero) - { - fee_ = 0; - return *this; - } - - constexpr explicit TaggedFee(value_type fee) : fee_(fee) - { - } - - TaggedFee& - operator=(value_type fee) - { - fee_ = fee; - return *this; - } - - /** Instances with the same unit, and a type that is - "safe" to convert to this one can be converted - implicitly */ - template < - class Other, - class = std::enable_if_t< - is_compatible_v && - is_safetocasttovalue_v>> - constexpr TaggedFee(TaggedFee const& fee) - : TaggedFee(safe_cast(fee.fee())) - { - } - - constexpr TaggedFee - operator*(value_type const& rhs) const - { - return TaggedFee{fee_ * rhs}; - } - - friend constexpr TaggedFee - operator*(value_type lhs, TaggedFee const& rhs) - { - // multiplication is commutative - return rhs * lhs; - } - - constexpr value_type - operator/(TaggedFee const& rhs) const - { - return fee_ / rhs.fee_; - } - - TaggedFee& - operator+=(TaggedFee const& other) - { - fee_ += other.fee(); - return *this; - } - - TaggedFee& - operator-=(TaggedFee const& other) - { - fee_ -= other.fee(); - return *this; - } - - TaggedFee& - operator++() - { - ++fee_; - return *this; - } - - TaggedFee& - operator--() - { - --fee_; - return *this; - } - - TaggedFee& - operator*=(value_type const& rhs) - { - fee_ *= rhs; - return *this; - } - - TaggedFee& - operator/=(value_type const& rhs) - { - fee_ /= rhs; - return *this; - } - - template - std::enable_if_t, TaggedFee&> - operator%=(value_type const& rhs) - { - fee_ %= rhs; - return *this; - } - - TaggedFee - operator-() const - { - static_assert( - std::is_signed_v, "- operator illegal on unsigned fee types"); - return TaggedFee{-fee_}; - } - - bool - operator==(TaggedFee const& other) const - { - return fee_ == other.fee_; - } - - template > - bool - operator==(TaggedFee const& other) const - { - return fee_ == other.fee(); - } - - bool - operator==(value_type other) const - { - return fee_ == other; - } - - template > - bool - operator!=(TaggedFee const& other) const - { - return !operator==(other); - } - - bool - operator<(TaggedFee const& other) const - { - return fee_ < other.fee_; - } - - /** Returns true if the amount is not zero */ - explicit constexpr - operator bool() const noexcept - { - return fee_ != 0; - } - - /** Return the sign of the amount */ - constexpr int - signum() const noexcept - { - return (fee_ < 0) ? -1 : (fee_ ? 
1 : 0); - } - - /** Returns the number of drops */ - constexpr value_type - fee() const - { - return fee_; - } - - template - constexpr double - decimalFromReference(TaggedFee reference) const - { - return static_cast(fee_) / reference.fee(); - } - - // `is_usable_unit_v` is checked to ensure that only values with - // known valid type tags can be converted to JSON. At the time - // of implementation, that includes all known tags, but more may - // be added in the future. - std::enable_if_t, Json::Value> - jsonClipped() const - { - if constexpr (std::is_integral_v) - { - using jsontype = std::conditional_t< - std::is_signed_v, - Json::Int, - Json::UInt>; - - constexpr auto min = std::numeric_limits::min(); - constexpr auto max = std::numeric_limits::max(); - - if (fee_ < min) - return min; - if (fee_ > max) - return max; - return static_cast(fee_); - } - else - { - return fee_; - } - } - - /** Returns the underlying value. Code SHOULD NOT call this - function unless the type has been abstracted away, - e.g. in a templated function. - */ - constexpr value_type - value() const - { - return fee_; - } - - friend std::istream& - operator>>(std::istream& s, TaggedFee& val) - { - s >> val.fee_; - return s; - } -}; - -// Output Fees as just their numeric value. -template -std::basic_ostream& -operator<<(std::basic_ostream& os, TaggedFee const& q) -{ - return os << q.value(); -} - -template -std::string -to_string(TaggedFee const& amount) -{ - return std::to_string(amount.fee()); -} - -template > -constexpr bool can_muldiv_source_v = - std::is_convertible_v; - -template > -constexpr bool can_muldiv_dest_v = - can_muldiv_source_v && // Dest is also a source - std::is_convertible_v && - sizeof(typename Dest::value_type) >= sizeof(std::uint64_t); - -template < - class Source1, - class Source2, - class = enable_if_unit_t, - class = enable_if_unit_t> -constexpr bool can_muldiv_sources_v = - can_muldiv_source_v && can_muldiv_source_v && - std::is_same_v; - -template < - class Source1, - class Source2, - class Dest, - class = enable_if_unit_t, - class = enable_if_unit_t, - class = enable_if_unit_t> -constexpr bool can_muldiv_v = - can_muldiv_sources_v && can_muldiv_dest_v; -// Source and Dest can be the same by default - -template < - class Source1, - class Source2, - class Dest, - class = enable_if_unit_t, - class = enable_if_unit_t, - class = enable_if_unit_t> -constexpr bool can_muldiv_commute_v = can_muldiv_v && - !std::is_same_v; - -template -using enable_muldiv_source_t = - typename std::enable_if_t>; - -template -using enable_muldiv_dest_t = typename std::enable_if_t>; - -template -using enable_muldiv_sources_t = - typename std::enable_if_t>; - -template -using enable_muldiv_t = - typename std::enable_if_t>; - -template -using enable_muldiv_commute_t = - typename std::enable_if_t>; - -template -TaggedFee -scalar(T value) -{ - return TaggedFee{value}; -} - -template < - class Source1, - class Source2, - class Dest, - class = enable_muldiv_t> -std::optional -mulDivU(Source1 value, Dest mul, Source2 div) -{ - // Fees can never be negative in any context. - if (value.value() < 0 || mul.value() < 0 || div.value() < 0) - { - // split the asserts so if one hits, the user can tell which - // without a debugger. 
- XRPL_ASSERT( - value.value() >= 0, - "ripple::feeunit::mulDivU : minimum value input"); - XRPL_ASSERT( - mul.value() >= 0, "ripple::feeunit::mulDivU : minimum mul input"); - XRPL_ASSERT( - div.value() >= 0, "ripple::feeunit::mulDivU : minimum div input"); - return std::nullopt; - } - - using desttype = typename Dest::value_type; - constexpr auto max = std::numeric_limits::max(); - - // Shortcuts, since these happen a lot in the real world - if (value == div) - return mul; - if (mul.value() == div.value()) - { - if (value.value() > max) - return std::nullopt; - return Dest{static_cast(value.value())}; - } - - using namespace boost::multiprecision; - - uint128_t product; - product = multiply( - product, - static_cast(value.value()), - static_cast(mul.value())); - - auto quotient = product / div.value(); - - if (quotient > max) - return std::nullopt; - - return Dest{static_cast(quotient)}; -} - -} // namespace feeunit - -template -using FeeLevel = feeunit::TaggedFee; -using FeeLevel64 = FeeLevel; -using FeeLevelDouble = FeeLevel; - -template < - class Source1, - class Source2, - class Dest, - class = feeunit::enable_muldiv_t> -std::optional -mulDiv(Source1 value, Dest mul, Source2 div) -{ - return feeunit::mulDivU(value, mul, div); -} - -template < - class Source1, - class Source2, - class Dest, - class = feeunit::enable_muldiv_commute_t> -std::optional -mulDiv(Dest value, Source1 mul, Source2 div) -{ - // Multiplication is commutative - return feeunit::mulDivU(mul, value, div); -} - -template > -std::optional -mulDiv(std::uint64_t value, Dest mul, std::uint64_t div) -{ - // Give the scalars a non-tag so the - // unit-handling version gets called. - return feeunit::mulDivU(feeunit::scalar(value), mul, feeunit::scalar(div)); -} - -template > -std::optional -mulDiv(Dest value, std::uint64_t mul, std::uint64_t div) -{ - // Multiplication is commutative - return mulDiv(mul, value, div); -} - -template < - class Source1, - class Source2, - class = feeunit::enable_muldiv_sources_t> -std::optional -mulDiv(Source1 value, std::uint64_t mul, Source2 div) -{ - // Give the scalars a dimensionless unit so the - // unit-handling version gets called. - auto unitresult = feeunit::mulDivU(value, feeunit::scalar(mul), div); - - if (!unitresult) - return std::nullopt; - - return unitresult->value(); -} - -template < - class Source1, - class Source2, - class = feeunit::enable_muldiv_sources_t> -std::optional -mulDiv(std::uint64_t value, Source1 mul, Source2 div) -{ - // Multiplication is commutative - return mulDiv(mul, value, div); -} - -template -constexpr std::enable_if_t< - std::is_same_v && - std::is_integral_v && - std::is_integral_v, - Dest> -safe_cast(Src s) noexcept -{ - // Dest may not have an explicit value constructor - return Dest{safe_cast(s.value())}; -} - -template -constexpr std::enable_if_t< - std::is_same_v && - std::is_integral_v && - std::is_integral_v, - Dest> -unsafe_cast(Src s) noexcept -{ - // Dest may not have an explicit value constructor - return Dest{unsafe_cast(s.value())}; -} - -} // namespace ripple - -#endif // BASICS_FEES_H_INCLUDED diff --git a/include/xrpl/protocol/LedgerFormats.h b/include/xrpl/protocol/LedgerFormats.h index 7cf92d0822..40c9fce1bb 100644 --- a/include/xrpl/protocol/LedgerFormats.h +++ b/include/xrpl/protocol/LedgerFormats.h @@ -56,7 +56,7 @@ enum LedgerEntryType : std::uint16_t #pragma push_macro("LEDGER_ENTRY") #undef LEDGER_ENTRY -#define LEDGER_ENTRY(tag, value, name, rpcName, fields) tag = value, +#define LEDGER_ENTRY(tag, value, ...) 
tag = value, #include diff --git a/include/xrpl/protocol/Protocol.h b/include/xrpl/protocol/Protocol.h index a0fcfee34c..84b8c3889b 100644 --- a/include/xrpl/protocol/Protocol.h +++ b/include/xrpl/protocol/Protocol.h @@ -22,7 +22,6 @@ #include #include -#include #include diff --git a/include/xrpl/protocol/SField.h b/include/xrpl/protocol/SField.h index 777cfa02ba..2f85cf3b7c 100644 --- a/include/xrpl/protocol/SField.h +++ b/include/xrpl/protocol/SField.h @@ -22,6 +22,7 @@ #include #include +#include #include #include @@ -148,8 +149,10 @@ public: sMD_ChangeNew = 0x02, // new value when it changes sMD_DeleteFinal = 0x04, // final value when it is deleted sMD_Create = 0x08, // value when it's created - sMD_Always = 0x10, // value when node containing it is affected at all - sMD_BaseTen = 0x20, + sMD_Always = 0x10, // value when node containing it is affected at all + sMD_BaseTen = 0x20, // value is treated as base 10, overriding behavior + sMD_PseudoAccount = 0x40, // if this field is set in an ACCOUNT_ROOT + // _only_, then it is a pseudo-account sMD_Default = sMD_ChangeOrig | sMD_ChangeNew | sMD_DeleteFinal | sMD_Create }; @@ -184,7 +187,7 @@ public: char const* fn, int meta = sMD_Default, IsSigning signing = IsSigning::yes); - explicit SField(private_access_tag_t, int fc); + explicit SField(private_access_tag_t, int fc, char const* fn); static SField const& getField(int fieldCode); @@ -297,7 +300,7 @@ public: static int compare(SField const& f1, SField const& f2); - static std::map const& + static std::unordered_map const& getKnownCodeToField() { return knownCodeToField; @@ -305,7 +308,8 @@ public: private: static int num; - static std::map knownCodeToField; + static std::unordered_map knownCodeToField; + static std::unordered_map knownNameToField; }; /** A field with a type known at compile time. */ diff --git a/include/xrpl/protocol/STLedgerEntry.h b/include/xrpl/protocol/STLedgerEntry.h index 3609a04d4b..571c7af5fe 100644 --- a/include/xrpl/protocol/STLedgerEntry.h +++ b/include/xrpl/protocol/STLedgerEntry.h @@ -26,7 +26,9 @@ namespace ripple { class Rules; +namespace test { class Invariants_test; +} class STLedgerEntry final : public STObject, public CountedObject { @@ -36,6 +38,8 @@ class STLedgerEntry final : public STObject, public CountedObject public: using pointer = std::shared_ptr; using ref = std::shared_ptr const&; + using const_pointer = std::shared_ptr; + using const_ref = std::shared_ptr const&; /** Create an empty object with the given key and type. */ explicit STLedgerEntry(Keylet const& k); @@ -54,7 +58,7 @@ public: getText() const override; Json::Value - getJson(JsonOptions options) const override; + getJson(JsonOptions options = JsonOptions::none) const override; /** Returns the 'key' (or 'index') of this item. 
The key identifies this entry's position in @@ -84,7 +88,8 @@ private: void setSLEType(); - friend Invariants_test; // this test wants access to the private type_ + friend test::Invariants_test; // this test wants access to the private + // type_ STBase* copy(std::size_t n, void* buf) const override; diff --git a/include/xrpl/protocol/STObject.h b/include/xrpl/protocol/STObject.h index 6cd083ef85..b3cb561390 100644 --- a/include/xrpl/protocol/STObject.h +++ b/include/xrpl/protocol/STObject.h @@ -25,7 +25,6 @@ #include #include #include -#include #include #include #include @@ -34,6 +33,7 @@ #include #include #include +#include #include #include diff --git a/include/xrpl/protocol/STValidation.h b/include/xrpl/protocol/STValidation.h index 2aa74203a2..991922514d 100644 --- a/include/xrpl/protocol/STValidation.h +++ b/include/xrpl/protocol/STValidation.h @@ -22,10 +22,10 @@ #include #include -#include #include #include #include +#include #include #include diff --git a/include/xrpl/protocol/TxFlags.h b/include/xrpl/protocol/TxFlags.h index 70c6833d3a..30d991e680 100644 --- a/include/xrpl/protocol/TxFlags.h +++ b/include/xrpl/protocol/TxFlags.h @@ -127,6 +127,8 @@ constexpr std::uint32_t tfTrustSetPermissionMask = ~(tfUniversal | tfSetfAuth | // EnableAmendment flags: constexpr std::uint32_t tfGotMajority = 0x00010000; constexpr std::uint32_t tfLostMajority = 0x00020000; +constexpr std::uint32_t tfChangeMask = + ~( tfUniversal | tfGotMajority | tfLostMajority); // PaymentChannelClaim flags: constexpr std::uint32_t tfRenew = 0x00010000; @@ -141,7 +143,8 @@ constexpr std::uint32_t const tfTransferable = 0x00000008; constexpr std::uint32_t const tfMutable = 0x00000010; // MPTokenIssuanceCreate flags: -// NOTE - there is intentionally no flag here for lsfMPTLocked, which this transaction cannot mutate. +// Note: tf/lsfMPTLocked is intentionally omitted, since this transaction +// is not allowed to modify it. constexpr std::uint32_t const tfMPTCanLock = lsfMPTCanLock; constexpr std::uint32_t const tfMPTRequireAuth = lsfMPTRequireAuth; constexpr std::uint32_t const tfMPTCanEscrow = lsfMPTCanEscrow; diff --git a/include/xrpl/protocol/Units.h b/include/xrpl/protocol/Units.h new file mode 100644 index 0000000000..6b2ae67059 --- /dev/null +++ b/include/xrpl/protocol/Units.h @@ -0,0 +1,555 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2019 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ + +#ifndef PROTOCOL_UNITS_H_INCLUDED +#define PROTOCOL_UNITS_H_INCLUDED + +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +namespace ripple { + +namespace unit { + +/** "drops" are the smallest divisible amount of XRP. This is what most + of the code uses. 
*/ +struct dropTag; +/** "fee levels" are used by the transaction queue to compare the relative + cost of transactions that require different levels of effort to process. + See also: src/ripple/app/misc/FeeEscalation.md#fee-level */ +struct feelevelTag; +/** unitless values are plain scalars wrapped in a ValueUnit. They are + used for calculations in this header. */ +struct unitlessTag; + +/** Units to represent basis points (bips) and 1/10 basis points */ +class BipsTag; +class TenthBipsTag; + +// These names don't have to be too descriptive, because we're in the "unit" +// namespace. + +template +concept Valid = std::is_class_v && std::is_object_v && + std::is_object_v; + +/** `Usable` is checked to ensure that only values with + known valid type tags can be used (sometimes transparently) in + non-unit contexts. At the time of implementation, this includes + all known tags, but more may be added in the future, and they + should not be added automatically unless determined to be + appropriate. +*/ +template +concept Usable = Valid && + (std::is_same_v || + std::is_same_v || + std::is_same_v || + std::is_same_v || + std::is_same_v); + +template +concept Compatible = Valid && std::is_arithmetic_v && + std::is_arithmetic_v && + std::is_convertible_v; + +template +concept Integral = std::is_integral_v; + +template +concept IntegralValue = Integral; + +template +concept CastableValue = IntegralValue && IntegralValue && + std::is_same_v; + +template +class ValueUnit : private boost::totally_ordered>, + private boost::additive>, + private boost::equality_comparable, T>, + private boost::dividable, T>, + private boost::modable, T>, + private boost::unit_steppable> +{ +public: + using unit_type = UnitTag; + using value_type = T; + +private: + value_type value_; + +public: + ValueUnit() = default; + constexpr ValueUnit(ValueUnit const& other) = default; + constexpr ValueUnit& + operator=(ValueUnit const& other) = default; + + constexpr explicit ValueUnit(beast::Zero) : value_(0) + { + } + + constexpr ValueUnit& + operator=(beast::Zero) + { + value_ = 0; + return *this; + } + + constexpr explicit ValueUnit(value_type value) : value_(value) + { + } + + constexpr ValueUnit& + operator=(value_type value) + { + value_ = value; + return *this; + } + + /** Instances with the same unit, and a type that is + "safe" to convert to this one can be converted + implicitly */ + template Other> + constexpr ValueUnit(ValueUnit const& value) + requires SafeToCast + : ValueUnit(safe_cast(value.value())) + { + } + + constexpr ValueUnit + operator+(value_type const& rhs) const + { + return ValueUnit{value_ + rhs}; + } + + friend constexpr ValueUnit + operator+(value_type lhs, ValueUnit const& rhs) + { + // addition is commutative + return rhs + lhs; + } + + constexpr ValueUnit + operator-(value_type const& rhs) const + { + return ValueUnit{value_ - rhs}; + } + + friend constexpr ValueUnit + operator-(value_type lhs, ValueUnit const& rhs) + { + // subtraction is NOT commutative, but (lhs + (-rhs)) is addition, which + // is + return -rhs + lhs; + } + + constexpr ValueUnit + operator*(value_type const& rhs) const + { + return ValueUnit{value_ * rhs}; + } + + friend constexpr ValueUnit + operator*(value_type lhs, ValueUnit const& rhs) + { + // multiplication is commutative + return rhs * lhs; + } + + constexpr value_type + operator/(ValueUnit const& rhs) const + { + return value_ / rhs.value_; + } + + ValueUnit& + operator+=(ValueUnit const& other) + { + value_ += other.value(); + return *this; + } + + ValueUnit& + 
operator-=(ValueUnit const& other) + { + value_ -= other.value(); + return *this; + } + + ValueUnit& + operator++() + { + ++value_; + return *this; + } + + ValueUnit& + operator--() + { + --value_; + return *this; + } + + ValueUnit& + operator*=(value_type const& rhs) + { + value_ *= rhs; + return *this; + } + + ValueUnit& + operator/=(value_type const& rhs) + { + value_ /= rhs; + return *this; + } + + template + ValueUnit& + operator%=(value_type const& rhs) + { + value_ %= rhs; + return *this; + } + + ValueUnit + operator-() const + { + static_assert( + std::is_signed_v, "- operator illegal on unsigned value types"); + return ValueUnit{-value_}; + } + + constexpr bool + operator==(ValueUnit const& other) const + { + return value_ == other.value_; + } + + template Other> + constexpr bool + operator==(ValueUnit const& other) const + { + return value_ == other.value(); + } + + constexpr bool + operator==(value_type other) const + { + return value_ == other; + } + + template Other> + constexpr bool + operator!=(ValueUnit const& other) const + { + return !operator==(other); + } + + constexpr bool + operator<(ValueUnit const& other) const + { + return value_ < other.value_; + } + + /** Returns true if the amount is not zero */ + explicit constexpr + operator bool() const noexcept + { + return value_ != 0; + } + + /** Return the sign of the amount */ + constexpr int + signum() const noexcept + { + return (value_ < 0) ? -1 : (value_ ? 1 : 0); + } + + /** Returns the number of drops */ + // TODO: Move this to a new class, maybe with the old "TaggedFee" name + constexpr value_type + fee() const + { + return value_; + } + + template + constexpr double + decimalFromReference(ValueUnit reference) const + { + return static_cast(value_) / reference.value(); + } + + // `Usable` is checked to ensure that only values with + // known valid type tags can be converted to JSON. At the time + // of implementation, that includes all known tags, but more may + // be added in the future. + Json::Value + jsonClipped() const + requires Usable + { + if constexpr (std::is_integral_v) + { + using jsontype = std::conditional_t< + std::is_signed_v, + Json::Int, + Json::UInt>; + + constexpr auto min = std::numeric_limits::min(); + constexpr auto max = std::numeric_limits::max(); + + if (value_ < min) + return min; + if (value_ > max) + return max; + return static_cast(value_); + } + else + { + return value_; + } + } + + /** Returns the underlying value. Code SHOULD NOT call this + function unless the type has been abstracted away, + e.g. in a templated function. + */ + constexpr value_type + value() const + { + return value_; + } + + friend std::istream& + operator>>(std::istream& s, ValueUnit& val) + { + s >> val.value_; + return s; + } +}; + +// Output Values as just their numeric value. 
+template +std::basic_ostream& +operator<<(std::basic_ostream& os, ValueUnit const& q) +{ + return os << q.value(); +} + +template +std::string +to_string(ValueUnit const& amount) +{ + return std::to_string(amount.value()); +} + +template +concept muldivSource = Valid && + std::is_convertible_v; + +template +concept muldivDest = muldivSource && // Dest is also a source + std::is_convertible_v && + sizeof(typename Dest::value_type) >= sizeof(std::uint64_t); + +template +concept muldivSources = muldivSource && muldivSource && + std::is_same_v; + +template +concept muldivable = muldivSources && muldivDest; +// Source and Dest can be the same by default + +template +concept muldivCommutable = muldivable && + !std::is_same_v; + +template +ValueUnit +scalar(T value) +{ + return ValueUnit{value}; +} + +template Dest> +std::optional +mulDivU(Source1 value, Dest mul, Source2 div) +{ + // values can never be negative in any context. + if (value.value() < 0 || mul.value() < 0 || div.value() < 0) + { + // split the asserts so if one hits, the user can tell which + // without a debugger. + XRPL_ASSERT( + value.value() >= 0, "ripple::unit::mulDivU : minimum value input"); + XRPL_ASSERT( + mul.value() >= 0, "ripple::unit::mulDivU : minimum mul input"); + XRPL_ASSERT( + div.value() > 0, "ripple::unit::mulDivU : minimum div input"); + return std::nullopt; + } + + using desttype = typename Dest::value_type; + constexpr auto max = std::numeric_limits::max(); + + // Shortcuts, since these happen a lot in the real world + if (value == div) + return mul; + if (mul.value() == div.value()) + { + if (value.value() > max) + return std::nullopt; + return Dest{static_cast(value.value())}; + } + + using namespace boost::multiprecision; + + uint128_t product; + product = multiply( + product, + static_cast(value.value()), + static_cast(mul.value())); + + auto quotient = product / div.value(); + + if (quotient > max) + return std::nullopt; + + return Dest{static_cast(quotient)}; +} + +} // namespace unit + +// Fee Levels +template +using FeeLevel = unit::ValueUnit; +using FeeLevel64 = FeeLevel; +using FeeLevelDouble = FeeLevel; + +// Basis points (Bips) +template +using Bips = unit::ValueUnit; +using Bips16 = Bips; +using Bips32 = Bips; +template +using TenthBips = unit::ValueUnit; +using TenthBips16 = TenthBips; +using TenthBips32 = TenthBips; + +template Dest> +std::optional +mulDiv(Source1 value, Dest mul, Source2 div) +{ + return unit::mulDivU(value, mul, div); +} + +template < + class Source1, + class Source2, + unit::muldivCommutable Dest> +std::optional +mulDiv(Dest value, Source1 mul, Source2 div) +{ + // Multiplication is commutative + return unit::mulDivU(mul, value, div); +} + +template +std::optional +mulDiv(std::uint64_t value, Dest mul, std::uint64_t div) +{ + // Give the scalars a non-tag so the + // unit-handling version gets called. + return unit::mulDivU(unit::scalar(value), mul, unit::scalar(div)); +} + +template +std::optional +mulDiv(Dest value, std::uint64_t mul, std::uint64_t div) +{ + // Multiplication is commutative + return mulDiv(mul, value, div); +} + +template Source2> +std::optional +mulDiv(Source1 value, std::uint64_t mul, Source2 div) +{ + // Give the scalars a dimensionless unit so the + // unit-handling version gets called. 
+ auto unitresult = unit::mulDivU(value, unit::scalar(mul), div); + + if (!unitresult) + return std::nullopt; + + return unitresult->value(); +} + +template Source2> +std::optional +mulDiv(std::uint64_t value, Source1 mul, Source2 div) +{ + // Multiplication is commutative + return mulDiv(mul, value, div); +} + +template Src> +constexpr Dest +safe_cast(Src s) noexcept +{ + // Dest may not have an explicit value constructor + return Dest{safe_cast(s.value())}; +} + +template +constexpr Dest +safe_cast(Src s) noexcept +{ + // Dest may not have an explicit value constructor + return Dest{safe_cast(s)}; +} + +template Src> +constexpr Dest +unsafe_cast(Src s) noexcept +{ + // Dest may not have an explicit value constructor + return Dest{unsafe_cast(s.value())}; +} + +template +constexpr Dest +unsafe_cast(Src s) noexcept +{ + // Dest may not have an explicit value constructor + return Dest{unsafe_cast(s)}; +} + +} // namespace ripple + +#endif // PROTOCOL_UNITS_H_INCLUDED diff --git a/include/xrpl/protocol/XRPAmount.h b/include/xrpl/protocol/XRPAmount.h index 332735dc6f..a7a013d625 100644 --- a/include/xrpl/protocol/XRPAmount.h +++ b/include/xrpl/protocol/XRPAmount.h @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include @@ -42,7 +42,7 @@ class XRPAmount : private boost::totally_ordered, private boost::additive { public: - using unit_type = feeunit::dropTag; + using unit_type = unit::dropTag; using value_type = std::int64_t; private: diff --git a/include/xrpl/protocol/detail/sfields.macro b/include/xrpl/protocol/detail/sfields.macro index 96192324fd..10fe015dac 100644 --- a/include/xrpl/protocol/detail/sfields.macro +++ b/include/xrpl/protocol/detail/sfields.macro @@ -174,7 +174,8 @@ TYPED_SFIELD(sfNFTokenID, UINT256, 10) TYPED_SFIELD(sfEmitParentTxnID, UINT256, 11) TYPED_SFIELD(sfEmitNonce, UINT256, 12) TYPED_SFIELD(sfEmitHookHash, UINT256, 13) -TYPED_SFIELD(sfAMMID, UINT256, 14) +TYPED_SFIELD(sfAMMID, UINT256, 14, + SField::sMD_PseudoAccount | SField::sMD_Default) // 256-bit (uncommon) TYPED_SFIELD(sfBookDirectory, UINT256, 16) @@ -196,7 +197,8 @@ TYPED_SFIELD(sfHookHash, UINT256, 31) TYPED_SFIELD(sfHookNamespace, UINT256, 32) TYPED_SFIELD(sfHookSetTxnID, UINT256, 33) TYPED_SFIELD(sfDomainID, UINT256, 34) -TYPED_SFIELD(sfVaultID, UINT256, 35) +TYPED_SFIELD(sfVaultID, UINT256, 35, + SField::sMD_PseudoAccount | SField::sMD_Default) TYPED_SFIELD(sfParentBatchID, UINT256, 36) // number (common) diff --git a/include/xrpl/protocol/detail/transactions.macro b/include/xrpl/protocol/detail/transactions.macro index 3aaa5a40a3..3ea4a3bbec 100644 --- a/include/xrpl/protocol/detail/transactions.macro +++ b/include/xrpl/protocol/detail/transactions.macro @@ -22,16 +22,31 @@ #endif /** - * TRANSACTION(tag, value, name, delegatable, amendments, fields) + * TRANSACTION(tag, value, name, delegatable, amendments, privileges, fields) + * + * To ease maintenance, you may replace any unneeded values with "..." + * e.g. #define TRANSACTION(tag, value, name, ...) * * You must define a transactor class in the `ripple` namespace named `name`, - * and include its header in `src/xrpld/app/tx/detail/applySteps.cpp`. + * and include its header alongside the TRANSACTOR definition using this + * format: + * #if TRANSACTION_INCLUDE + * # include + * #endif + * + * The `privileges` parameter of the TRANSACTION macro is a bitfield + * defining which operations the transaction can perform. + * The values are defined and used in InvariantCheck.cpp */ /** This transaction type executes a payment. 
*/ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttPAYMENT, 0, Payment, Delegation::delegatable, uint256{}, + createAcct, ({ {sfDestination, soeREQUIRED}, {sfAmount, soeREQUIRED, soeMPTSupported}, @@ -45,9 +60,13 @@ TRANSACTION(ttPAYMENT, 0, Payment, })) /** This transaction type creates an escrow object. */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttESCROW_CREATE, 1, EscrowCreate, Delegation::delegatable, uint256{}, + noPriv, ({ {sfDestination, soeREQUIRED}, {sfAmount, soeREQUIRED, soeMPTSupported}, @@ -61,6 +80,7 @@ TRANSACTION(ttESCROW_CREATE, 1, EscrowCreate, TRANSACTION(ttESCROW_FINISH, 2, EscrowFinish, Delegation::delegatable, uint256{}, + noPriv, ({ {sfOwner, soeREQUIRED}, {sfOfferSequence, soeREQUIRED}, @@ -71,9 +91,13 @@ TRANSACTION(ttESCROW_FINISH, 2, EscrowFinish, /** This transaction type adjusts various account settings. */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttACCOUNT_SET, 3, AccountSet, Delegation::notDelegatable, uint256{}, + noPriv, ({ {sfEmailHash, soeOPTIONAL}, {sfWalletLocator, soeOPTIONAL}, @@ -88,18 +112,26 @@ TRANSACTION(ttACCOUNT_SET, 3, AccountSet, })) /** This transaction type cancels an existing escrow. */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttESCROW_CANCEL, 4, EscrowCancel, Delegation::delegatable, uint256{}, + noPriv, ({ {sfOwner, soeREQUIRED}, {sfOfferSequence, soeREQUIRED}, })) /** This transaction type sets or clears an account's "regular key". */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttREGULAR_KEY_SET, 5, SetRegularKey, Delegation::notDelegatable, uint256{}, + noPriv, ({ {sfRegularKey, soeOPTIONAL}, })) @@ -107,9 +139,13 @@ TRANSACTION(ttREGULAR_KEY_SET, 5, SetRegularKey, // 6 deprecated /** This transaction type creates an offer to trade one asset for another. */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttOFFER_CREATE, 7, OfferCreate, Delegation::delegatable, uint256{}, + noPriv, ({ {sfTakerPays, soeREQUIRED}, {sfTakerGets, soeREQUIRED}, @@ -119,9 +155,13 @@ TRANSACTION(ttOFFER_CREATE, 7, OfferCreate, })) /** This transaction type cancels existing offers to trade one asset for another. */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttOFFER_CANCEL, 8, OfferCancel, Delegation::delegatable, uint256{}, + noPriv, ({ {sfOfferSequence, soeREQUIRED}, })) @@ -129,9 +169,13 @@ TRANSACTION(ttOFFER_CANCEL, 8, OfferCancel, // 9 deprecated /** This transaction type creates a new set of tickets. */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttTICKET_CREATE, 10, TicketCreate, Delegation::delegatable, featureTicketBatch, + noPriv, ({ {sfTicketCount, soeREQUIRED}, })) @@ -141,18 +185,26 @@ TRANSACTION(ttTICKET_CREATE, 10, TicketCreate, /** This transaction type modifies the signer list associated with an account. */ // The SignerEntries are optional because a SignerList is deleted by // setting the SignerQuorum to zero and omitting SignerEntries. +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttSIGNER_LIST_SET, 12, SignerListSet, Delegation::notDelegatable, uint256{}, + noPriv, ({ {sfSignerQuorum, soeREQUIRED}, {sfSignerEntries, soeOPTIONAL}, })) /** This transaction type creates a new unidirectional XRP payment channel. 
*/ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttPAYCHAN_CREATE, 13, PaymentChannelCreate, Delegation::delegatable, uint256{}, + noPriv, ({ {sfDestination, soeREQUIRED}, {sfAmount, soeREQUIRED}, @@ -166,6 +218,7 @@ TRANSACTION(ttPAYCHAN_CREATE, 13, PaymentChannelCreate, TRANSACTION(ttPAYCHAN_FUND, 14, PaymentChannelFund, Delegation::delegatable, uint256{}, + noPriv, ({ {sfChannel, soeREQUIRED}, {sfAmount, soeREQUIRED}, @@ -176,6 +229,7 @@ TRANSACTION(ttPAYCHAN_FUND, 14, PaymentChannelFund, TRANSACTION(ttPAYCHAN_CLAIM, 15, PaymentChannelClaim, Delegation::delegatable, uint256{}, + noPriv, ({ {sfChannel, soeREQUIRED}, {sfAmount, soeOPTIONAL}, @@ -186,9 +240,13 @@ TRANSACTION(ttPAYCHAN_CLAIM, 15, PaymentChannelClaim, })) /** This transaction type creates a new check. */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttCHECK_CREATE, 16, CheckCreate, Delegation::delegatable, featureChecks, + noPriv, ({ {sfDestination, soeREQUIRED}, {sfSendMax, soeREQUIRED}, @@ -198,9 +256,13 @@ TRANSACTION(ttCHECK_CREATE, 16, CheckCreate, })) /** This transaction type cashes an existing check. */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttCHECK_CASH, 17, CheckCash, Delegation::delegatable, featureChecks, + noPriv, ({ {sfCheckID, soeREQUIRED}, {sfAmount, soeOPTIONAL}, @@ -208,17 +270,25 @@ TRANSACTION(ttCHECK_CASH, 17, CheckCash, })) /** This transaction type cancels an existing check. */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttCHECK_CANCEL, 18, CheckCancel, Delegation::delegatable, featureChecks, + noPriv, ({ {sfCheckID, soeREQUIRED}, })) /** This transaction type grants or revokes authorization to transfer funds. */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttDEPOSIT_PREAUTH, 19, DepositPreauth, Delegation::delegatable, featureDepositPreauth, + noPriv, ({ {sfAuthorize, soeOPTIONAL}, {sfUnauthorize, soeOPTIONAL}, @@ -227,9 +297,13 @@ TRANSACTION(ttDEPOSIT_PREAUTH, 19, DepositPreauth, })) /** This transaction type modifies a trustline between two accounts. */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttTRUST_SET, 20, TrustSet, Delegation::delegatable, uint256{}, + noPriv, ({ {sfLimitAmount, soeOPTIONAL}, {sfQualityIn, soeOPTIONAL}, @@ -237,9 +311,13 @@ TRANSACTION(ttTRUST_SET, 20, TrustSet, })) /** This transaction type deletes an existing account. */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttACCOUNT_DELETE, 21, AccountDelete, Delegation::notDelegatable, uint256{}, + mustDeleteAcct, ({ {sfDestination, soeREQUIRED}, {sfDestinationTag, soeOPTIONAL}, @@ -249,9 +327,13 @@ TRANSACTION(ttACCOUNT_DELETE, 21, AccountDelete, // 22 reserved /** This transaction mints a new NFT. */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttNFTOKEN_MINT, 25, NFTokenMint, Delegation::delegatable, featureNonFungibleTokensV1, + changeNFTCounts, ({ {sfNFTokenTaxon, soeREQUIRED}, {sfTransferFee, soeOPTIONAL}, @@ -263,18 +345,26 @@ TRANSACTION(ttNFTOKEN_MINT, 25, NFTokenMint, })) /** This transaction burns (i.e. destroys) an existing NFT. */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttNFTOKEN_BURN, 26, NFTokenBurn, Delegation::delegatable, featureNonFungibleTokensV1, + changeNFTCounts, ({ {sfNFTokenID, soeREQUIRED}, {sfOwner, soeOPTIONAL}, })) /** This transaction creates a new offer to buy or sell an NFT. 
*/ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttNFTOKEN_CREATE_OFFER, 27, NFTokenCreateOffer, Delegation::delegatable, featureNonFungibleTokensV1, + noPriv, ({ {sfNFTokenID, soeREQUIRED}, {sfAmount, soeREQUIRED}, @@ -284,17 +374,25 @@ TRANSACTION(ttNFTOKEN_CREATE_OFFER, 27, NFTokenCreateOffer, })) /** This transaction cancels an existing offer to buy or sell an existing NFT. */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttNFTOKEN_CANCEL_OFFER, 28, NFTokenCancelOffer, Delegation::delegatable, featureNonFungibleTokensV1, + noPriv, ({ {sfNFTokenOffers, soeREQUIRED}, })) /** This transaction accepts an existing offer to buy or sell an existing NFT. */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttNFTOKEN_ACCEPT_OFFER, 29, NFTokenAcceptOffer, Delegation::delegatable, featureNonFungibleTokensV1, + noPriv, ({ {sfNFTokenBuyOffer, soeOPTIONAL}, {sfNFTokenSellOffer, soeOPTIONAL}, @@ -302,18 +400,26 @@ TRANSACTION(ttNFTOKEN_ACCEPT_OFFER, 29, NFTokenAcceptOffer, })) /** This transaction claws back issued tokens. */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttCLAWBACK, 30, Clawback, Delegation::delegatable, featureClawback, + noPriv, ({ {sfAmount, soeREQUIRED, soeMPTSupported}, {sfHolder, soeOPTIONAL}, })) /** This transaction claws back tokens from an AMM pool. */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttAMM_CLAWBACK, 31, AMMClawback, Delegation::delegatable, featureAMMClawback, + mayDeleteAcct | overrideFreeze, ({ {sfHolder, soeREQUIRED}, {sfAsset, soeREQUIRED}, @@ -322,9 +428,13 @@ TRANSACTION(ttAMM_CLAWBACK, 31, AMMClawback, })) /** This transaction type creates an AMM instance */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttAMM_CREATE, 35, AMMCreate, Delegation::delegatable, featureAMM, + createPseudoAcct, ({ {sfAmount, soeREQUIRED}, {sfAmount2, soeREQUIRED}, @@ -332,9 +442,13 @@ TRANSACTION(ttAMM_CREATE, 35, AMMCreate, })) /** This transaction type deposits into an AMM instance */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttAMM_DEPOSIT, 36, AMMDeposit, Delegation::delegatable, featureAMM, + noPriv, ({ {sfAsset, soeREQUIRED}, {sfAsset2, soeREQUIRED}, @@ -346,9 +460,13 @@ TRANSACTION(ttAMM_DEPOSIT, 36, AMMDeposit, })) /** This transaction type withdraws from an AMM instance */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttAMM_WITHDRAW, 37, AMMWithdraw, Delegation::delegatable, featureAMM, + mayDeleteAcct, ({ {sfAsset, soeREQUIRED}, {sfAsset2, soeREQUIRED}, @@ -359,9 +477,13 @@ TRANSACTION(ttAMM_WITHDRAW, 37, AMMWithdraw, })) /** This transaction type votes for the trading fee */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttAMM_VOTE, 38, AMMVote, Delegation::delegatable, featureAMM, + noPriv, ({ {sfAsset, soeREQUIRED}, {sfAsset2, soeREQUIRED}, @@ -369,9 +491,13 @@ TRANSACTION(ttAMM_VOTE, 38, AMMVote, })) /** This transaction type bids for the auction slot */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttAMM_BID, 39, AMMBid, Delegation::delegatable, featureAMM, + noPriv, ({ {sfAsset, soeREQUIRED}, {sfAsset2, soeREQUIRED}, @@ -381,18 +507,26 @@ TRANSACTION(ttAMM_BID, 39, AMMBid, })) /** This transaction type deletes AMM in the empty state */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttAMM_DELETE, 40, AMMDelete, Delegation::delegatable, featureAMM, + mustDeleteAcct, ({ {sfAsset, soeREQUIRED}, {sfAsset2, soeREQUIRED}, })) /** This transactions creates a crosschain sequence number */ +#if TRANSACTION_INCLUDE +# include +#endif 
TRANSACTION(ttXCHAIN_CREATE_CLAIM_ID, 41, XChainCreateClaimID, Delegation::delegatable, featureXChainBridge, + noPriv, ({ {sfXChainBridge, soeREQUIRED}, {sfSignatureReward, soeREQUIRED}, @@ -403,6 +537,7 @@ TRANSACTION(ttXCHAIN_CREATE_CLAIM_ID, 41, XChainCreateClaimID, TRANSACTION(ttXCHAIN_COMMIT, 42, XChainCommit, Delegation::delegatable, featureXChainBridge, + noPriv, ({ {sfXChainBridge, soeREQUIRED}, {sfXChainClaimID, soeREQUIRED}, @@ -414,6 +549,7 @@ TRANSACTION(ttXCHAIN_COMMIT, 42, XChainCommit, TRANSACTION(ttXCHAIN_CLAIM, 43, XChainClaim, Delegation::delegatable, featureXChainBridge, + noPriv, ({ {sfXChainBridge, soeREQUIRED}, {sfXChainClaimID, soeREQUIRED}, @@ -426,6 +562,7 @@ TRANSACTION(ttXCHAIN_CLAIM, 43, XChainClaim, TRANSACTION(ttXCHAIN_ACCOUNT_CREATE_COMMIT, 44, XChainAccountCreateCommit, Delegation::delegatable, featureXChainBridge, + noPriv, ({ {sfXChainBridge, soeREQUIRED}, {sfDestination, soeREQUIRED}, @@ -437,6 +574,7 @@ TRANSACTION(ttXCHAIN_ACCOUNT_CREATE_COMMIT, 44, XChainAccountCreateCommit, TRANSACTION(ttXCHAIN_ADD_CLAIM_ATTESTATION, 45, XChainAddClaimAttestation, Delegation::delegatable, featureXChainBridge, + createAcct, ({ {sfXChainBridge, soeREQUIRED}, @@ -453,9 +591,11 @@ TRANSACTION(ttXCHAIN_ADD_CLAIM_ATTESTATION, 45, XChainAddClaimAttestation, })) /** This transaction adds an attestation to an account */ -TRANSACTION(ttXCHAIN_ADD_ACCOUNT_CREATE_ATTESTATION, 46, XChainAddAccountCreateAttestation, +TRANSACTION(ttXCHAIN_ADD_ACCOUNT_CREATE_ATTESTATION, 46, + XChainAddAccountCreateAttestation, Delegation::delegatable, featureXChainBridge, + createAcct, ({ {sfXChainBridge, soeREQUIRED}, @@ -476,6 +616,7 @@ TRANSACTION(ttXCHAIN_ADD_ACCOUNT_CREATE_ATTESTATION, 46, XChainAddAccountCreateA TRANSACTION(ttXCHAIN_MODIFY_BRIDGE, 47, XChainModifyBridge, Delegation::delegatable, featureXChainBridge, + noPriv, ({ {sfXChainBridge, soeREQUIRED}, {sfSignatureReward, soeOPTIONAL}, @@ -486,6 +627,7 @@ TRANSACTION(ttXCHAIN_MODIFY_BRIDGE, 47, XChainModifyBridge, TRANSACTION(ttXCHAIN_CREATE_BRIDGE, 48, XChainCreateBridge, Delegation::delegatable, featureXChainBridge, + noPriv, ({ {sfXChainBridge, soeREQUIRED}, {sfSignatureReward, soeREQUIRED}, @@ -493,9 +635,13 @@ TRANSACTION(ttXCHAIN_CREATE_BRIDGE, 48, XChainCreateBridge, })) /** This transaction type creates or updates a DID */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttDID_SET, 49, DIDSet, Delegation::delegatable, featureDID, + noPriv, ({ {sfDIDDocument, soeOPTIONAL}, {sfURI, soeOPTIONAL}, @@ -506,12 +652,17 @@ TRANSACTION(ttDID_SET, 49, DIDSet, TRANSACTION(ttDID_DELETE, 50, DIDDelete, Delegation::delegatable, featureDID, + noPriv, ({})) /** This transaction type creates an Oracle instance */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttORACLE_SET, 51, OracleSet, Delegation::delegatable, featurePriceOracle, + noPriv, ({ {sfOracleDocumentID, soeREQUIRED}, {sfProvider, soeOPTIONAL}, @@ -522,26 +673,38 @@ TRANSACTION(ttORACLE_SET, 51, OracleSet, })) /** This transaction type deletes an Oracle instance */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttORACLE_DELETE, 52, OracleDelete, Delegation::delegatable, featurePriceOracle, + noPriv, ({ {sfOracleDocumentID, soeREQUIRED}, })) /** This transaction type fixes a problem in the ledger state */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttLEDGER_STATE_FIX, 53, LedgerStateFix, Delegation::delegatable, fixNFTokenPageLinks, + noPriv, ({ {sfLedgerFixType, soeREQUIRED}, {sfOwner, soeOPTIONAL}, })) /** This transaction type creates a 
MPTokensIssuance instance */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttMPTOKEN_ISSUANCE_CREATE, 54, MPTokenIssuanceCreate, Delegation::delegatable, featureMPTokensV1, + createMPTIssuance, ({ {sfAssetScale, soeOPTIONAL}, {sfTransferFee, soeOPTIONAL}, @@ -552,17 +715,25 @@ TRANSACTION(ttMPTOKEN_ISSUANCE_CREATE, 54, MPTokenIssuanceCreate, })) /** This transaction type destroys a MPTokensIssuance instance */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttMPTOKEN_ISSUANCE_DESTROY, 55, MPTokenIssuanceDestroy, Delegation::delegatable, featureMPTokensV1, + destroyMPTIssuance, ({ {sfMPTokenIssuanceID, soeREQUIRED}, })) /** This transaction type sets flags on a MPTokensIssuance or MPToken instance */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttMPTOKEN_ISSUANCE_SET, 56, MPTokenIssuanceSet, Delegation::delegatable, featureMPTokensV1, + noPriv, ({ {sfMPTokenIssuanceID, soeREQUIRED}, {sfHolder, soeOPTIONAL}, @@ -573,18 +744,26 @@ TRANSACTION(ttMPTOKEN_ISSUANCE_SET, 56, MPTokenIssuanceSet, })) /** This transaction type authorizes a MPToken instance */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttMPTOKEN_AUTHORIZE, 57, MPTokenAuthorize, Delegation::delegatable, featureMPTokensV1, + mustAuthorizeMPT, ({ {sfMPTokenIssuanceID, soeREQUIRED}, {sfHolder, soeOPTIONAL}, })) /** This transaction type create an Credential instance */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttCREDENTIAL_CREATE, 58, CredentialCreate, Delegation::delegatable, featureCredentials, + noPriv, ({ {sfSubject, soeREQUIRED}, {sfCredentialType, soeREQUIRED}, @@ -596,6 +775,7 @@ TRANSACTION(ttCREDENTIAL_CREATE, 58, CredentialCreate, TRANSACTION(ttCREDENTIAL_ACCEPT, 59, CredentialAccept, Delegation::delegatable, featureCredentials, + noPriv, ({ {sfIssuer, soeREQUIRED}, {sfCredentialType, soeREQUIRED}, @@ -605,6 +785,7 @@ TRANSACTION(ttCREDENTIAL_ACCEPT, 59, CredentialAccept, TRANSACTION(ttCREDENTIAL_DELETE, 60, CredentialDelete, Delegation::delegatable, featureCredentials, + noPriv, ({ {sfSubject, soeOPTIONAL}, {sfIssuer, soeOPTIONAL}, @@ -612,9 +793,13 @@ TRANSACTION(ttCREDENTIAL_DELETE, 60, CredentialDelete, })) /** This transaction type modify a NFToken */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttNFTOKEN_MODIFY, 61, NFTokenModify, Delegation::delegatable, featureDynamicNFT, + noPriv, ({ {sfNFTokenID, soeREQUIRED}, {sfOwner, soeOPTIONAL}, @@ -622,35 +807,51 @@ TRANSACTION(ttNFTOKEN_MODIFY, 61, NFTokenModify, })) /** This transaction type creates or modifies a Permissioned Domain */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttPERMISSIONED_DOMAIN_SET, 62, PermissionedDomainSet, Delegation::delegatable, featurePermissionedDomains, + noPriv, ({ {sfDomainID, soeOPTIONAL}, {sfAcceptedCredentials, soeREQUIRED}, })) /** This transaction type deletes a Permissioned Domain */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttPERMISSIONED_DOMAIN_DELETE, 63, PermissionedDomainDelete, Delegation::delegatable, featurePermissionedDomains, + noPriv, ({ {sfDomainID, soeREQUIRED}, })) /** This transaction type delegates authorized account specified permissions */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttDELEGATE_SET, 64, DelegateSet, Delegation::notDelegatable, featurePermissionDelegation, + noPriv, ({ {sfAuthorize, soeREQUIRED}, {sfPermissions, soeREQUIRED}, })) /** This transaction creates a single asset vault. 
*/ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttVAULT_CREATE, 65, VaultCreate, Delegation::delegatable, featureSingleAssetVault, + createPseudoAcct | createMPTIssuance, ({ {sfAsset, soeREQUIRED, soeMPTSupported}, {sfAssetsMaximum, soeOPTIONAL}, @@ -662,9 +863,13 @@ TRANSACTION(ttVAULT_CREATE, 65, VaultCreate, })) /** This transaction updates a single asset vault. */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttVAULT_SET, 66, VaultSet, Delegation::delegatable, featureSingleAssetVault, + noPriv, ({ {sfVaultID, soeREQUIRED}, {sfAssetsMaximum, soeOPTIONAL}, @@ -673,26 +878,38 @@ TRANSACTION(ttVAULT_SET, 66, VaultSet, })) /** This transaction deletes a single asset vault. */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttVAULT_DELETE, 67, VaultDelete, Delegation::delegatable, featureSingleAssetVault, + mustDeleteAcct | destroyMPTIssuance, ({ {sfVaultID, soeREQUIRED}, })) /** This transaction trades assets for shares with a vault. */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttVAULT_DEPOSIT, 68, VaultDeposit, Delegation::delegatable, featureSingleAssetVault, + mayAuthorizeMPT, ({ {sfVaultID, soeREQUIRED}, {sfAmount, soeREQUIRED, soeMPTSupported}, })) /** This transaction trades shares for assets with a vault. */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttVAULT_WITHDRAW, 69, VaultWithdraw, Delegation::delegatable, featureSingleAssetVault, + mayDeleteMPT, ({ {sfVaultID, soeREQUIRED}, {sfAmount, soeREQUIRED, soeMPTSupported}, @@ -701,9 +918,13 @@ TRANSACTION(ttVAULT_WITHDRAW, 69, VaultWithdraw, })) /** This transaction claws back tokens from a vault. */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttVAULT_CLAWBACK, 70, VaultClawback, Delegation::delegatable, featureSingleAssetVault, + mayDeleteMPT, ({ {sfVaultID, soeREQUIRED}, {sfHolder, soeREQUIRED}, @@ -711,9 +932,13 @@ TRANSACTION(ttVAULT_CLAWBACK, 70, VaultClawback, })) /** This transaction type batches together transactions. */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttBATCH, 71, Batch, Delegation::notDelegatable, featureBatch, + noPriv, ({ {sfRawTransactions, soeREQUIRED}, {sfBatchSigners, soeOPTIONAL}, @@ -723,9 +948,13 @@ TRANSACTION(ttBATCH, 71, Batch, For details, see: https://xrpl.org/amendments.html */ +#if TRANSACTION_INCLUDE +# include +#endif TRANSACTION(ttAMENDMENT, 100, EnableAmendment, Delegation::notDelegatable, uint256{}, + noPriv, ({ {sfLedgerSequence, soeREQUIRED}, {sfAmendment, soeREQUIRED}, @@ -737,6 +966,7 @@ TRANSACTION(ttAMENDMENT, 100, EnableAmendment, TRANSACTION(ttFEE, 101, SetFee, Delegation::notDelegatable, uint256{}, + noPriv, ({ {sfLedgerSequence, soeOPTIONAL}, // Old version uses raw numbers @@ -757,6 +987,7 @@ TRANSACTION(ttFEE, 101, SetFee, TRANSACTION(ttUNL_MODIFY, 102, UNLModify, Delegation::notDelegatable, uint256{}, + noPriv, ({ {sfUNLModifyDisabling, soeREQUIRED}, {sfLedgerSequence, soeREQUIRED}, diff --git a/include/xrpl/protocol/jss.h b/include/xrpl/protocol/jss.h index d847cf6012..8609aedaef 100644 --- a/include/xrpl/protocol/jss.h +++ b/include/xrpl/protocol/jss.h @@ -722,11 +722,11 @@ JSS(write_load); // out: GetCounts #pragma push_macro("LEDGER_ENTRY_DUPLICATE") #undef LEDGER_ENTRY_DUPLICATE -#define LEDGER_ENTRY(tag, value, name, rpcName, fields) \ - JSS(name); \ +#define LEDGER_ENTRY(tag, value, name, rpcName, ...) \ + JSS(name); \ JSS(rpcName); -#define LEDGER_ENTRY_DUPLICATE(tag, value, name, rpcName, fields) JSS(rpcName); +#define LEDGER_ENTRY_DUPLICATE(tag, value, name, rpcName, ...) 
JSS(rpcName); #include diff --git a/src/libxrpl/ledger/View.cpp b/src/libxrpl/ledger/View.cpp index 45aded0030..3e27741c2f 100644 --- a/src/libxrpl/ledger/View.cpp +++ b/src/libxrpl/ledger/View.cpp @@ -626,8 +626,8 @@ xrpLiquid( std::uint32_t const ownerCount = confineOwnerCount( view.ownerCountHook(id, sle->getFieldU32(sfOwnerCount)), ownerCountAdj); - // AMMs have no reserve requirement - auto const reserve = sle->isFieldPresent(sfAMMID) + // Pseudo-accounts have no reserve requirement + auto const reserve = isPseudoAccount(sle) ? XRPAmount{0} : view.fees().accountReserve(ownerCount); @@ -1039,7 +1039,7 @@ adjustOwnerCount( AccountID const id = (*sle)[sfAccount]; std::uint32_t const adjusted = confineOwnerCount(current, amount, id, j); view.adjustOwnerCountHook(id, current, adjusted); - sle->setFieldU32(sfOwnerCount, adjusted); + sle->at(sfOwnerCount) = adjusted; view.update(sle); } @@ -1079,15 +1079,51 @@ pseudoAccountAddress(ReadView const& view, uint256 const& pseudoOwnerKey) return beast::zero; } -// Note, the list of the pseudo-account designator fields below MUST be -// maintained but it does NOT need to be amendment-gated, since a -// non-active amendment will not set any field, by definition. Specific -// properties of a pseudo-account are NOT checked here, that's what +// Pseudo-account designator fields MUST be maintained by including the +// SField::sMD_PseudoAccount flag in the SField definition. (Don't forget to +// "| SField::sMD_Default"!) The fields do NOT need to be amendment-gated, +// since a non-active amendment will not set any field, by definition. +// Specific properties of a pseudo-account are NOT checked here, that's what // InvariantCheck is for. -static std::array const pseudoAccountOwnerFields = { - &sfAMMID, // - &sfVaultID, // -}; +[[nodiscard]] std::vector const& +getPseudoAccountFields() +{ + static std::vector const pseudoFields = []() { + auto const ar = LedgerFormats::getInstance().findByType(ltACCOUNT_ROOT); + if (!ar) + { + // LCOV_EXCL_START + LogicError( + "ripple::isPseudoAccount : unable to find account root ledger " + "format"); + // LCOV_EXCL_STOP + } + auto const& soTemplate = ar->getSOTemplate(); + + std::vector pseudoFields; + for (auto const& field : soTemplate) + { + if (field.sField().shouldMeta(SField::sMD_PseudoAccount)) + pseudoFields.emplace_back(&field.sField()); + } + return pseudoFields; + }(); + return pseudoFields; +} + +[[nodiscard]] bool +isPseudoAccount(std::shared_ptr sleAcct) +{ + auto const& fields = getPseudoAccountFields(); + + // Intentionally use defensive coding here because it's cheap and makes the + // semantics of true return value clean. 
+ return sleAcct && sleAcct->getType() == ltACCOUNT_ROOT && + std::count_if( + fields.begin(), fields.end(), [&sleAcct](SField const* sf) -> bool { + return sleAcct->isFieldPresent(*sf); + }) > 0; +} Expected, TER> createPseudoAccount( @@ -1095,10 +1131,11 @@ createPseudoAccount( uint256 const& pseudoOwnerKey, SField const& ownerField) { + auto const& fields = getPseudoAccountFields(); XRPL_ASSERT( std::count_if( - pseudoAccountOwnerFields.begin(), - pseudoAccountOwnerFields.end(), + fields.begin(), + fields.end(), [&ownerField](SField const* sf) -> bool { return *sf == ownerField; }) == 1, @@ -1134,18 +1171,42 @@ createPseudoAccount( return account; } -[[nodiscard]] bool -isPseudoAccount(std::shared_ptr sleAcct) +[[nodiscard]] TER +canAddHolding(ReadView const& view, Issue const& issue) { - // Intentionally use defensive coding here because it's cheap and makes the - // semantics of true return value clean. - return sleAcct && sleAcct->getType() == ltACCOUNT_ROOT && - std::count_if( - pseudoAccountOwnerFields.begin(), - pseudoAccountOwnerFields.end(), - [&sleAcct](SField const* sf) -> bool { - return sleAcct->isFieldPresent(*sf); - }) > 0; + if (issue.native()) + return tesSUCCESS; // No special checks for XRP + + auto const issuer = view.read(keylet::account(issue.getIssuer())); + if (!issuer) + return terNO_ACCOUNT; + else if (!issuer->isFlag(lsfDefaultRipple)) + return terNO_RIPPLE; + + return tesSUCCESS; +} + +[[nodiscard]] TER +canAddHolding(ReadView const& view, MPTIssue const& mptIssue) +{ + auto mptID = mptIssue.getMptID(); + auto issuance = view.read(keylet::mptIssuance(mptID)); + if (!issuance) + return tecOBJECT_NOT_FOUND; + if (!issuance->isFlag(lsfMPTCanTransfer)) + return tecNO_AUTH; + + return tesSUCCESS; +} + +[[nodiscard]] TER +canAddHolding(ReadView const& view, Asset const& asset) +{ + return std::visit( + [&](TIss const& issue) -> TER { + return canAddHolding(view, issue); + }, + asset.value()); } [[nodiscard]] TER diff --git a/src/libxrpl/protocol/SField.cpp b/src/libxrpl/protocol/SField.cpp index 1ffce099b8..57e5849c00 100644 --- a/src/libxrpl/protocol/SField.cpp +++ b/src/libxrpl/protocol/SField.cpp @@ -17,6 +17,7 @@ */ //============================================================================== +#include #include #include @@ -27,7 +28,8 @@ namespace ripple { // Storage for static const members. SField::IsSigning const SField::notSigning; int SField::num = 0; -std::map SField::knownCodeToField; +std::unordered_map SField::knownCodeToField; +std::unordered_map SField::knownNameToField; // Give only this translation unit permission to construct SFields struct SField::private_access_tag_t @@ -45,7 +47,7 @@ TypedField::TypedField(private_access_tag_t pat, Args&&... args) } // Construct all compile-time SFields, and register them in the knownCodeToField -// database: +// and knownNameToField databases: // Use macros for most SField construction to enforce naming conventions. #pragma push_macro("UNTYPED_SFIELD") @@ -69,8 +71,8 @@ TypedField::TypedField(private_access_tag_t pat, Args&&... args) ##__VA_ARGS__); // SFields which, for historical reasons, do not follow naming conventions. -SField const sfInvalid(access, -1); -SField const sfGeneric(access, 0); +SField const sfInvalid(access, -1, ""); +SField const sfGeneric(access, 0, "Generic"); // The following two fields aren't used anywhere, but they break tests/have // downstream effects. 
SField const sfHash(access, STI_UINT256, 257, "hash"); @@ -99,19 +101,34 @@ SField::SField( , signingField(signing) , jsonName(fieldName.c_str()) { + XRPL_ASSERT( + !knownCodeToField.contains(fieldCode), + "ripple::SField::SField(tid,fv,fn,meta,signing) : fieldCode is unique"); + XRPL_ASSERT( + !knownNameToField.contains(fieldName), + "ripple::SField::SField(tid,fv,fn,meta,signing) : fieldName is unique"); knownCodeToField[fieldCode] = this; + knownNameToField[fieldName] = this; } -SField::SField(private_access_tag_t, int fc) +SField::SField(private_access_tag_t, int fc, char const* fn) : fieldCode(fc) , fieldType(STI_UNKNOWN) , fieldValue(0) + , fieldName(fn) , fieldMeta(sMD_Never) , fieldNum(++num) , signingField(IsSigning::yes) , jsonName(fieldName.c_str()) { + XRPL_ASSERT( + !knownCodeToField.contains(fieldCode), + "ripple::SField::SField(fc,fn) : fieldCode is unique"); + XRPL_ASSERT( + !knownNameToField.contains(fieldName), + "ripple::SField::SField(fc,fn) : fieldName is unique"); knownCodeToField[fieldCode] = this; + knownNameToField[fieldName] = this; } SField const& @@ -145,11 +162,11 @@ SField::compare(SField const& f1, SField const& f2) SField const& SField::getField(std::string const& fieldName) { - for (auto const& [_, f] : knownCodeToField) + auto it = knownNameToField.find(fieldName); + + if (it != knownNameToField.end()) { - (void)_; - if (f->fieldName == fieldName) - return *f; + return *(it->second); } return sfInvalid; } diff --git a/src/libxrpl/protocol/TxFormats.cpp b/src/libxrpl/protocol/TxFormats.cpp index c10c023ee9..1966c29b51 100644 --- a/src/libxrpl/protocol/TxFormats.cpp +++ b/src/libxrpl/protocol/TxFormats.cpp @@ -55,7 +55,8 @@ TxFormats::TxFormats() #undef TRANSACTION #define UNWRAP(...) __VA_ARGS__ -#define TRANSACTION(tag, value, name, delegatable, amendment, fields) \ +#define TRANSACTION( \ + tag, value, name, delegatable, amendment, privileges, fields) \ add(jss::name, tag, UNWRAP fields, commonFields); #include diff --git a/src/test/app/AMMCalc_test.cpp b/src/test/app/AMMCalc_test.cpp index bebf2844b6..7349b38766 100644 --- a/src/test/app/AMMCalc_test.cpp +++ b/src/test/app/AMMCalc_test.cpp @@ -67,7 +67,7 @@ class AMMCalc_test : public beast::unit_test::suite // drops else if (match[1] == "XRPA") return XRPAmount{std::stoll(match[2])}; - return amountFromString(gw[match[1]], match[2]); + return amountFromString(gw[match[1]].asset(), match[2]); } return std::nullopt; } diff --git a/src/test/app/AMMExtended_test.cpp b/src/test/app/AMMExtended_test.cpp index cb937038fe..dcbebd2dc3 100644 --- a/src/test/app/AMMExtended_test.cpp +++ b/src/test/app/AMMExtended_test.cpp @@ -181,9 +181,9 @@ private: BEAST_EXPECT(expectLedgerEntryRoot( env, alice, XRP(20'000) - XRP(50) - txfee(env, 1))); - BEAST_EXPECT(expectLine(env, bob, USD1(100))); - BEAST_EXPECT(expectLine(env, bob, USD2(0))); - BEAST_EXPECT(expectLine(env, carol, USD2(50))); + BEAST_EXPECT(expectHolding(env, bob, USD1(100))); + BEAST_EXPECT(expectHolding(env, bob, USD2(0))); + BEAST_EXPECT(expectHolding(env, carol, USD2(50))); } } @@ -220,7 +220,7 @@ private: BEAST_EXPECT(expectLedgerEntryRoot( env, carol, XRP(30'000) - (txfee(env, 1)))); BEAST_EXPECT(expectOffers(env, carol, 0)); - BEAST_EXPECT(expectLine(env, carol, USD(30'000))); + BEAST_EXPECT(expectHolding(env, carol, USD(30'000))); // Order that can be filled env(offer(carol, XRP(100), USD(100)), @@ -230,7 +230,7 @@ private: XRP(10'000), USD(10'100), ammAlice.tokens())); BEAST_EXPECT(expectLedgerEntryRoot( env, carol, XRP(30'000) + XRP(100) - 
txfee(env, 2))); - BEAST_EXPECT(expectLine(env, carol, USD(29'900))); + BEAST_EXPECT(expectHolding(env, carol, USD(29'900))); BEAST_EXPECT(expectOffers(env, carol, 0)); }, {{XRP(10'100), USD(10'000)}}, @@ -254,7 +254,7 @@ private: BEAST_EXPECT(expectLedgerEntryRoot( env, carol, XRP(30'000) + XRP(100) - txfee(env, 1))); // AMM - BEAST_EXPECT(expectLine(env, carol, USD(29'900))); + BEAST_EXPECT(expectHolding(env, carol, USD(29'900))); BEAST_EXPECT(expectOffers(env, carol, 0)); }, {{XRP(10'100), USD(10'000)}}, @@ -327,7 +327,7 @@ private: USD(49), IOUAmount{273'861'278752583, -8})); - BEAST_EXPECT(expectLine(env, bob, STAmount{USD, 101})); + BEAST_EXPECT(expectHolding(env, bob, STAmount{USD, 101})); BEAST_EXPECT(expectLedgerEntryRoot( env, bob, XRP(300'000) - xrpTransferred - txfee(env, 1))); BEAST_EXPECT(expectOffers(env, bob, 0)); @@ -390,7 +390,7 @@ private: BEAST_EXPECT( ammBob.expectBalances(USD(300), XRP(1'000), ammBob.tokens())); - BEAST_EXPECT(expectLine(env, alice, USD(0))); + BEAST_EXPECT(expectHolding(env, alice, USD(0))); auto jrr = ledgerEntryRoot(env, alice); BEAST_EXPECT( @@ -423,7 +423,7 @@ private: BEAST_EXPECT(ammAlice.expectBalances( XRPAmount{9'900'990'100}, USD(10'100), ammAlice.tokens())); // initial 30,000 - 10,000AMM - 100pay - BEAST_EXPECT(expectLine(env, alice, USD(19'900))); + BEAST_EXPECT(expectHolding(env, alice, USD(19'900))); // initial 30,000 - 10,0000AMM + 99.009900pay - fee*3 BEAST_EXPECT(expectLedgerEntryRoot( env, @@ -453,7 +453,7 @@ private: env(pay(alice, bob, USD(100)), sendmax(XRP(100))); BEAST_EXPECT(ammAlice.expectBalances( XRP(10'100), USD(10'000), ammAlice.tokens())); - BEAST_EXPECT(expectLine(env, bob, USD(100))); + BEAST_EXPECT(expectHolding(env, bob, USD(100))); }, {{XRP(10'000), USD(10'100)}}, 0, @@ -533,7 +533,7 @@ private: STAmount{USD1, UINT64_C(5'030'181086519115), -12}, ammCarol.tokens())); BEAST_EXPECT(expectOffers(env, dan, 1, {{Amounts{XRP(200), EUR(20)}}})); - BEAST_EXPECT(expectLine(env, bob, STAmount{EUR1, 30})); + BEAST_EXPECT(expectHolding(env, bob, STAmount{EUR1, 30})); } void @@ -642,7 +642,7 @@ private: BEAST_EXPECT(ammAlice.expectBalances( XRP(10'000), USD(9'999), ammAlice.tokens())); BEAST_EXPECT(expectOffers(env, carol, 0)); - BEAST_EXPECT(expectLine(env, carol, USD(30'101))); + BEAST_EXPECT(expectHolding(env, carol, USD(30'101))); BEAST_EXPECT(expectLedgerEntryRoot( env, carol, XRP(30'000) - XRP(100) - txfee(env, 1))); }, @@ -682,7 +682,7 @@ private: env(offer(alice, USD(100), XRP(200)), json(jss::Flags, tfSell)); BEAST_EXPECT( ammBob.expectBalances(XRP(1'100), USD(2'000), ammBob.tokens())); - BEAST_EXPECT(expectLine(env, alice, USD(200))); + BEAST_EXPECT(expectHolding(env, alice, USD(200))); BEAST_EXPECT(expectLedgerEntryRoot(env, alice, XRP(250))); BEAST_EXPECT(expectOffers(env, alice, 0)); } @@ -733,7 +733,7 @@ private: STAmount(XTS, UINT64_C(101'010101010101), -12), XXX(99), ammAlice.tokens())); - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, bob, STAmount{XTS, UINT64_C(98'989898989899), -12})); } else @@ -742,10 +742,10 @@ private: STAmount(XTS, UINT64_C(101'0101010101011), -13), XXX(99), ammAlice.tokens())); - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, bob, STAmount{XTS, UINT64_C(98'9898989898989), -13})); } - BEAST_EXPECT(expectLine(env, bob, XXX(101))); + BEAST_EXPECT(expectHolding(env, bob, XXX(101))); } void @@ -783,8 +783,8 @@ private: XRP(10'100), USD(10'000), ammAlice.tokens())); BEAST_EXPECT(ammBob.expectBalances( XRP(10'000), EUR(10'100), ammBob.tokens())); - 
BEAST_EXPECT(expectLine(env, carol, USD(15'100))); - BEAST_EXPECT(expectLine(env, carol, EUR(14'900))); + BEAST_EXPECT(expectHolding(env, carol, USD(15'100))); + BEAST_EXPECT(expectHolding(env, carol, EUR(14'900))); BEAST_EXPECT(expectOffers(env, carol, 0)); } @@ -816,8 +816,8 @@ private: BEAST_EXPECT(ammAlice.expectBalances( XRP(10'100), USD(10'000), ammAlice.tokens())); - BEAST_EXPECT(expectLine(env, carol, USD(15'100))); - BEAST_EXPECT(expectLine(env, carol, EUR(14'900))); + BEAST_EXPECT(expectHolding(env, carol, USD(15'100))); + BEAST_EXPECT(expectHolding(env, carol, EUR(14'900))); BEAST_EXPECT(expectOffers(env, carol, 0)); BEAST_EXPECT(expectOffers(env, bob, 0)); } @@ -850,8 +850,8 @@ private: BEAST_EXPECT(ammBob.expectBalances( XRP(10'000), EUR(10'100), ammBob.tokens())); - BEAST_EXPECT(expectLine(env, carol, USD(15'100))); - BEAST_EXPECT(expectLine(env, carol, EUR(14'900))); + BEAST_EXPECT(expectHolding(env, carol, USD(15'100))); + BEAST_EXPECT(expectHolding(env, carol, EUR(14'900))); BEAST_EXPECT(expectOffers(env, carol, 0)); BEAST_EXPECT(expectOffers(env, alice, 0)); } @@ -894,7 +894,7 @@ private: XRP(20'220), STAmount{USD, UINT64_C(197'8239366963403), -13}, ammBob.tokens())); - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, alice, STAmount{USD, UINT64_C(1'002'17606330366), -11})); BEAST_EXPECT(expectOffers(env, alice, 0)); } @@ -912,7 +912,7 @@ private: XRP(21'500), STAmount{USD, UINT64_C(186'046511627907), -12}, ammBob.tokens())); - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, alice, STAmount{USD, UINT64_C(1'013'953488372093), -12})); BEAST_EXPECT(expectOffers(env, alice, 0)); } @@ -953,7 +953,7 @@ private: // AMM doesn't pay the transfer fee BEAST_EXPECT(ammAlice.expectBalances( XRP(10'100), USD(10'000), ammAlice.tokens())); - BEAST_EXPECT(expectLine(env, carol, USD(30'100))); + BEAST_EXPECT(expectHolding(env, carol, USD(30'100))); BEAST_EXPECT(expectOffers(env, carol, 0)); }, {{XRP(10'000), USD(10'100)}}, @@ -974,7 +974,7 @@ private: BEAST_EXPECT(ammAlice.expectBalances( XRP(10'000), USD(10'100), ammAlice.tokens())); // Carol pays 25% transfer fee - BEAST_EXPECT(expectLine(env, carol, USD(29'875))); + BEAST_EXPECT(expectHolding(env, carol, USD(29'875))); BEAST_EXPECT(expectOffers(env, carol, 0)); }, {{XRP(10'100), USD(10'000)}}, @@ -1011,9 +1011,9 @@ private: // AMM doesn't pay the transfer fee BEAST_EXPECT(ammAlice.expectBalances( XRP(10'100), USD(10'000), ammAlice.tokens())); - BEAST_EXPECT(expectLine(env, carol, USD(15'100))); + BEAST_EXPECT(expectHolding(env, carol, USD(15'100))); // Carol pays 25% transfer fee. - BEAST_EXPECT(expectLine(env, carol, EUR(14'875))); + BEAST_EXPECT(expectHolding(env, carol, EUR(14'875))); BEAST_EXPECT(expectOffers(env, carol, 0)); BEAST_EXPECT(expectOffers(env, bob, 0)); } @@ -1051,9 +1051,9 @@ private: // AMM doesn't pay the transfer fee BEAST_EXPECT(ammAlice.expectBalances( XRP(10'050), USD(10'000), ammAlice.tokens())); - BEAST_EXPECT(expectLine(env, carol, USD(15'050))); + BEAST_EXPECT(expectHolding(env, carol, USD(15'050))); // Carol pays 25% transfer fee. 
- BEAST_EXPECT(expectLine(env, carol, EUR(14'937.5))); + BEAST_EXPECT(expectHolding(env, carol, EUR(14'937.5))); BEAST_EXPECT(expectOffers(env, carol, 0)); BEAST_EXPECT( expectOffers(env, bob, 1, {{Amounts{EUR(50), XRP(50)}}})); @@ -1077,7 +1077,7 @@ private: env(pay(gw, carol, EUR(1'000)), sendmax(EUR(10'000))); env.close(); // 1000 / 0.8 - BEAST_EXPECT(expectLine(env, carol, EUR(1'250))); + BEAST_EXPECT(expectHolding(env, carol, EUR(1'250))); // The scenario: // o USD/XRP AMM is created. // o EUR/XRP Offer is created. @@ -1096,9 +1096,9 @@ private: // AMM doesn't pay the transfer fee BEAST_EXPECT(ammAlice.expectBalances( XRP(10'100), USD(10'000), ammAlice.tokens())); - BEAST_EXPECT(expectLine(env, carol, USD(100))); + BEAST_EXPECT(expectHolding(env, carol, USD(100))); // Carol pays 25% transfer fee: 1250 - 100(offer) - 25(transfer fee) - BEAST_EXPECT(expectLine(env, carol, EUR(1'125))); + BEAST_EXPECT(expectHolding(env, carol, EUR(1'125))); BEAST_EXPECT(expectOffers(env, carol, 0)); BEAST_EXPECT(expectOffers(env, bob, 0)); } @@ -1120,7 +1120,7 @@ private: env(pay(gw, alice, USD(11'000))); env(pay(gw, carol, EUR(1'000)), sendmax(EUR(10'000))); env.close(); - BEAST_EXPECT(expectLine(env, carol, EUR(1'000))); + BEAST_EXPECT(expectHolding(env, carol, EUR(1'000))); // The scenario: // o USD/XRP AMM is created. // o EUR/XRP Offer is created. @@ -1139,9 +1139,9 @@ private: // AMM pay doesn't transfer fee BEAST_EXPECT(ammAlice.expectBalances( XRP(10'100), USD(10'000), ammAlice.tokens())); - BEAST_EXPECT(expectLine(env, carol, USD(100))); + BEAST_EXPECT(expectHolding(env, carol, USD(100))); // Carol pays 25% transfer fee: 1000 - 100(offer) - 25(transfer fee) - BEAST_EXPECT(expectLine(env, carol, EUR(875))); + BEAST_EXPECT(expectHolding(env, carol, EUR(875))); BEAST_EXPECT(expectOffers(env, carol, 0)); BEAST_EXPECT(expectOffers(env, bob, 0)); } @@ -1170,7 +1170,7 @@ private: BEAST_EXPECT(ammBob.expectBalances( XRP(10'100), USD_bob(10'000), ammBob.tokens())); BEAST_EXPECT(expectOffers(env, alice, 0)); - BEAST_EXPECT(expectLine(env, alice, USD_bob(100))); + BEAST_EXPECT(expectHolding(env, alice, USD_bob(100))); } void @@ -1206,19 +1206,19 @@ private: env.close(); env(pay(dan, bob, D_BUX(100))); env.close(); - BEAST_EXPECT(expectLine(env, bob, D_BUX(100))); + BEAST_EXPECT(expectHolding(env, bob, D_BUX(100))); env(pay(ann, cam, D_BUX(60)), path(bob, dan), sendmax(A_BUX(200))); env.close(); - BEAST_EXPECT(expectLine(env, ann, A_BUX(none))); - BEAST_EXPECT(expectLine(env, ann, D_BUX(none))); - BEAST_EXPECT(expectLine(env, bob, A_BUX(72))); - BEAST_EXPECT(expectLine(env, bob, D_BUX(40))); - BEAST_EXPECT(expectLine(env, cam, A_BUX(none))); - BEAST_EXPECT(expectLine(env, cam, D_BUX(60))); - BEAST_EXPECT(expectLine(env, dan, A_BUX(none))); - BEAST_EXPECT(expectLine(env, dan, D_BUX(none))); + BEAST_EXPECT(expectHolding(env, ann, A_BUX(none))); + BEAST_EXPECT(expectHolding(env, ann, D_BUX(none))); + BEAST_EXPECT(expectHolding(env, bob, A_BUX(72))); + BEAST_EXPECT(expectHolding(env, bob, D_BUX(40))); + BEAST_EXPECT(expectHolding(env, cam, A_BUX(none))); + BEAST_EXPECT(expectHolding(env, cam, D_BUX(60))); + BEAST_EXPECT(expectHolding(env, dan, A_BUX(none))); + BEAST_EXPECT(expectHolding(env, dan, D_BUX(none))); AMM ammBob(env, bob, A_BUX(30), D_BUX(30)); @@ -1234,12 +1234,12 @@ private: BEAST_EXPECT( ammBob.expectBalances(A_BUX(30), D_BUX(30), ammBob.tokens())); - BEAST_EXPECT(expectLine(env, ann, A_BUX(none))); - BEAST_EXPECT(expectLine(env, ann, D_BUX(0))); - BEAST_EXPECT(expectLine(env, cam, A_BUX(none))); - 
BEAST_EXPECT(expectLine(env, cam, D_BUX(60))); - BEAST_EXPECT(expectLine(env, dan, A_BUX(0))); - BEAST_EXPECT(expectLine(env, dan, D_BUX(none))); + BEAST_EXPECT(expectHolding(env, ann, A_BUX(none))); + BEAST_EXPECT(expectHolding(env, ann, D_BUX(0))); + BEAST_EXPECT(expectHolding(env, cam, A_BUX(none))); + BEAST_EXPECT(expectHolding(env, cam, D_BUX(60))); + BEAST_EXPECT(expectHolding(env, dan, A_BUX(0))); + BEAST_EXPECT(expectHolding(env, dan, D_BUX(none))); } } @@ -1363,7 +1363,7 @@ private: env(pay(gw, bob, USD(50))); env.close(); - BEAST_EXPECT(expectLine(env, bob, USD(50))); + BEAST_EXPECT(expectHolding(env, bob, USD(50))); // Bob's offer should cross Alice's AMM env(offer(bob, XRP(50), USD(50))); @@ -1372,7 +1372,7 @@ private: BEAST_EXPECT( ammAlice.expectBalances(USD(1'050), XRP(1'000), ammAlice.tokens())); BEAST_EXPECT(expectOffers(env, bob, 0)); - BEAST_EXPECT(expectLine(env, bob, USD(0))); + BEAST_EXPECT(expectHolding(env, bob, USD(0))); } void @@ -1403,7 +1403,7 @@ private: env(pay(gw, bob, USD(50))); env.close(); - BEAST_EXPECT(expectLine(env, bob, USD(50))); + BEAST_EXPECT(expectHolding(env, bob, USD(50))); // Alice should not be able to create AMM without authorization. { @@ -1440,7 +1440,7 @@ private: BEAST_EXPECT( ammAlice.expectBalances(USD(1'050), XRP(1'000), ammAlice.tokens())); BEAST_EXPECT(expectOffers(env, bob, 0)); - BEAST_EXPECT(expectLine(env, bob, USD(0))); + BEAST_EXPECT(expectHolding(env, bob, USD(0))); } void @@ -1535,7 +1535,7 @@ private: // AMM offer is 51.282052XRP/11AUD, 11AUD/1.1 = 10AUD to bob BEAST_EXPECT( ammCarol.expectBalances(XRP(51), AUD(40), ammCarol.tokens())); - BEAST_EXPECT(expectLine(env, bob, AUD(10))); + BEAST_EXPECT(expectHolding(env, bob, AUD(10))); auto const result = find_paths(env, alice, bob, Account(bob)["USD"](25)); @@ -1950,10 +1950,10 @@ private: env(pay(alice, carol, USD(50)), path(~USD), sendmax(BTC(50))); - BEAST_EXPECT(expectLine(env, alice, BTC(50))); - BEAST_EXPECT(expectLine(env, bob, BTC(0))); - BEAST_EXPECT(expectLine(env, bob, USD(0))); - BEAST_EXPECT(expectLine(env, carol, USD(200))); + BEAST_EXPECT(expectHolding(env, alice, BTC(50))); + BEAST_EXPECT(expectHolding(env, bob, BTC(0))); + BEAST_EXPECT(expectHolding(env, bob, USD(0))); + BEAST_EXPECT(expectHolding(env, carol, USD(200))); BEAST_EXPECT( ammBob.expectBalances(BTC(150), USD(100), ammBob.tokens())); } @@ -1974,10 +1974,10 @@ private: env(pay(alice, carol, USD(50)), path(~XRP, ~USD), sendmax(BTC(50))); - BEAST_EXPECT(expectLine(env, alice, BTC(50))); - BEAST_EXPECT(expectLine(env, bob, BTC(0))); - BEAST_EXPECT(expectLine(env, bob, USD(0))); - BEAST_EXPECT(expectLine(env, carol, USD(200))); + BEAST_EXPECT(expectHolding(env, alice, BTC(50))); + BEAST_EXPECT(expectHolding(env, bob, BTC(0))); + BEAST_EXPECT(expectHolding(env, bob, USD(0))); + BEAST_EXPECT(expectHolding(env, carol, USD(200))); BEAST_EXPECT(ammBobBTC_XRP.expectBalances( BTC(150), XRP(100), ammBobBTC_XRP.tokens())); BEAST_EXPECT(ammBobXRP_USD.expectBalances( @@ -2003,8 +2003,8 @@ private: env, alice, xrpMinusFee(env, 10'000 - 50))); BEAST_EXPECT(expectLedgerEntryRoot( env, bob, XRP(10'000) - XRP(100) - ammCrtFee(env))); - BEAST_EXPECT(expectLine(env, bob, USD(0))); - BEAST_EXPECT(expectLine(env, carol, USD(200))); + BEAST_EXPECT(expectHolding(env, bob, USD(0))); + BEAST_EXPECT(expectHolding(env, carol, USD(200))); BEAST_EXPECT( ammBob.expectBalances(XRP(150), USD(100), ammBob.tokens())); } @@ -2024,10 +2024,10 @@ private: env(pay(alice, carol, XRP(50)), path(~XRP), sendmax(USD(50))); - 
BEAST_EXPECT(expectLine(env, alice, USD(50))); + BEAST_EXPECT(expectHolding(env, alice, USD(50))); BEAST_EXPECT(expectLedgerEntryRoot( env, bob, XRP(10'000) - XRP(150) - ammCrtFee(env))); - BEAST_EXPECT(expectLine(env, bob, USD(0))); + BEAST_EXPECT(expectHolding(env, bob, USD(0))); BEAST_EXPECT(expectLedgerEntryRoot(env, carol, XRP(10'000 + 50))); BEAST_EXPECT( ammBob.expectBalances(USD(150), XRP(100), ammBob.tokens())); @@ -2209,7 +2209,7 @@ private: sendmax(USD(0.4)), txflags(tfNoRippleDirect | tfPartialPayment)); - BEAST_EXPECT(expectLine(env, carol, EUR(1))); + BEAST_EXPECT(expectHolding(env, carol, EUR(1))); BEAST_EXPECT(ammBob.expectBalances( USD(8.4), XRPAmount{20}, ammBob.tokens())); } @@ -2244,7 +2244,7 @@ private: // alice buys 107.1428USD with 120GBP and pays 25% tr fee on 120GBP // 1,000 - 120*1.25 = 850GBP - BEAST_EXPECT(expectLine(env, alice, GBP(850))); + BEAST_EXPECT(expectHolding(env, alice, GBP(850))); if (!features[fixAMMv1_1]) { // 120GBP is swapped in for 107.1428USD @@ -2262,7 +2262,7 @@ private: } // 25% of 85.7142USD is paid in tr fee // 85.7142*1.25 = 107.1428USD - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, carol, STAmount(USD, UINT64_C(1'085'714285714286), -12))); } @@ -2294,10 +2294,10 @@ private: // alice buys 120EUR with 120GBP via the offer // and pays 25% tr fee on 120GBP // 1,000 - 120*1.25 = 850GBP - BEAST_EXPECT(expectLine(env, alice, GBP(850))); + BEAST_EXPECT(expectHolding(env, alice, GBP(850))); // consumed offer is 120GBP/120EUR // ed doesn't pay tr fee - BEAST_EXPECT(expectLine(env, ed, EUR(880), GBP(1'120))); + BEAST_EXPECT(expectHolding(env, ed, EUR(880), GBP(1'120))); BEAST_EXPECT( expectOffers(env, ed, 1, {Amounts{GBP(880), EUR(880)}})); // 25% on 96EUR is paid in tr fee 96*1.25 = 120EUR @@ -2307,7 +2307,7 @@ private: STAmount{USD, UINT64_C(912'4087591240876), -13}, amm.tokens())); // 25% on 70.0729USD is paid in tr fee 70.0729*1.25 = 87.5912USD - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, carol, STAmount(USD, UINT64_C(1'070'07299270073), -11))); } { @@ -2333,7 +2333,7 @@ private: txflags(tfNoRippleDirect | tfPartialPayment)); env.close(); - BEAST_EXPECT(expectLine(env, alice, GBP(850))); + BEAST_EXPECT(expectHolding(env, alice, GBP(850))); if (!features[fixAMMv1_1]) { // alice buys 107.1428EUR with 120GBP and pays 25% tr fee on @@ -2367,7 +2367,7 @@ private: amm2.tokens())); } // 25% on 63.1578USD is paid in tr fee 63.1578*1.25 = 78.9473USD - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, carol, STAmount(USD, UINT64_C(1'063'157894736842), -12))); } { @@ -2386,7 +2386,7 @@ private: BEAST_EXPECT( amm.expectBalances(USD(1'100), EUR(1'000), amm.tokens())); // alice pays 25% tr fee on 100USD 1100-100*1.25 = 975USD - BEAST_EXPECT(expectLine(env, alice, USD(975), EUR(1'200))); + BEAST_EXPECT(expectHolding(env, alice, USD(975), EUR(1'200))); BEAST_EXPECT(expectOffers(env, alice, 0)); } @@ -2416,7 +2416,7 @@ private: // alice buys 125USD with 142.8571GBP and pays 25% tr fee // on 142.8571GBP // 1,000 - 142.8571*1.25 = 821.4285GBP - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, alice, STAmount(GBP, UINT64_C(821'4285714285712), -13))); // 142.8571GBP is swapped in for 125USD BEAST_EXPECT(amm.expectBalances( @@ -2425,7 +2425,7 @@ private: amm.tokens())); // 25% on 100USD is paid in tr fee // 100*1.25 = 125USD - BEAST_EXPECT(expectLine(env, carol, USD(1'100))); + BEAST_EXPECT(expectHolding(env, carol, USD(1'100))); } { // Payment via AMM with limit quality, deliver less @@ -2456,7 +2456,7 @@ 
private: // alice buys 28.125USD with 24GBP and pays 25% tr fee // on 24GBP // 1,200 - 24*1.25 = 1,170GBP - BEAST_EXPECT(expectLine(env, alice, GBP(1'170))); + BEAST_EXPECT(expectHolding(env, alice, GBP(1'170))); // 24GBP is swapped in for 28.125USD BEAST_EXPECT(amm.expectBalances( GBP(1'024), USD(1'171.875), amm.tokens())); @@ -2466,7 +2466,7 @@ private: // alice buys 28.125USD with 24GBP and pays 25% tr fee // on 24GBP // 1,200 - 24*1.25 =~ 1,170GBP - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, alice, STAmount{GBP, UINT64_C(1'169'999999999999), -12})); @@ -2478,7 +2478,7 @@ private: } // 25% on 22.5USD is paid in tr fee // 22.5*1.25 = 28.125USD - BEAST_EXPECT(expectLine(env, carol, USD(1'222.5))); + BEAST_EXPECT(expectHolding(env, carol, USD(1'222.5))); } { // Payment via offer and AMM with limit quality, deliver less @@ -2513,13 +2513,13 @@ private: // alice buys 70.4210EUR with 70.4210GBP via the offer // and pays 25% tr fee on 70.4210GBP // 1,400 - 70.4210*1.25 = 1400 - 88.0262 = 1311.9736GBP - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, alice, STAmount{GBP, UINT64_C(1'311'973684210527), -12})); // ed doesn't pay tr fee, the balances reflect consumed offer // 70.4210GBP/70.4210EUR - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, ed, STAmount{EUR, UINT64_C(1'329'578947368421), -12}, @@ -2543,13 +2543,13 @@ private: // alice buys 70.4210EUR with 70.4210GBP via the offer // and pays 25% tr fee on 70.4210GBP // 1,400 - 70.4210*1.25 = 1400 - 88.0262 = 1311.9736GBP - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, alice, STAmount{GBP, UINT64_C(1'311'973684210525), -12})); // ed doesn't pay tr fee, the balances reflect consumed offer // 70.4210GBP/70.4210EUR - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, ed, STAmount{EUR, UINT64_C(1'329'57894736842), -11}, @@ -2569,7 +2569,7 @@ private: amm.tokens())); } // 25% on 59.7321USD is paid in tr fee 59.7321*1.25 = 74.6651USD - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, carol, STAmount(USD, UINT64_C(1'459'732142857143), -12))); } { @@ -2605,7 +2605,7 @@ private: // alice buys 53.3322EUR with 56.3368GBP via the amm // and pays 25% tr fee on 56.3368GBP // 1,400 - 56.3368*1.25 = 1400 - 70.4210 = 1329.5789GBP - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, alice, STAmount{GBP, UINT64_C(1'329'578947368421), -12})); @@ -2622,7 +2622,7 @@ private: // alice buys 53.3322EUR with 56.3368GBP via the amm // and pays 25% tr fee on 56.3368GBP // 1,400 - 56.3368*1.25 = 1400 - 70.4210 = 1329.5789GBP - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, alice, STAmount{GBP, UINT64_C(1'329'57894736842), -11})); @@ -2636,7 +2636,7 @@ private: } // 25% on 42.6658EUR is paid in tr fee 42.6658*1.25 = 53.3322EUR // 42.6658EUR/59.7321USD - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, ed, STAmount{USD, UINT64_C(1'340'267857142857), -12}, @@ -2649,7 +2649,7 @@ private: STAmount{EUR, UINT64_C(957'3341836734693), -13}, STAmount{USD, UINT64_C(1'340'267857142857), -12}}})); // 25% on 47.7857USD is paid in tr fee 47.7857*1.25 = 59.7321USD - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, carol, STAmount(USD, UINT64_C(1'447'785714285714), -12))); } { @@ -2683,7 +2683,7 @@ private: // alice buys 53.3322EUR with 107.5308GBP // 25% on 86.0246GBP is paid in tr fee // 1,400 - 86.0246*1.25 = 1400 - 107.5308 = 1229.4691GBP - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, alice, STAmount{GBP, UINT64_C(1'292'469135802469), -12})); @@ -2704,7 
+2704,7 @@ private: // alice buys 53.3322EUR with 107.5308GBP // 25% on 86.0246GBP is paid in tr fee // 1,400 - 86.0246*1.25 = 1400 - 107.5308 = 1229.4691GBP - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, alice, STAmount{GBP, UINT64_C(1'292'469135802466), -12})); @@ -2721,7 +2721,7 @@ private: amm2.tokens())); } // 25% on 66.7432USD is paid in tr fee 66.7432*1.25 = 83.4291USD - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, carol, STAmount(USD, UINT64_C(1'466'743295019157), -12))); } { @@ -2778,7 +2778,7 @@ private: amm2.tokens())); } // 25% on 81.1111USD is paid in tr fee 81.1111*1.25 = 101.3888USD - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, carol, STAmount{USD, UINT64_C(1'481'111111111111), -12})); } } @@ -2808,7 +2808,7 @@ private: BEAST_EXPECT( ammBob.expectBalances(XRP(1'050), USD(1'000), ammBob.tokens())); - BEAST_EXPECT(expectLine(env, carol, USD(2'050))); + BEAST_EXPECT(expectHolding(env, carol, USD(2'050))); BEAST_EXPECT(expectOffers(env, bob, 1, {{{XRP(100), USD(50)}}})); } } diff --git a/src/test/app/AMM_test.cpp b/src/test/app/AMM_test.cpp index c89aebf813..cfe1ffab16 100644 --- a/src/test/app/AMM_test.cpp +++ b/src/test/app/AMM_test.cpp @@ -97,8 +97,8 @@ private: AMM ammAlice(env, alice, USD(20'000), BTC(0.5)); BEAST_EXPECT(ammAlice.expectBalances( USD(20'000), BTC(0.5), IOUAmount{100, 0})); - BEAST_EXPECT(expectLine(env, alice, USD(0))); - BEAST_EXPECT(expectLine(env, alice, BTC(0))); + BEAST_EXPECT(expectHolding(env, alice, USD(0))); + BEAST_EXPECT(expectHolding(env, alice, BTC(0))); } // Require authorization is set, account is authorized @@ -1394,7 +1394,7 @@ private: BEAST_EXPECT(ammAlice.expectBalances( XRP(11'000), USD(11'000), IOUAmount{11'000'000, 0})); // 30,000 less deposited 1,000 - BEAST_EXPECT(expectLine(env, carol, USD(29'000))); + BEAST_EXPECT(expectHolding(env, carol, USD(29'000))); // 30,000 less deposited 1,000 and 10 drops tx fee BEAST_EXPECT(expectLedgerEntryRoot( env, carol, XRPAmount{29'000'000'000 - baseFee})); @@ -1449,7 +1449,8 @@ private: IOUAmount{1, 7} + newLPTokens)); // 30,000 less deposited depositUSD - BEAST_EXPECT(expectLine(env, carol, USD(30'000) - depositUSD)); + BEAST_EXPECT( + expectHolding(env, carol, USD(30'000) - depositUSD)); // 30,000 less deposited depositXRP and 10 drops tx fee BEAST_EXPECT(expectLedgerEntryRoot( env, carol, XRP(30'000) - depositXRP - txfee(env, 1))); @@ -1553,15 +1554,15 @@ private: AMM ammAlice(env, alice, USD(20'000), BTC(0.5)); BEAST_EXPECT(ammAlice.expectBalances( USD(20'000), BTC(0.5), IOUAmount{100, 0})); - BEAST_EXPECT(expectLine(env, alice, USD(0))); - BEAST_EXPECT(expectLine(env, alice, BTC(0))); + BEAST_EXPECT(expectHolding(env, alice, USD(0))); + BEAST_EXPECT(expectHolding(env, alice, BTC(0))); fund(env, gw, {carol}, {USD(2'000), BTC(0.05)}, Fund::Acct); // no transfer fee on deposit ammAlice.deposit(carol, 10); BEAST_EXPECT(ammAlice.expectBalances( USD(22'000), BTC(0.55), IOUAmount{110, 0})); - BEAST_EXPECT(expectLine(env, carol, USD(0))); - BEAST_EXPECT(expectLine(env, carol, BTC(0))); + BEAST_EXPECT(expectHolding(env, carol, USD(0))); + BEAST_EXPECT(expectHolding(env, carol, BTC(0))); } // Tiny deposits @@ -2281,7 +2282,7 @@ private: BEAST_EXPECT( ammAlice.expectLPTokens(carol, IOUAmount{1'000'000, 0})); // 30,000 less deposited 1,000 - BEAST_EXPECT(expectLine(env, carol, USD(29'000))); + BEAST_EXPECT(expectHolding(env, carol, USD(29'000))); // 30,000 less deposited 1,000 and 10 drops tx fee BEAST_EXPECT(expectLedgerEntryRoot( env, carol, 
XRPAmount{29'000'000'000 - baseFee})); @@ -2290,7 +2291,7 @@ private: ammAlice.withdraw(carol, 1'000'000); BEAST_EXPECT( ammAlice.expectLPTokens(carol, IOUAmount(beast::Zero()))); - BEAST_EXPECT(expectLine(env, carol, USD(30'000))); + BEAST_EXPECT(expectHolding(env, carol, USD(30'000))); BEAST_EXPECT(expectLedgerEntryRoot( env, carol, XRPAmount{30'000'000'000 - 2 * baseFee})); }); @@ -2525,22 +2526,22 @@ private: AMM ammAlice(env, alice, USD(20'000), BTC(0.5)); BEAST_EXPECT(ammAlice.expectBalances( USD(20'000), BTC(0.5), IOUAmount{100, 0})); - BEAST_EXPECT(expectLine(env, alice, USD(0))); - BEAST_EXPECT(expectLine(env, alice, BTC(0))); + BEAST_EXPECT(expectHolding(env, alice, USD(0))); + BEAST_EXPECT(expectHolding(env, alice, BTC(0))); fund(env, gw, {carol}, {USD(2'000), BTC(0.05)}, Fund::Acct); // no transfer fee on deposit ammAlice.deposit(carol, 10); BEAST_EXPECT(ammAlice.expectBalances( USD(22'000), BTC(0.55), IOUAmount{110, 0})); - BEAST_EXPECT(expectLine(env, carol, USD(0))); - BEAST_EXPECT(expectLine(env, carol, BTC(0))); + BEAST_EXPECT(expectHolding(env, carol, USD(0))); + BEAST_EXPECT(expectHolding(env, carol, BTC(0))); // no transfer fee on withdraw ammAlice.withdraw(carol, 10); BEAST_EXPECT(ammAlice.expectBalances( USD(20'000), BTC(0.5), IOUAmount{100, 0})); BEAST_EXPECT(ammAlice.expectLPTokens(carol, IOUAmount{0, 0})); - BEAST_EXPECT(expectLine(env, carol, USD(2'000))); - BEAST_EXPECT(expectLine(env, carol, BTC(0.05))); + BEAST_EXPECT(expectHolding(env, carol, USD(2'000))); + BEAST_EXPECT(expectHolding(env, carol, BTC(0.05))); } // Tiny withdraw @@ -3527,7 +3528,7 @@ private: // Alice doesn't have anymore lp tokens env(amm.bid({.account = alice, .bidMin = 500})); BEAST_EXPECT(amm.expectAuctionSlot(100, 0, IOUAmount{500})); - BEAST_EXPECT(expectLine(env, alice, STAmount{lpIssue, 0})); + BEAST_EXPECT(expectHolding(env, alice, STAmount{lpIssue, 0})); // But trades with the discounted fee since she still owns the slot. 
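// [Editor's note] Illustrative sketch, not part of the patch. The expected
// pool balances in these AMM test hunks follow the constant-product relation:
// for reserves x and y, paying dx into the pool takes out
// dy = y - x*y / (x + dx*(1 - fee)), leaving x*y unchanged when fee == 0.
// The recurring pattern "pool XRP(10'000)/USD(10'100), 100 XRP in, pool ends
// XRP(10'100)/USD(10'000)" is that formula with fee == 0 and dy == 100;
// trading-fee handling, rounding, and the auction-slot discount exercised in
// the surrounding tests are simplified away here.
namespace amm_swap_sketch {
constexpr long long x = 10'000, y = 10'100, dx = 100;
constexpr long long dy = y - (x * y) / (x + dx);
static_assert(dy == 100, "zero-fee constant-product swap output");
static_assert((x + dx) * (y - dy) == x * y, "x*y is preserved");
}  // namespace amm_swap_sketch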
// Alice pays 10011 drops in fees env(pay(alice, bob, USD(10)), path(~USD), sendmax(XRP(11))); @@ -3790,7 +3791,7 @@ private: BEAST_EXPECT(ammAlice.expectBalances( XRP(10'100), USD(10'000), ammAlice.tokens())); // Initial balance 30,000 + 100 - BEAST_EXPECT(expectLine(env, carol, USD(30'100))); + BEAST_EXPECT(expectHolding(env, carol, USD(30'100))); // Initial balance 30,000 - 100(sendmax) - 10(tx fee) BEAST_EXPECT(expectLedgerEntryRoot( env, bob, XRP(30'000) - XRP(100) - txfee(env, 1))); @@ -3810,7 +3811,7 @@ private: BEAST_EXPECT(ammAlice.expectBalances( XRP(10'100), USD(10'000), ammAlice.tokens())); // Initial balance 30,000 + 100 - BEAST_EXPECT(expectLine(env, carol, USD(30'100))); + BEAST_EXPECT(expectHolding(env, carol, USD(30'100))); // Initial balance 30,000 - 100(sendmax) - 10(tx fee) BEAST_EXPECT(expectLedgerEntryRoot( env, bob, XRP(30'000) - XRP(100) - txfee(env, 1))); @@ -3831,7 +3832,7 @@ private: BEAST_EXPECT(ammAlice.expectBalances( XRP(10'100), USD(10'000), ammAlice.tokens())); // Initial balance 30,000 + 100 - BEAST_EXPECT(expectLine(env, carol, USD(30'100))); + BEAST_EXPECT(expectHolding(env, carol, USD(30'100))); // Initial balance 30,000 - 100(sendmax) - 10(tx fee) BEAST_EXPECT(expectLedgerEntryRoot( env, bob, XRP(30'000) - XRP(100) - txfee(env, 1))); @@ -3857,7 +3858,7 @@ private: BEAST_EXPECT(ammAlice.expectBalances( XRP(10'010), USD(10'000), ammAlice.tokens())); // Initial balance 30,000 + 10(limited by limitQuality) - BEAST_EXPECT(expectLine(env, carol, USD(30'010))); + BEAST_EXPECT(expectHolding(env, carol, USD(30'010))); // Initial balance 30,000 - 10(limited by limitQuality) - 10(tx // fee) BEAST_EXPECT(expectLedgerEntryRoot( @@ -3897,7 +3898,7 @@ private: BEAST_EXPECT(ammAlice.expectBalances( XRP(10'010), USD(10'000), ammAlice.tokens())); // 10USD - 10% transfer fee - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, carol, STAmount{USD, UINT64_C(30'009'09090909091), -11})); @@ -3984,7 +3985,7 @@ private: BEAST_EXPECT(expectOffers(env, alice, 1, {{expectedAmounts}})); } // Initial 30,000 + 100 - BEAST_EXPECT(expectLine(env, carol, STAmount{USD, 30'100})); + BEAST_EXPECT(expectHolding(env, carol, STAmount{USD, 30'100})); // Initial 1,000 - 30082730(AMM pool) - 70798251(offer) - 10(tx fee) BEAST_EXPECT(expectLedgerEntryRoot( env, @@ -4027,7 +4028,7 @@ private: STAmount(EUR, UINT64_C(49'98750312422), -11), STAmount(USD, UINT64_C(49'98750312422), -11)}}})); // Initial 30,000 + 99.99999999999 - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, carol, STAmount{USD, UINT64_C(30'099'99999999999), -11})); @@ -4061,7 +4062,7 @@ private: BEAST_EXPECT(ammAlice.expectBalances( XRP(10'100), USD(10'000), ammAlice.tokens())); // Initial 30,000 + 200 - BEAST_EXPECT(expectLine(env, carol, USD(30'200))); + BEAST_EXPECT(expectHolding(env, carol, USD(30'200))); } else { @@ -4069,7 +4070,7 @@ private: XRP(10'100), STAmount(USD, UINT64_C(10'000'00000000001), -11), ammAlice.tokens())); - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, carol, STAmount(USD, UINT64_C(30'199'99999999999), -11))); @@ -4104,7 +4105,7 @@ private: env.close(); BEAST_EXPECT(ammAlice.expectBalances( XRP(1'050), USD(1'000), ammAlice.tokens())); - BEAST_EXPECT(expectLine(env, carol, USD(2'200))); + BEAST_EXPECT(expectHolding(env, carol, USD(2'200))); BEAST_EXPECT(expectOffers(env, bob, 0)); } @@ -4118,7 +4119,7 @@ private: BEAST_EXPECT(ammAlice.expectBalances( XRP(10'100), USD(10'000), ammAlice.tokens())); // Initial 1,000 + 100 - BEAST_EXPECT(expectLine(env, bob, USD(1'100))); + 
BEAST_EXPECT(expectHolding(env, bob, USD(1'100))); // Initial 30,000 - 100(offer) - 10(tx fee) BEAST_EXPECT(expectLedgerEntryRoot( env, bob, XRP(30'000) - XRP(100) - txfee(env, 1))); @@ -4145,9 +4146,9 @@ private: BEAST_EXPECT(ammAlice.expectBalances( GBP(1'100), EUR(1'000), ammAlice.tokens())); // Initial 30,000 - 100(offer) - 25% transfer fee - BEAST_EXPECT(expectLine(env, carol, GBP(29'875))); + BEAST_EXPECT(expectHolding(env, carol, GBP(29'875))); // Initial 30,000 + 100(offer) - BEAST_EXPECT(expectLine(env, carol, EUR(30'100))); + BEAST_EXPECT(expectHolding(env, carol, EUR(30'100))); BEAST_EXPECT(expectOffers(env, bob, 0)); }, {{GBP(1'000), EUR(1'100)}}, @@ -4285,12 +4286,12 @@ private: // = 58.825 = ~29941.17 // carol bought ~72.93EUR at the cost of ~70.68GBP // the offer is partially consumed - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, carol, STAmount{GBP, UINT64_C(29'941'16770347333), -11})); // Initial 30,000 + ~49.3(offers = 39.3(AMM) + 10(LOB)) - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, carol, STAmount{EUR, UINT64_C(30'049'31517120716), -11})); @@ -4324,20 +4325,20 @@ private: // = 88.35 = ~29911.64 // carol bought ~72.93EUR at the cost of ~70.68GBP // the offer is partially consumed - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, carol, STAmount{GBP, UINT64_C(29'911'64396400896), -11})); // Initial 30,000 + ~72.93(offers = 62.93(AMM) + 10(LOB)) - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, carol, STAmount{EUR, UINT64_C(30'072'93416277865), -11})); } // Initial 2000 + 10 = 2010 - BEAST_EXPECT(expectLine(env, bob, GBP(2'010))); + BEAST_EXPECT(expectHolding(env, bob, GBP(2'010))); // Initial 2000 - 10 * 1.25 = 1987.5 - BEAST_EXPECT(expectLine(env, ed, EUR(1'987.5))); + BEAST_EXPECT(expectHolding(env, ed, EUR(1'987.5))); }, {{GBP(1'000), EUR(1'100)}}, 0, @@ -4363,8 +4364,8 @@ private: env.close(); BEAST_EXPECT(ammAlice.expectBalances( GBP(1'100), EUR(1'000), ammAlice.tokens())); - BEAST_EXPECT(expectLine(env, bob, GBP(75))); - BEAST_EXPECT(expectLine(env, carol, EUR(30'080))); + BEAST_EXPECT(expectHolding(env, bob, GBP(75))); + BEAST_EXPECT(expectHolding(env, carol, EUR(30'080))); }, {{GBP(1'000), EUR(1'100)}}, 0, @@ -4401,12 +4402,12 @@ private: sendmax(CAN(195.3125)), txflags(tfPartialPayment)); env.close(); - BEAST_EXPECT(expectLine(env, bob, CAN(0))); - BEAST_EXPECT(expectLine(env, dan, CAN(356.25), GBP(43.75))); + BEAST_EXPECT(expectHolding(env, bob, CAN(0))); + BEAST_EXPECT(expectHolding(env, dan, CAN(356.25), GBP(43.75))); BEAST_EXPECT(ammAlice.expectBalances( GBP(10'125), EUR(10'000), ammAlice.tokens())); - BEAST_EXPECT(expectLine(env, ed, EUR(300), USD(100))); - BEAST_EXPECT(expectLine(env, carol, USD(80))); + BEAST_EXPECT(expectHolding(env, ed, EUR(300), USD(100))); + BEAST_EXPECT(expectHolding(env, carol, USD(80))); }, {{GBP(10'000), EUR(10'125)}}, 0, @@ -4523,7 +4524,7 @@ private: BEAST_EXPECT(btc_usd.expectBalances( BTC(10'100), USD(10'000), btc_usd.tokens())); - BEAST_EXPECT(expectLine(env, carol, USD(300))); + BEAST_EXPECT(expectHolding(env, carol, USD(300))); } // Dependent AMM @@ -4594,7 +4595,7 @@ private: STAmount{EUR, UINT64_C(10'917'2945958102), -10}, eth_eur.tokens())); } - BEAST_EXPECT(expectLine(env, carol, USD(300))); + BEAST_EXPECT(expectHolding(env, carol, USD(300))); } // AMM offers limit @@ -4620,7 +4621,7 @@ private: XRP(10'030), STAmount{USD, UINT64_C(9'970'089730807577), -12}, ammAlice.tokens())); - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, carol, STAmount{USD, 
UINT64_C(30'029'91026919241), -11})); @@ -4631,7 +4632,7 @@ private: XRP(10'030), STAmount{USD, UINT64_C(9'970'089730807827), -12}, ammAlice.tokens())); - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, carol, STAmount{USD, UINT64_C(30'029'91026919217), -11})); @@ -4663,14 +4664,14 @@ private: if (!features[fixAMMv1_1]) { // Carol gets ~100USD - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, carol, STAmount{USD, UINT64_C(30'099'99999999999), -11})); } else { - BEAST_EXPECT(expectLine(env, carol, USD(30'100))); + BEAST_EXPECT(expectHolding(env, carol, USD(30'100))); } BEAST_EXPECT(expectOffers( env, @@ -4717,7 +4718,7 @@ private: 1, {{{XRPAmount{50'074'628}, STAmount{USD, UINT64_C(50'07512950697), -11}}}})); - BEAST_EXPECT(expectLine(env, carol, USD(30'100))); + BEAST_EXPECT(expectHolding(env, carol, USD(30'100))); } } @@ -4809,11 +4810,11 @@ private: env(offer(carol, STAmount{token2, 100}, STAmount{token1, 100})); env.close(); BEAST_EXPECT( - expectLine(env, alice, STAmount{token1, 10'000'100}) && - expectLine(env, alice, STAmount{token2, 9'999'900})); + expectHolding(env, alice, STAmount{token1, 10'000'100}) && + expectHolding(env, alice, STAmount{token2, 9'999'900})); BEAST_EXPECT( - expectLine(env, carol, STAmount{token2, 1'000'100}) && - expectLine(env, carol, STAmount{token1, 999'900})); + expectHolding(env, carol, STAmount{token2, 1'000'100}) && + expectHolding(env, carol, STAmount{token1, 999'900})); BEAST_EXPECT( expectOffers(env, alice, 0) && expectOffers(env, carol, 0)); }); @@ -5034,7 +5035,7 @@ private: BEAST_EXPECT(ammAlice.expectLPTokens(carol, IOUAmount{1'000})); ammAlice.withdrawAll(carol, USD(3'000)); BEAST_EXPECT(ammAlice.expectLPTokens(carol, IOUAmount{0})); - BEAST_EXPECT(expectLine(env, carol, USD(30'000))); + BEAST_EXPECT(expectHolding(env, carol, USD(30'000))); // Set fee to 1% ammAlice.vote(alice, 1'000); BEAST_EXPECT(ammAlice.expectTradingFee(1'000)); @@ -5043,12 +5044,12 @@ private: ammAlice.deposit(carol, USD(3'000)); BEAST_EXPECT(ammAlice.expectLPTokens( carol, IOUAmount{994'981155689671, -12})); - BEAST_EXPECT(expectLine(env, carol, USD(27'000))); + BEAST_EXPECT(expectHolding(env, carol, USD(27'000))); // Set fee to 0 ammAlice.vote(alice, 0); ammAlice.withdrawAll(carol, USD(0)); // Carol gets back less than the original deposit - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, carol, STAmount{USD, UINT64_C(29'994'96220068281), -11})); @@ -5109,13 +5110,13 @@ private: ammAlice.deposit(carol, USD(3'000)); BEAST_EXPECT(ammAlice.expectLPTokens(carol, IOUAmount{1'000})); - BEAST_EXPECT(expectLine(env, carol, USD(27'000))); + BEAST_EXPECT(expectHolding(env, carol, USD(27'000))); // Set fee to 1% ammAlice.vote(alice, 1'000); BEAST_EXPECT(ammAlice.expectTradingFee(1'000)); // Single withdrawal. Carol gets ~5USD less than deposited. 
ammAlice.withdrawAll(carol, USD(0)); - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, carol, STAmount{USD, UINT64_C(29'994'97487437186), -11})); @@ -5189,9 +5190,9 @@ private: {USD(1'000), EUR(1'000)}, Fund::Acct); // Alice contributed 1010EUR and 1000USD to the pool - BEAST_EXPECT(expectLine(env, alice, EUR(28'990))); - BEAST_EXPECT(expectLine(env, alice, USD(29'000))); - BEAST_EXPECT(expectLine(env, carol, USD(30'000))); + BEAST_EXPECT(expectHolding(env, alice, EUR(28'990))); + BEAST_EXPECT(expectHolding(env, alice, USD(29'000))); + BEAST_EXPECT(expectHolding(env, carol, USD(30'000))); // Carol pays to Alice with no fee env(pay(carol, alice, EUR(10)), path(~EUR), @@ -5199,9 +5200,9 @@ private: txflags(tfNoRippleDirect)); env.close(); // Alice has 10EUR more and Carol has 10USD less - BEAST_EXPECT(expectLine(env, alice, EUR(29'000))); - BEAST_EXPECT(expectLine(env, alice, USD(29'000))); - BEAST_EXPECT(expectLine(env, carol, USD(29'990))); + BEAST_EXPECT(expectHolding(env, alice, EUR(29'000))); + BEAST_EXPECT(expectHolding(env, alice, USD(29'000))); + BEAST_EXPECT(expectHolding(env, carol, USD(29'990))); // Set fee to 1% ammAlice.vote(alice, 1'000); @@ -5213,10 +5214,10 @@ private: txflags(tfNoRippleDirect)); env.close(); // Bob sends 10.1~EUR to pay 10USD - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, bob, STAmount{EUR, UINT64_C(989'8989898989899), -13})); // Carol got 10USD - BEAST_EXPECT(expectLine(env, carol, USD(30'000))); + BEAST_EXPECT(expectHolding(env, carol, USD(30'000))); BEAST_EXPECT(ammAlice.expectBalances( USD(1'000), STAmount{EUR, UINT64_C(1'010'10101010101), -11}, @@ -5233,8 +5234,8 @@ private: // No fee env(offer(carol, EUR(10), USD(10))); env.close(); - BEAST_EXPECT(expectLine(env, carol, USD(29'990))); - BEAST_EXPECT(expectLine(env, carol, EUR(30'010))); + BEAST_EXPECT(expectHolding(env, carol, USD(29'990))); + BEAST_EXPECT(expectHolding(env, carol, EUR(30'010))); // Change pool composition back env(offer(carol, USD(10), EUR(10))); env.close(); @@ -5245,11 +5246,11 @@ private: env.close(); // Alice gets fewer ~4.97EUR for ~5.02USD, the difference goes // to the pool - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, carol, STAmount{USD, UINT64_C(29'995'02512562814), -11})); - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, carol, STAmount{EUR, UINT64_C(30'004'97487437186), -11})); @@ -5299,16 +5300,16 @@ private: path(~USD), sendmax(EUR(15)), txflags(tfNoRippleDirect)); - BEAST_EXPECT(expectLine(env, ed, USD(2'010))); + BEAST_EXPECT(expectHolding(env, ed, USD(2'010))); if (!features[fixAMMv1_1]) { - BEAST_EXPECT(expectLine(env, bob, EUR(1'990))); + BEAST_EXPECT(expectHolding(env, bob, EUR(1'990))); BEAST_EXPECT(ammAlice.expectBalances( USD(1'000), EUR(1'005), ammAlice.tokens())); } else { - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, bob, STAmount(EUR, UINT64_C(1989'999999999999), -12))); BEAST_EXPECT(ammAlice.expectBalances( USD(1'000), @@ -5336,10 +5337,10 @@ private: path(~USD), sendmax(EUR(15)), txflags(tfNoRippleDirect)); - BEAST_EXPECT(expectLine(env, ed, USD(2'010))); + BEAST_EXPECT(expectHolding(env, ed, USD(2'010))); if (!features[fixAMMv1_1]) { - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, bob, STAmount{EUR, UINT64_C(1'989'987453007618), -12})); @@ -5350,7 +5351,7 @@ private: } else { - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, bob, STAmount{EUR, UINT64_C(1'989'987453007628), -12})); @@ -5381,8 +5382,8 @@ private: path(~USD), sendmax(EUR(15)), 
txflags(tfNoRippleDirect)); - BEAST_EXPECT(expectLine(env, ed, USD(2'010))); - BEAST_EXPECT(expectLine(env, bob, EUR(1'990))); + BEAST_EXPECT(expectHolding(env, ed, USD(2'010))); + BEAST_EXPECT(expectHolding(env, bob, EUR(1'990))); BEAST_EXPECT(ammAlice.expectBalances( USD(1'005), EUR(1'000), ammAlice.tokens())); BEAST_EXPECT(expectOffers(env, carol, 0)); @@ -5408,8 +5409,8 @@ private: path(~USD), sendmax(EUR(15)), txflags(tfNoRippleDirect)); - BEAST_EXPECT(expectLine(env, ed, USD(2'010))); - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding(env, ed, USD(2'010))); + BEAST_EXPECT(expectHolding( env, bob, STAmount{EUR, UINT64_C(1'989'993923296712), -12})); BEAST_EXPECT(ammAlice.expectBalances( USD(1'004), @@ -5480,47 +5481,47 @@ private: else BEAST_EXPECT(ammAlice.expectBalances( XRP(10'000), USD(10'000), IOUAmount{10'000'000})); - BEAST_EXPECT(expectLine(env, ben, USD(1'500'000))); - BEAST_EXPECT(expectLine(env, simon, USD(1'500'000))); - BEAST_EXPECT(expectLine(env, chris, USD(1'500'000))); - BEAST_EXPECT(expectLine(env, dan, USD(1'500'000))); + BEAST_EXPECT(expectHolding(env, ben, USD(1'500'000))); + BEAST_EXPECT(expectHolding(env, simon, USD(1'500'000))); + BEAST_EXPECT(expectHolding(env, chris, USD(1'500'000))); + BEAST_EXPECT(expectHolding(env, dan, USD(1'500'000))); if (!features[fixAMMv1_1] && !features[fixAMMv1_3]) - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, carol, STAmount{USD, UINT64_C(30'000'00000000001), -11})); else if (features[fixAMMv1_1] && !features[fixAMMv1_3]) - BEAST_EXPECT(expectLine(env, carol, USD(30'000))); + BEAST_EXPECT(expectHolding(env, carol, USD(30'000))); else - BEAST_EXPECT(expectLine(env, carol, USD(30'000))); - BEAST_EXPECT(expectLine(env, ed, USD(1'500'000))); - BEAST_EXPECT(expectLine(env, paul, USD(1'500'000))); + BEAST_EXPECT(expectHolding(env, carol, USD(30'000))); + BEAST_EXPECT(expectHolding(env, ed, USD(1'500'000))); + BEAST_EXPECT(expectHolding(env, paul, USD(1'500'000))); if (!features[fixAMMv1_1] && !features[fixAMMv1_3]) - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, nataly, STAmount{USD, UINT64_C(1'500'000'000000002), -9})); else if (features[fixAMMv1_1] && !features[fixAMMv1_3]) - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, nataly, STAmount{USD, UINT64_C(1'500'000'000000005), -9})); else - BEAST_EXPECT(expectLine(env, nataly, USD(1'500'000))); + BEAST_EXPECT(expectHolding(env, nataly, USD(1'500'000))); ammAlice.withdrawAll(alice); BEAST_EXPECT(!ammAlice.ammExists()); if (!features[fixAMMv1_1]) - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, alice, STAmount{USD, UINT64_C(30'000'0000000013), -10})); else if (features[fixAMMv1_3]) - BEAST_EXPECT(expectLine( + BEAST_EXPECT(expectHolding( env, alice, STAmount{USD, UINT64_C(30'000'0000000003), -10})); else - BEAST_EXPECT(expectLine(env, alice, USD(30'000))); + BEAST_EXPECT(expectHolding(env, alice, USD(30'000))); // alice XRP balance is 30,000initial - 50 ammcreate fee - // 10drops fee BEAST_EXPECT( @@ -5883,7 +5884,7 @@ private: BEAST_EXPECT(amm->expectBalances( USD(1'000), ETH(1'000), amm->tokens())); } - BEAST_EXPECT(expectLine(env, bob, USD(2'100))); + BEAST_EXPECT(expectHolding(env, bob, USD(2'100))); q[i] = Quality(Amounts{ ETH(2'000) - env.balance(carol, ETH), env.balance(bob, USD) - USD(2'000)}); @@ -6056,7 +6057,7 @@ private: -13}}}})); } } - BEAST_EXPECT(expectLine(env, bob, USD(2'100))); + BEAST_EXPECT(expectHolding(env, bob, USD(2'100))); q[i] = Quality(Amounts{ ETH(2'000) - env.balance(carol, ETH), env.balance(bob, USD) 
- USD(2'000)}); @@ -6203,7 +6204,7 @@ private: sendmax(ETH(600))); env.close(); - BEAST_EXPECT(expectLine(env, bob, USD(2'100))); + BEAST_EXPECT(expectHolding(env, bob, USD(2'100))); if (i == 2 && !features[fixAMMv1_1]) { @@ -7484,7 +7485,7 @@ private: using namespace test::jtx; auto const testCase = [&](std::string suffix, FeatureBitset features) { - testcase("Failed pseudo-account allocation " + suffix); + testcase("Fail pseudo-account allocation " + suffix); std::string logs; Env env{*this, features, std::make_unique(&logs)}; env.fund(XRP(30'000), gw, alice); diff --git a/src/test/app/Credentials_test.cpp b/src/test/app/Credentials_test.cpp index 23aa7ad952..102d516b89 100644 --- a/src/test/app/Credentials_test.cpp +++ b/src/test/app/Credentials_test.cpp @@ -33,15 +33,6 @@ namespace ripple { namespace test { -static inline bool -checkVL( - std::shared_ptr const& sle, - SField const& field, - std::string const& expected) -{ - return strHex(expected) == strHex(sle->getFieldVL(field)); -} - struct Credentials_test : public beast::unit_test::suite { void diff --git a/src/test/app/DID_test.cpp b/src/test/app/DID_test.cpp index 1f28af2d6a..21fb6b584e 100644 --- a/src/test/app/DID_test.cpp +++ b/src/test/app/DID_test.cpp @@ -27,14 +27,6 @@ namespace ripple { namespace test { -bool -checkVL(Slice const& result, std::string expected) -{ - Serializer s; - s.addRaw(result); - return s.getString() == expected; -} - struct DID_test : public beast::unit_test::suite { void diff --git a/src/test/app/EscrowToken_test.cpp b/src/test/app/EscrowToken_test.cpp index 9c1868134f..6caedb53f1 100644 --- a/src/test/app/EscrowToken_test.cpp +++ b/src/test/app/EscrowToken_test.cpp @@ -3426,7 +3426,7 @@ struct EscrowToken_test : public beast::unit_test::suite auto const preAliceMPT = env.balance(alice, MPT); auto const preOutstanding = env.balance(gw, MPT); auto const preEscrowed = issuerMPTEscrowed(env, MPT); - BEAST_EXPECT(preOutstanding == MPT(10'000)); + BEAST_EXPECT(preOutstanding == MPT(-10'000)); BEAST_EXPECT(preEscrowed == 0); env(escrow::create(alice, gw, MPT(1'000)), @@ -3449,7 +3449,7 @@ struct EscrowToken_test : public beast::unit_test::suite BEAST_EXPECT(env.balance(alice, MPT) == preAliceMPT - MPT(1'000)); BEAST_EXPECT(mptEscrowed(env, alice, MPT) == 0); - BEAST_EXPECT(env.balance(gw, MPT) == preOutstanding - MPT(1'000)); + BEAST_EXPECT(env.balance(gw, MPT) == preOutstanding + MPT(1'000)); BEAST_EXPECT(issuerMPTEscrowed(env, MPT) == preEscrowed); } } @@ -3503,7 +3503,7 @@ struct EscrowToken_test : public beast::unit_test::suite BEAST_EXPECT(mptEscrowed(env, alice, MPT) == 125); BEAST_EXPECT(issuerMPTEscrowed(env, MPT) == 125); - BEAST_EXPECT(env.balance(gw, MPT) == MPT(20'000)); + BEAST_EXPECT(env.balance(gw, MPT) == MPT(-20'000)); // bob can finish escrow env(escrow::finish(bob, alice, seq1), @@ -3522,7 +3522,7 @@ struct EscrowToken_test : public beast::unit_test::suite : MPT(20'000); BEAST_EXPECT(mptEscrowed(env, alice, MPT) == escrowedWithFix); BEAST_EXPECT(issuerMPTEscrowed(env, MPT) == escrowedWithFix); - BEAST_EXPECT(env.balance(gw, MPT) == outstandingWithFix); + BEAST_EXPECT(env.balance(gw, MPT) == -outstandingWithFix); } // test locked rate: cancel @@ -3567,7 +3567,7 @@ struct EscrowToken_test : public beast::unit_test::suite BEAST_EXPECT(env.balance(alice, MPT) == preAlice); BEAST_EXPECT(env.balance(bob, MPT) == preBob); - BEAST_EXPECT(env.balance(gw, MPT) == MPT(20'000)); + BEAST_EXPECT(env.balance(gw, MPT) == MPT(-20'000)); BEAST_EXPECT(mptEscrowed(env, alice, MPT) == 0); 
BEAST_EXPECT(issuerMPTEscrowed(env, MPT) == 0); } @@ -3608,7 +3608,7 @@ struct EscrowToken_test : public beast::unit_test::suite BEAST_EXPECT(mptEscrowed(env, alice, MPT) == 125); BEAST_EXPECT(issuerMPTEscrowed(env, MPT) == 125); - BEAST_EXPECT(env.balance(gw, MPT) == MPT(20'000)); + BEAST_EXPECT(env.balance(gw, MPT) == MPT(-20'000)); // bob can finish escrow env(escrow::finish(gw, alice, seq1), @@ -3620,7 +3620,7 @@ struct EscrowToken_test : public beast::unit_test::suite BEAST_EXPECT(env.balance(alice, MPT) == preAlice - delta); BEAST_EXPECT(mptEscrowed(env, alice, MPT) == 0); BEAST_EXPECT(issuerMPTEscrowed(env, MPT) == 0); - BEAST_EXPECT(env.balance(gw, MPT) == MPT(19'875)); + BEAST_EXPECT(env.balance(gw, MPT) == MPT(-19'875)); } } @@ -3826,7 +3826,7 @@ struct EscrowToken_test : public beast::unit_test::suite BEAST_EXPECT(mptEscrowed(env, alice, MPT) == 10); BEAST_EXPECT(env.balance(bob, MPT) == MPT(0)); BEAST_EXPECT(mptEscrowed(env, bob, MPT) == 0); - BEAST_EXPECT(env.balance(gw, MPT) == MPT(10)); + BEAST_EXPECT(env.balance(gw, MPT) == MPT(-10)); mptGw.authorize({.account = bob, .flags = tfMPTUnauthorize}); mptGw.destroy( {.id = mptGw.issuanceID(), diff --git a/src/test/app/Invariants_test.cpp b/src/test/app/Invariants_test.cpp index ae2a1c45df..c91149b2f7 100644 --- a/src/test/app/Invariants_test.cpp +++ b/src/test/app/Invariants_test.cpp @@ -31,6 +31,7 @@ #include namespace ripple { +namespace test { class Invariants_test : public beast::unit_test::suite { @@ -112,13 +113,13 @@ class Invariants_test : public beast::unit_test::suite { terActual = ac.checkInvariants(terActual, fee); BEAST_EXPECT(terExpect == terActual); + auto const messages = sink.messages().str(); BEAST_EXPECT( - sink.messages().str().starts_with("Invariant failed:") || - sink.messages().str().starts_with( - "Transaction caused an exception")); + messages.starts_with("Invariant failed:") || + messages.starts_with("Transaction caused an exception")); for (auto const& m : expect_logs) { - if (sink.messages().str().find(m) == std::string::npos) + if (messages.find(m) == std::string::npos) { // uncomment if you want to log the invariant failure // message log << " --> " << m << std::endl; @@ -1435,6 +1436,127 @@ class Invariants_test : public beast::unit_test::suite {tecINVARIANT_FAILED, tecINVARIANT_FAILED}); } + void + testValidPseudoAccounts() + { + testcase << "valid pseudo accounts"; + + using namespace jtx; + + AccountID pseudoAccountID; + Preclose createPseudo = + [&, this](Account const& a, Account const& b, Env& env) { + PrettyAsset const xrpAsset{xrpIssue(), 1'000'000}; + + // Create vault + Vault vault{env}; + auto [tx, vKeylet] = + vault.create({.owner = a, .asset = xrpAsset}); + env(tx); + env.close(); + if (auto const vSle = env.le(vKeylet); BEAST_EXPECT(vSle)) + { + pseudoAccountID = vSle->at(sfAccount); + } + + return BEAST_EXPECT(env.le(keylet::account(pseudoAccountID))); + }; + + /* Cases to check + "pseudo-account has 0 pseudo-account fields set" + "pseudo-account has 2 pseudo-account fields set" + "pseudo-account sequence changed" + "pseudo-account flags are not set" + "pseudo-account has a regular key" + */ + struct Mod + { + std::string expectedFailure; + std::function func; + }; + auto const mods = std::to_array({ + { + "pseudo-account has 0 pseudo-account fields set", + [this](SLE::pointer& sle) { + BEAST_EXPECT(sle->at(~sfVaultID)); + sle->at(~sfVaultID) = std::nullopt; + }, + }, + { + "pseudo-account sequence changed", + [](SLE::pointer& sle) { sle->at(sfSequence) = 12345; }, + }, + { + 
"pseudo-account flags are not set", + [](SLE::pointer& sle) { sle->at(sfFlags) = lsfNoFreeze; }, + }, + { + "pseudo-account has a regular key", + [](SLE::pointer& sle) { + sle->at(sfRegularKey) = Account("regular").id(); + }, + }, + }); + + for (auto const& mod : mods) + { + doInvariantCheck( + {{mod.expectedFailure}}, + [&](Account const& A1, Account const&, ApplyContext& ac) { + auto sle = ac.view().peek(keylet::account(pseudoAccountID)); + if (!sle) + return false; + mod.func(sle); + ac.view().update(sle); + return true; + }, + XRPAmount{}, + STTx{ttACCOUNT_SET, [](STObject& tx) {}}, + {tecINVARIANT_FAILED, tefINVARIANT_FAILED}, + createPseudo); + } + for (auto const pField : getPseudoAccountFields()) + { + // createPseudo creates a vault, so sfVaultID will be set, and + // setting it again will not cause an error + if (pField == &sfVaultID) + continue; + doInvariantCheck( + {{"pseudo-account has 2 pseudo-account fields set"}}, + [&](Account const& A1, Account const&, ApplyContext& ac) { + auto sle = ac.view().peek(keylet::account(pseudoAccountID)); + if (!sle) + return false; + + auto const vaultID = ~sle->at(~sfVaultID); + BEAST_EXPECT(vaultID && !sle->isFieldPresent(*pField)); + sle->setFieldH256(*pField, *vaultID); + + ac.view().update(sle); + return true; + }, + XRPAmount{}, + STTx{ttACCOUNT_SET, [](STObject& tx) {}}, + {tecINVARIANT_FAILED, tefINVARIANT_FAILED}, + createPseudo); + } + + // Take one of the regular accounts and set the sequence to 0, which + // will make it look like a pseudo-account + doInvariantCheck( + {{"pseudo-account has 0 pseudo-account fields set"}, + {"pseudo-account sequence changed"}, + {"pseudo-account flags are not set"}}, + [&](Account const& A1, Account const&, ApplyContext& ac) { + auto sle = ac.view().peek(keylet::account(A1.id())); + if (!sle) + return false; + sle->at(sfSequence) = 0; + ac.view().update(sle); + return true; + }); + } + void testPermissionedDEX() { @@ -1622,10 +1744,12 @@ public: testValidNewAccountRoot(); testNFTokenPageInvariants(); testPermissionedDomainInvariants(); + testValidPseudoAccounts(); testPermissionedDEX(); } }; BEAST_DEFINE_TESTSUITE(Invariants, app, ripple); +} // namespace test } // namespace ripple diff --git a/src/test/app/LPTokenTransfer_test.cpp b/src/test/app/LPTokenTransfer_test.cpp index e95e974547..eccd864e71 100644 --- a/src/test/app/LPTokenTransfer_test.cpp +++ b/src/test/app/LPTokenTransfer_test.cpp @@ -287,11 +287,11 @@ class LPTokenTransfer_test : public jtx::AMMTest // with fixFrozenLPTokenTransfer enabled, alice's offer can no // longer cross with carol's offer BEAST_EXPECT( - expectLine(env, alice, STAmount{token1, 10'000'000}) && - expectLine(env, alice, STAmount{token2, 10'000'000})); + expectHolding(env, alice, STAmount{token1, 10'000'000}) && + expectHolding(env, alice, STAmount{token2, 10'000'000})); BEAST_EXPECT( - expectLine(env, carol, STAmount{token2, 10'000'000}) && - expectLine(env, carol, STAmount{token1, 10'000'000})); + expectHolding(env, carol, STAmount{token2, 10'000'000}) && + expectHolding(env, carol, STAmount{token1, 10'000'000})); BEAST_EXPECT( expectOffers(env, alice, 1) && expectOffers(env, carol, 0)); } @@ -300,11 +300,11 @@ class LPTokenTransfer_test : public jtx::AMMTest // alice's offer still crosses with carol's offer despite carol's // token1 is frozen BEAST_EXPECT( - expectLine(env, alice, STAmount{token1, 10'000'100}) && - expectLine(env, alice, STAmount{token2, 9'999'900})); + expectHolding(env, alice, STAmount{token1, 10'000'100}) && + expectHolding(env, alice, 
STAmount{token2, 9'999'900})); BEAST_EXPECT( - expectLine(env, carol, STAmount{token2, 10'000'100}) && - expectLine(env, carol, STAmount{token1, 9'999'900})); + expectHolding(env, carol, STAmount{token2, 10'000'100}) && + expectHolding(env, carol, STAmount{token1, 9'999'900})); BEAST_EXPECT( expectOffers(env, alice, 0) && expectOffers(env, carol, 0)); } diff --git a/src/test/app/ValidatorSite_test.cpp b/src/test/app/ValidatorSite_test.cpp index 579cd79a5a..cd60a5ed99 100644 --- a/src/test/app/ValidatorSite_test.cpp +++ b/src/test/app/ValidatorSite_test.cpp @@ -37,7 +37,6 @@ #include namespace ripple { -namespace test { namespace detail { constexpr char const* realValidatorContents() @@ -56,6 +55,7 @@ auto constexpr default_expires = std::chrono::seconds{3600}; auto constexpr default_effective_overlap = std::chrono::seconds{30}; } // namespace detail +namespace test { class ValidatorSite_test : public beast::unit_test::suite { private: diff --git a/src/test/app/Vault_test.cpp b/src/test/app/Vault_test.cpp index 3cd52eaad3..159bfd0796 100644 --- a/src/test/app/Vault_test.cpp +++ b/src/test/app/Vault_test.cpp @@ -1339,7 +1339,7 @@ class Vault_test : public beast::unit_test::suite env.close(); Vault vault{env}; - Asset asset = issuer["IOU"]; + Asset asset = issuer["IOU"].asset(); auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); @@ -1358,7 +1358,7 @@ class Vault_test : public beast::unit_test::suite env.close(); Vault vault{env}; - Asset asset = issuer["IOU"]; + Asset asset = issuer["IOU"].asset(); auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); env(tx, ter(terNO_RIPPLE)); @@ -1374,7 +1374,7 @@ class Vault_test : public beast::unit_test::suite env.close(); Vault vault{env}; - Asset asset = issuer["IOU"]; + Asset asset = issuer["IOU"].asset(); { auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); @@ -3165,7 +3165,7 @@ class Vault_test : public beast::unit_test::suite { using namespace test::jtx; - testcase("failed pseudo-account allocation"); + testcase("fail pseudo-account allocation"); Env env{*this, testable_amendments() | featureSingleAssetVault}; Account const owner{"owner"}; Vault vault{env}; @@ -3502,7 +3502,7 @@ class Vault_test : public beast::unit_test::suite STAmount(d.asset, Number(100, 0))); BEAST_EXPECT( env.balance(d.vaultAccount, d.shares) == - STAmount(d.share, Number(1000, 0))); + STAmount(d.share, Number(-1000, 0))); { testcase("Scale redeem exact"); @@ -3527,7 +3527,7 @@ class Vault_test : public beast::unit_test::suite STAmount(d.asset, Number(90, 0))); BEAST_EXPECT( env.balance(d.vaultAccount, d.shares) == - STAmount(d.share, Number(900, 0))); + STAmount(d.share, Number(-900, 0))); } { @@ -3562,7 +3562,7 @@ class Vault_test : public beast::unit_test::suite STAmount(d.asset, Number(900 - 25, -1))); BEAST_EXPECT( env.balance(d.vaultAccount, d.shares) == - STAmount(d.share, Number(900 - 25, 0))); + STAmount(d.share, -Number(900 - 25, 0))); } { @@ -3589,7 +3589,7 @@ class Vault_test : public beast::unit_test::suite STAmount(d.asset, Number(875 - 21, -1))); BEAST_EXPECT( env.balance(d.vaultAccount, d.shares) == - STAmount(d.share, Number(875 - 21, 0))); + STAmount(d.share, -Number(875 - 21, 0))); } { @@ -3650,7 +3650,7 @@ class Vault_test : public beast::unit_test::suite STAmount(d.asset, Number(100, 0))); BEAST_EXPECT( env.balance(d.vaultAccount, d.shares) == - STAmount(d.share, Number(1000, 0))); + STAmount(d.share, Number(-1000, 0))); { testcase("Scale withdraw exact"); @@ -3678,7 +3678,7 @@ class Vault_test : public 
beast::unit_test::suite STAmount(d.asset, Number(90, 0))); BEAST_EXPECT( env.balance(d.vaultAccount, d.shares) == - STAmount(d.share, Number(900, 0))); + STAmount(d.share, Number(-900, 0))); } { @@ -3725,7 +3725,7 @@ class Vault_test : public beast::unit_test::suite STAmount(d.asset, Number(900 - 25, -1))); BEAST_EXPECT( env.balance(d.vaultAccount, d.shares) == - STAmount(d.share, Number(900 - 25, 0))); + STAmount(d.share, -Number(900 - 25, 0))); } { @@ -3754,7 +3754,7 @@ class Vault_test : public beast::unit_test::suite STAmount(d.asset, Number(875 - 38, -1))); BEAST_EXPECT( env.balance(d.vaultAccount, d.shares) == - STAmount(d.share, Number(875 - 38, 0))); + STAmount(d.share, -Number(875 - 38, 0))); } { @@ -3783,7 +3783,7 @@ class Vault_test : public beast::unit_test::suite STAmount(d.asset, Number(837 - 37, -1))); BEAST_EXPECT( env.balance(d.vaultAccount, d.shares) == - STAmount(d.share, Number(837 - 37, 0))); + STAmount(d.share, -Number(837 - 37, 0))); } { @@ -3806,7 +3806,7 @@ class Vault_test : public beast::unit_test::suite STAmount(d.asset, Number(800 - 1, -1))); BEAST_EXPECT( env.balance(d.vaultAccount, d.shares) == - STAmount(d.share, Number(800 - 1, 0))); + STAmount(d.share, -Number(800 - 1, 0))); } { @@ -3869,7 +3869,7 @@ class Vault_test : public beast::unit_test::suite STAmount(d.asset, Number(100, 0))); BEAST_EXPECT( env.balance(d.vaultAccount, d.shares) == - STAmount(d.share, Number(1000, 0))); + STAmount(d.share, -Number(1000, 0))); { testcase("Scale clawback exact"); // assetsToSharesWithdraw: @@ -3897,7 +3897,7 @@ class Vault_test : public beast::unit_test::suite STAmount(d.asset, Number(90, 0))); BEAST_EXPECT( env.balance(d.vaultAccount, d.shares) == - STAmount(d.share, Number(900, 0))); + STAmount(d.share, -Number(900, 0))); } { @@ -3937,7 +3937,7 @@ class Vault_test : public beast::unit_test::suite STAmount(d.asset, Number(900 - 25, -1))); BEAST_EXPECT( env.balance(d.vaultAccount, d.shares) == - STAmount(d.share, Number(900 - 25, 0))); + STAmount(d.share, -Number(900 - 25, 0))); } { @@ -3967,7 +3967,7 @@ class Vault_test : public beast::unit_test::suite STAmount(d.asset, Number(875 - 38, -1))); BEAST_EXPECT( env.balance(d.vaultAccount, d.shares) == - STAmount(d.share, Number(875 - 38, 0))); + STAmount(d.share, -Number(875 - 38, 0))); } { @@ -3997,7 +3997,7 @@ class Vault_test : public beast::unit_test::suite STAmount(d.asset, Number(837 - 37, -1))); BEAST_EXPECT( env.balance(d.vaultAccount, d.shares) == - STAmount(d.share, Number(837 - 37, 0))); + STAmount(d.share, -Number(837 - 37, 0))); } { @@ -4021,7 +4021,7 @@ class Vault_test : public beast::unit_test::suite STAmount(d.asset, Number(800 - 1, -1))); BEAST_EXPECT( env.balance(d.vaultAccount, d.shares) == - STAmount(d.share, Number(800 - 1, 0))); + STAmount(d.share, -Number(800 - 1, 0))); } { diff --git a/src/test/basics/FileUtilities_test.cpp b/src/test/basics/FileUtilities_test.cpp index 9071ac7231..d1a1d50216 100644 --- a/src/test/basics/FileUtilities_test.cpp +++ b/src/test/basics/FileUtilities_test.cpp @@ -31,7 +31,7 @@ public: void testGetFileContents() { - using namespace ripple::test::detail; + using namespace ripple::detail; using namespace boost::system; constexpr char const* expectedContents = diff --git a/src/test/basics/FeeUnits_test.cpp b/src/test/basics/Units_test.cpp similarity index 92% rename from src/test/basics/FeeUnits_test.cpp rename to src/test/basics/Units_test.cpp index f9be632644..33dce42abb 100644 --- a/src/test/basics/FeeUnits_test.cpp +++ b/src/test/basics/Units_test.cpp @@ -17,13 +17,13 
@@ */ #include -#include #include +#include namespace ripple { namespace test { -class feeunits_test : public beast::unit_test::suite +class units_test : public beast::unit_test::suite { private: void @@ -35,16 +35,16 @@ private: XRPAmount x{100}; BEAST_EXPECT(x.drops() == 100); BEAST_EXPECT( - (std::is_same_v)); + (std::is_same_v)); auto y = 4u * x; BEAST_EXPECT(y.value() == 400); BEAST_EXPECT( - (std::is_same_v)); + (std::is_same_v)); auto z = 4 * y; BEAST_EXPECT(z.value() == 1600); BEAST_EXPECT( - (std::is_same_v)); + (std::is_same_v)); FeeLevel32 f{10}; FeeLevel32 baseFee{100}; @@ -55,7 +55,7 @@ private: BEAST_EXPECT(drops.value() == 1000); BEAST_EXPECT((std::is_same_v< std::remove_reference_t::unit_type, - feeunit::dropTag>)); + unit::dropTag>)); BEAST_EXPECT((std::is_same_v< std::remove_reference_t, @@ -65,11 +65,11 @@ private: XRPAmount x{100}; BEAST_EXPECT(x.value() == 100); BEAST_EXPECT( - (std::is_same_v)); + (std::is_same_v)); auto y = 4u * x; BEAST_EXPECT(y.value() == 400); BEAST_EXPECT( - (std::is_same_v)); + (std::is_same_v)); FeeLevel64 f{10}; FeeLevel64 baseFee{100}; @@ -80,7 +80,7 @@ private: BEAST_EXPECT(drops.value() == 1000); BEAST_EXPECT((std::is_same_v< std::remove_reference_t::unit_type, - feeunit::dropTag>)); + unit::dropTag>)); BEAST_EXPECT((std::is_same_v< std::remove_reference_t, XRPAmount>)); @@ -89,12 +89,12 @@ private: FeeLevel64 x{1024}; BEAST_EXPECT(x.value() == 1024); BEAST_EXPECT( - (std::is_same_v)); + (std::is_same_v)); std::uint64_t m = 4; auto y = m * x; BEAST_EXPECT(y.value() == 4096); BEAST_EXPECT( - (std::is_same_v)); + (std::is_same_v)); XRPAmount basefee{10}; FeeLevel64 referencefee{256}; @@ -105,7 +105,7 @@ private: BEAST_EXPECT(drops.value() == 40); BEAST_EXPECT((std::is_same_v< std::remove_reference_t::unit_type, - feeunit::dropTag>)); + unit::dropTag>)); BEAST_EXPECT((std::is_same_v< std::remove_reference_t, XRPAmount>)); @@ -181,7 +181,7 @@ private: void testFunctions() { - // Explicitly test every defined function for the TaggedFee class + // Explicitly test every defined function for the ValueUnit class // since some of them are templated, but not used anywhere else. 
using FeeLevel32 = FeeLevel; @@ -191,8 +191,8 @@ private: return FeeLevel64{x}; }; + [[maybe_unused]] FeeLevel64 defaulted; - (void)defaulted; FeeLevel64 test{0}; BEAST_EXPECT(test.fee() == 0); @@ -278,8 +278,8 @@ private: return FeeLevelDouble{x}; }; + [[maybe_unused]] FeeLevelDouble defaulted; - (void)defaulted; FeeLevelDouble test{0}; BEAST_EXPECT(test.fee() == 0); @@ -371,7 +371,7 @@ public: } }; -BEAST_DEFINE_TESTSUITE(feeunits, basics, ripple); +BEAST_DEFINE_TESTSUITE(units, basics, ripple); } // namespace test } // namespace ripple diff --git a/src/test/basics/base_uint_test.cpp b/src/test/basics/base_uint_test.cpp index 6ee9f0901a..458032d203 100644 --- a/src/test/basics/base_uint_test.cpp +++ b/src/test/basics/base_uint_test.cpp @@ -152,6 +152,7 @@ struct base_uint_test : beast::unit_test::suite uset.insert(u); BEAST_EXPECT(raw.size() == u.size()); BEAST_EXPECT(to_string(u) == "0102030405060708090A0B0C"); + BEAST_EXPECT(to_short_string(u) == "01020304..."); BEAST_EXPECT(*u.data() == 1); BEAST_EXPECT(u.signum() == 1); BEAST_EXPECT(!!u); @@ -174,6 +175,7 @@ struct base_uint_test : beast::unit_test::suite test96 v{~u}; uset.insert(v); BEAST_EXPECT(to_string(v) == "FEFDFCFBFAF9F8F7F6F5F4F3"); + BEAST_EXPECT(to_short_string(v) == "FEFDFCFB..."); BEAST_EXPECT(*v.data() == 0xfe); BEAST_EXPECT(v.signum() == 1); BEAST_EXPECT(!!v); @@ -194,6 +196,7 @@ struct base_uint_test : beast::unit_test::suite test96 z{beast::zero}; uset.insert(z); BEAST_EXPECT(to_string(z) == "000000000000000000000000"); + BEAST_EXPECT(to_short_string(z) == "00000000..."); BEAST_EXPECT(*z.data() == 0); BEAST_EXPECT(*z.begin() == 0); BEAST_EXPECT(*std::prev(z.end(), 1) == 0); @@ -214,6 +217,7 @@ struct base_uint_test : beast::unit_test::suite BEAST_EXPECT(n == z); n--; BEAST_EXPECT(to_string(n) == "FFFFFFFFFFFFFFFFFFFFFFFF"); + BEAST_EXPECT(to_short_string(n) == "FFFFFFFF..."); n = beast::zero; BEAST_EXPECT(n == z); @@ -224,6 +228,7 @@ struct base_uint_test : beast::unit_test::suite test96 x{zm1 ^ zp1}; uset.insert(x); BEAST_EXPECTS(to_string(x) == "FFFFFFFFFFFFFFFFFFFFFFFE", to_string(x)); + BEAST_EXPECTS(to_short_string(x) == "FFFFFFFF...", to_short_string(x)); BEAST_EXPECT(uset.size() == 4); diff --git a/src/test/core/Config_test.cpp b/src/test/core/Config_test.cpp index 8b0fce1e20..a1a6a079cc 100644 --- a/src/test/core/Config_test.cpp +++ b/src/test/core/Config_test.cpp @@ -128,7 +128,7 @@ backend=sqlite /** Write a rippled config file and remove when done. */ -class RippledCfgGuard : public ripple::test::detail::FileDirGuard +class RippledCfgGuard : public ripple::detail::FileDirGuard { private: path dataDir_; @@ -239,7 +239,7 @@ moreripplevalidators.net /** Write a validators.txt file and remove when done. 
*/ -class ValidatorsTxtGuard : public test::detail::FileDirGuard +class ValidatorsTxtGuard : public detail::FileDirGuard { public: ValidatorsTxtGuard( @@ -345,7 +345,7 @@ port_wss_admin { // read from file absolute path auto const cwd = current_path(); - ripple::test::detail::DirGuard const g0(*this, "test_db"); + ripple::detail::DirGuard const g0(*this, "test_db"); path const dataDirRel("test_data_dir"); path const dataDirAbs(cwd / g0.subdir() / dataDirRel); detail::RippledCfgGuard const g( diff --git a/src/test/csf/Digraph.h b/src/test/csf/Digraph.h index 3f079eac17..e65a7af913 100644 --- a/src/test/csf/Digraph.h +++ b/src/test/csf/Digraph.h @@ -30,9 +30,6 @@ #include namespace ripple { -namespace test { -namespace csf { - namespace detail { // Dummy class when no edge data needed for graph struct NoEdgeData @@ -41,6 +38,9 @@ struct NoEdgeData } // namespace detail +namespace test { +namespace csf { + /** Directed graph Basic directed graph that uses an adjacency list to represent out edges. diff --git a/src/test/jtx/Env.h b/src/test/jtx/Env.h index 21a239e3d7..68d8d3e53f 100644 --- a/src/test/jtx/Env.h +++ b/src/test/jtx/Env.h @@ -469,6 +469,9 @@ public: Returns 0 if the trust line does not exist. */ // VFALCO NOTE This should return a unit-less amount + PrettyAmount + balance(Account const& account, Asset const& asset) const; + PrettyAmount balance(Account const& account, Issue const& issue) const; diff --git a/src/test/jtx/TestHelpers.h b/src/test/jtx/TestHelpers.h index d4a39b6498..7d14f23c92 100644 --- a/src/test/jtx/TestHelpers.h +++ b/src/test/jtx/TestHelpers.h @@ -27,7 +27,9 @@ #include #include #include +#include #include +#include #include #include @@ -44,6 +46,252 @@ namespace ripple { namespace test { namespace jtx { +/** Generic helper class for helper classes that set a field on a JTx. + + Not every helper will be able to use this because of conversions and other + issues, but for classes where it's straightforward, this can simplify things.
+*/ +template < + class SField, + class StoredValue = typename SField::type::value_type, + class OutputValue = StoredValue> +struct JTxField +{ + using SF = SField; + using SV = StoredValue; + using OV = OutputValue; + +protected: + SF const& sfield_; + SV value_; + +public: + explicit JTxField(SF const& sfield, SV const& value) + : sfield_(sfield), value_(value) + { + } + + virtual ~JTxField() = default; + + virtual OV + value() const = 0; + + virtual void + operator()(Env&, JTx& jt) const + { + jt.jv[sfield_.jsonName] = value(); + } +}; + +template +struct JTxField +{ + using SF = SField; + using SV = StoredValue; + using OV = SV; + +protected: + SF const& sfield_; + SV value_; + +public: + explicit JTxField(SF const& sfield, SV const& value) + : sfield_(sfield), value_(value) + { + } + + void + operator()(Env&, JTx& jt) const + { + jt.jv[sfield_.jsonName] = value_; + } +}; + +struct timePointField + : public JTxField +{ + using SF = SF_UINT32; + using SV = NetClock::time_point; + using OV = NetClock::rep; + using base = JTxField; + +protected: + using base::value_; + +public: + explicit timePointField(SF const& sfield, SV const& value) + : JTxField(sfield, value) + { + } + + OV + value() const override + { + return value_.time_since_epoch().count(); + } +}; + +struct uint256Field : public JTxField +{ + using SF = SF_UINT256; + using SV = uint256; + using OV = std::string; + using base = JTxField; + +protected: + using base::value_; + +public: + explicit uint256Field(SF const& sfield, SV const& value) + : JTxField(sfield, value) + { + } + + OV + value() const override + { + return to_string(value_); + } +}; + +struct accountIDField : public JTxField +{ + using SF = SF_ACCOUNT; + using SV = AccountID; + using OV = std::string; + using base = JTxField; + +protected: + using base::value_; + +public: + explicit accountIDField(SF const& sfield, SV const& value) + : JTxField(sfield, value) + { + } + + OV + value() const override + { + return toBase58(value_); + } +}; + +struct blobField : public JTxField +{ + using SF = SF_VL; + using SV = std::string; + using base = JTxField; + + using JTxField::JTxField; + + explicit blobField(SF const& sfield, Slice const& cond) + : JTxField(sfield, strHex(cond)) + { + } + + template + explicit blobField(SF const& sfield, std::array const& c) + : blobField(sfield, makeSlice(c)) + { + } +}; + +template +struct valueUnitField + : public JTxField, ValueType> +{ + using SF = SField; + using SV = unit::ValueUnit; + using OV = ValueType; + using base = JTxField; + + static_assert(std::is_same_v); + +protected: + using base::value_; + +public: + using JTxField::JTxField; + + OV + value() const override + { + return value_.value(); + } +}; + +template +struct JTxFieldWrapper +{ + using JF = JTxField; + using SF = typename JF::SF; + using SV = typename JF::SV; + +protected: + SF const& sfield_; + +public: + explicit JTxFieldWrapper(SF const& sfield) : sfield_(sfield) + { + } + + JF + operator()(SV const& value) const + { + return JTxField(sfield_, value); + } +}; + +template <> +struct JTxFieldWrapper +{ + using JF = blobField; + using SF = JF::SF; + using SV = JF::SV; + +protected: + SF const& sfield_; + +public: + explicit JTxFieldWrapper(SF const& sfield) : sfield_(sfield) + { + } + + JF + operator()(SV const& cond) const + { + return JF(sfield_, makeSlice(cond)); + } + + JF + operator()(Slice const& cond) const + { + return JF(sfield_, cond); + } + + template + JF + operator()(std::array const& c) const + { + return operator()(makeSlice(c)); + } +}; + +template 
< + class SField, + class UnitTag, + class ValueType = typename SField::type::value_type> +using valueUnitWrapper = + JTxFieldWrapper>; + +template +using simpleField = JTxFieldWrapper>; + +/** General field definitions, or fields used in multiple transaction namespaces + */ +auto const data = JTxFieldWrapper(sfData); + // TODO We only need this long "requires" clause as polyfill, for C++20 // implementations which are missing header. Replace with // `std::ranges::range`, and accordingly use std::ranges::begin/end @@ -111,6 +359,25 @@ checkArraySize(Json::Value const& val, unsigned int size); std::uint32_t ownerCount(test::jtx::Env const& env, test::jtx::Account const& account); +[[nodiscard]] +inline bool +checkVL(Slice const& result, std::string const& expected) +{ + Serializer s; + s.addRaw(result); + return s.getString() == expected; +} + +[[nodiscard]] +inline bool +checkVL( + std::shared_ptr const& sle, + SField const& field, + std::string const& expected) +{ + return strHex(expected) == strHex(sle->getFieldVL(field)); +} + /* Path finding */ /******************************************************************************/ void @@ -186,7 +453,7 @@ PrettyAmount xrpMinusFee(Env const& env, std::int64_t xrpAmount); bool -expectLine( +expectHolding( Env& env, AccountID const& account, STAmount const& value, @@ -194,18 +461,18 @@ expectLine( template bool -expectLine( +expectHolding( Env& env, AccountID const& account, STAmount const& value, Amts const&... amts) { - return expectLine(env, account, value, false) && - expectLine(env, account, amts...); + return expectHolding(env, account, value, false) && + expectHolding(env, account, amts...); } bool -expectLine(Env& env, AccountID const& account, None const& value); +expectHolding(Env& env, AccountID const& account, None const& value); bool expectOffers( diff --git a/src/test/jtx/amount.h b/src/test/jtx/amount.h index 344a2ab73c..a793f3a287 100644 --- a/src/test/jtx/amount.h +++ b/src/test/jtx/amount.h @@ -24,9 +24,9 @@ #include #include -#include #include #include +#include #include #include @@ -34,6 +34,15 @@ #include namespace ripple { +namespace detail { + +struct epsilon_multiple +{ + std::size_t n; +}; + +} // namespace detail + namespace test { namespace jtx { @@ -57,7 +66,7 @@ struct AnyAmount; // struct None { - Issue issue; + Asset asset; }; //------------------------------------------------------------------------------ @@ -133,6 +142,12 @@ public: return amount_; } + inline int + signum() const + { + return amount_.signum(); + } + operator STAmount const&() const { return amount_; @@ -165,17 +180,17 @@ struct PrettyAsset { private: Asset asset_; - unsigned int scale_; + std::uint32_t scale_; public: template requires std::convertible_to - PrettyAsset(A const& asset, unsigned int scale = 1) + PrettyAsset(A const& asset, std::uint32_t scale = 1) : PrettyAsset{Asset{asset}, scale} { } - PrettyAsset(Asset const& asset, unsigned int scale = 1) + PrettyAsset(Asset const& asset, std::uint32_t scale = 1) : asset_(asset), scale_(scale) { } @@ -199,10 +214,22 @@ public: template PrettyAmount operator()(T v) const + { + return operator()(Number(v)); + } + + PrettyAmount + operator()(Number v) const { STAmount amount{asset_, v * scale_}; return {amount, ""}; } + + None + operator()(none_t) const + { + return {asset_}; + } }; //------------------------------------------------------------------------------ @@ -312,15 +339,6 @@ drops(XRPAmount i) //------------------------------------------------------------------------------ -namespace 
detail { - -struct epsilon_multiple -{ - std::size_t n; -}; - -} // namespace detail - // The smallest possible IOU STAmount struct epsilon_t { @@ -360,6 +378,11 @@ public: { return {currency, account.id()}; } + Asset + asset() const + { + return issue(); + } /** Implicit conversion to Issue or Asset. @@ -370,9 +393,9 @@ public: { return issue(); } - operator Asset() const + operator PrettyAsset() const { - return issue(); + return asset(); } template < @@ -438,14 +461,32 @@ public: return issuanceID; } - /** Implicit conversion to MPTIssue. + /** Explicit conversion to MPTIssue or asset. + */ + ripple::MPTIssue + mptIssue() const + { + return MPTIssue{issuanceID}; + } + Asset + asset() const + { + return mptIssue(); + } + + /** Implicit conversion to MPTIssue or asset. This allows passing an MPT value where an MPTIssue is expected. */ operator ripple::MPTIssue() const { - return MPTIssue{issuanceID}; + return mptIssue(); + } + + operator PrettyAsset() const + { + return asset(); } template @@ -461,6 +502,13 @@ public: PrettyAmount operator()(detail::epsilon_multiple) const; + /** Returns None-of-Issue */ + None + operator()(none_t) const + { + return {mptIssue()}; + } + friend BookSpec operator~(MPT const& mpt) { diff --git a/src/test/jtx/balance.h b/src/test/jtx/balance.h index 3a2cf0423f..0c4a6cca1d 100644 --- a/src/test/jtx/balance.h +++ b/src/test/jtx/balance.h @@ -38,9 +38,9 @@ namespace jtx { class balance { private: - bool none_; - Account account_; - STAmount value_; + bool const none_; + Account const account_; + STAmount const value_; public: balance(Account const& account, none_t) @@ -49,7 +49,7 @@ public: } balance(Account const& account, None const& value) - : none_(true), account_(account), value_(value.issue) + : none_(true), account_(account), value_(value.asset) { } diff --git a/src/test/jtx/escrow.h b/src/test/jtx/escrow.h index 3147b44c65..483db578b0 100644 --- a/src/test/jtx/escrow.h +++ b/src/test/jtx/escrow.h @@ -22,6 +22,7 @@ #include #include +#include #include #include @@ -94,86 +95,14 @@ std::array const cb3 = { 0x57, 0x0D, 0x15, 0x85, 0x8B, 0xD4, 0x81, 0x01, 0x04}}; /** Set the "FinishAfter" time tag on a JTx */ -struct finish_time -{ -private: - NetClock::time_point value_; - -public: - explicit finish_time(NetClock::time_point const& value) : value_(value) - { - } - - void - operator()(Env&, JTx& jt) const - { - jt.jv[sfFinishAfter.jsonName] = value_.time_since_epoch().count(); - } -}; +auto const finish_time = JTxFieldWrapper(sfFinishAfter); /** Set the "CancelAfter" time tag on a JTx */ -struct cancel_time -{ -private: - NetClock::time_point value_; +auto const cancel_time = JTxFieldWrapper(sfCancelAfter); -public: - explicit cancel_time(NetClock::time_point const& value) : value_(value) - { - } +auto const condition = JTxFieldWrapper(sfCondition); - void - operator()(jtx::Env&, jtx::JTx& jt) const - { - jt.jv[sfCancelAfter.jsonName] = value_.time_since_epoch().count(); - } -}; - -struct condition -{ -private: - std::string value_; - -public: - explicit condition(Slice const& cond) : value_(strHex(cond)) - { - } - - template - explicit condition(std::array const& c) - : condition(makeSlice(c)) - { - } - - void - operator()(Env&, JTx& jt) const - { - jt.jv[sfCondition.jsonName] = value_; - } -}; - -struct fulfillment -{ -private: - std::string value_; - -public: - explicit fulfillment(Slice condition) : value_(strHex(condition)) - { - } - - template - explicit fulfillment(std::array f) - : fulfillment(makeSlice(f)) - { - } - - void - operator()(Env&, JTx& jt) 
const - { - jt.jv[sfFulfillment.jsonName] = value_; - } -}; +auto const fulfillment = JTxFieldWrapper(sfFulfillment); } // namespace escrow diff --git a/src/test/jtx/fee.h b/src/test/jtx/fee.h index 7d54804f87..3e3740b80d 100644 --- a/src/test/jtx/fee.h +++ b/src/test/jtx/fee.h @@ -37,6 +37,7 @@ class fee { private: bool manual_ = true; + bool increment_ = false; std::optional amount_; public: @@ -44,6 +45,10 @@ public: { } + explicit fee(increment_t) : increment_(true) + { + } + explicit fee(none_t) { } diff --git a/src/test/jtx/flags.h b/src/test/jtx/flags.h index aa048c3e55..8d3fa4f25c 100644 --- a/src/test/jtx/flags.h +++ b/src/test/jtx/flags.h @@ -27,22 +27,6 @@ #include namespace ripple { -namespace test { -namespace jtx { - -// JSON generators - -/** Add and/or remove flag. */ -Json::Value -fset(Account const& account, std::uint32_t on, std::uint32_t off = 0); - -/** Remove account flag. */ -inline Json::Value -fclear(Account const& account, std::uint32_t off) -{ - return fset(account, 0, off); -} - namespace detail { class flags_helper @@ -123,6 +107,22 @@ protected: } // namespace detail +namespace test { +namespace jtx { + +// JSON generators + +/** Add and/or remove flag. */ +Json::Value +fset(Account const& account, std::uint32_t on, std::uint32_t off = 0); + +/** Remove account flag. */ +inline Json::Value +fclear(Account const& account, std::uint32_t off) +{ + return fset(account, 0, off); +} + /** Match set account flags */ class flags : private detail::flags_helper { diff --git a/src/test/jtx/impl/Env.cpp b/src/test/jtx/impl/Env.cpp index d6956b30c7..ae99e1b5d6 100644 --- a/src/test/jtx/impl/Env.cpp +++ b/src/test/jtx/impl/Env.cpp @@ -218,7 +218,9 @@ Env::balance(Account const& account, MPTIssue const& mptIssue) const if (!sle) return {STAmount(mptIssue, 0), account.name()}; - STAmount const amount{mptIssue, sle->getFieldU64(sfOutstandingAmount)}; + // Make it negative + STAmount const amount{ + mptIssue, sle->getFieldU64(sfOutstandingAmount), 0, true}; return {amount, lookup(issuer).name()}; } else @@ -233,6 +235,14 @@ Env::balance(Account const& account, MPTIssue const& mptIssue) const } } +PrettyAmount +Env::balance(Account const& account, Asset const& asset) const +{ + return std::visit( + [&](auto const& issue) { return balance(account, issue); }, + asset.value()); +} + PrettyAmount Env::limit(Account const& account, Issue const& issue) const { diff --git a/src/test/jtx/impl/TestHelpers.cpp b/src/test/jtx/impl/TestHelpers.cpp index 5f8c53877a..20f24f0d84 100644 --- a/src/test/jtx/impl/TestHelpers.cpp +++ b/src/test/jtx/impl/TestHelpers.cpp @@ -103,7 +103,7 @@ xrpMinusFee(Env const& env, std::int64_t xrpAmount) }; [[nodiscard]] bool -expectLine( +expectHolding( Env& env, AccountID const& account, STAmount const& value, @@ -137,9 +137,33 @@ expectLine( } [[nodiscard]] bool -expectLine(Env& env, AccountID const& account, None const& value) +expectHolding( + Env& env, + AccountID const& account, + None const&, + Issue const& issue) { - return !env.le(keylet::line(account, value.issue)); + return !env.le(keylet::line(account, issue)); +} + +[[nodiscard]] bool +expectHolding( + Env& env, + AccountID const& account, + None const&, + MPTIssue const& mptIssue) +{ + return !env.le(keylet::mptoken(mptIssue.getMptID(), account)); +} + +[[nodiscard]] bool +expectHolding(Env& env, AccountID const& account, None const& value) +{ + return std::visit( + [&](auto const& issue) { + return expectHolding(env, account, value, issue); + }, + value.asset.value()); } [[nodiscard]] bool diff 
--git a/src/test/jtx/impl/amount.cpp b/src/test/jtx/impl/amount.cpp index a1dbd25652..a04a6c87d2 100644 --- a/src/test/jtx/impl/amount.cpp +++ b/src/test/jtx/impl/amount.cpp @@ -91,12 +91,18 @@ operator<<(std::ostream& os, PrettyAmount const& amount) os << to_places(d, 6) << " XRP"; } - else + else if (amount.value().holds()) { os << amount.value().getText() << "/" << to_string(amount.value().issue().currency) << "(" << amount.name() << ")"; } + else + { + auto const& mptIssue = amount.value().asset().get(); + os << amount.value().getText() << "/" << to_string(mptIssue) << "(" + << amount.name() << ")"; + } return os; } diff --git a/src/test/jtx/impl/balance.cpp b/src/test/jtx/impl/balance.cpp index 42330658eb..decbd816e1 100644 --- a/src/test/jtx/impl/balance.cpp +++ b/src/test/jtx/impl/balance.cpp @@ -24,38 +24,73 @@ namespace test { namespace jtx { void -balance::operator()(Env& env) const +doBalance( + Env& env, + AccountID const& account, + bool none, + STAmount const& value, + Issue const& issue) { - if (isXRP(value_.issue())) + if (isXRP(issue)) { - auto const sle = env.le(account_); - if (none_) + auto const sle = env.le(keylet::account(account)); + if (none) { env.test.expect(!sle); } else if (env.test.expect(sle)) { - env.test.expect(sle->getFieldAmount(sfBalance) == value_); + env.test.expect(sle->getFieldAmount(sfBalance) == value); } } else { - auto const sle = env.le(keylet::line(account_.id(), value_.issue())); - if (none_) + auto const sle = env.le(keylet::line(account, issue)); + if (none) { env.test.expect(!sle); } else if (env.test.expect(sle)) { auto amount = sle->getFieldAmount(sfBalance); - amount.setIssuer(value_.issue().account); - if (account_.id() > value_.issue().account) + amount.setIssuer(issue.account); + if (account > issue.account) amount.negate(); - env.test.expect(amount == value_); + env.test.expect(amount == value); } } } +void +doBalance( + Env& env, + AccountID const& account, + bool none, + STAmount const& value, + MPTIssue const& mptIssue) +{ + auto const sle = env.le(keylet::mptoken(mptIssue.getMptID(), account)); + if (none) + { + env.test.expect(!sle); + } + else if (env.test.expect(sle)) + { + STAmount const amount{mptIssue, sle->getFieldU64(sfMPTAmount)}; + env.test.expect(amount == value); + } +} + +void +balance::operator()(Env& env) const +{ + return std::visit( + [&](auto const& issue) { + doBalance(env, account_.id(), none_, value_, issue); + }, + value_.asset().value()); +} + } // namespace jtx } // namespace test } // namespace ripple diff --git a/src/test/jtx/impl/fee.cpp b/src/test/jtx/impl/fee.cpp index 71e3dd089a..a58105a85f 100644 --- a/src/test/jtx/impl/fee.cpp +++ b/src/test/jtx/impl/fee.cpp @@ -26,13 +26,16 @@ namespace test { namespace jtx { void -fee::operator()(Env&, JTx& jt) const +fee::operator()(Env& env, JTx& jt) const { if (!manual_) return; jt.fill_fee = false; - if (amount_) - jt[jss::Fee] = amount_->getJson(JsonOptions::none); + assert(!increment_ || !amount_); + if (increment_) + jt[sfFee] = STAmount(env.current()->fees().increment).getJson(); + else if (amount_) + jt[sfFee] = amount_->getJson(JsonOptions::none); } } // namespace jtx diff --git a/src/test/jtx/impl/owners.cpp b/src/test/jtx/impl/owners.cpp index 386ec29a37..b55986fccb 100644 --- a/src/test/jtx/impl/owners.cpp +++ b/src/test/jtx/impl/owners.cpp @@ -20,9 +20,6 @@ #include namespace ripple { -namespace test { -namespace jtx { - namespace detail { std::uint32_t @@ -39,7 +36,7 @@ owned_count_of(ReadView const& view, AccountID const& id, LedgerEntryType type) 
void owned_count_helper( - Env& env, + test::jtx::Env& env, AccountID const& id, LedgerEntryType type, std::uint32_t value) @@ -49,6 +46,9 @@ owned_count_helper( } // namespace detail +namespace test { +namespace jtx { + void owners::operator()(Env& env) const { diff --git a/src/test/jtx/owners.h b/src/test/jtx/owners.h index 9b6f6a6df5..baf5d9da7d 100644 --- a/src/test/jtx/owners.h +++ b/src/test/jtx/owners.h @@ -29,8 +29,6 @@ #include namespace ripple { -namespace test { -namespace jtx { namespace detail { @@ -39,13 +37,16 @@ owned_count_of(ReadView const& view, AccountID const& id, LedgerEntryType type); void owned_count_helper( - Env& env, + test::jtx::Env& env, AccountID const& id, LedgerEntryType type, std::uint32_t value); } // namespace detail +namespace test { +namespace jtx { + // Helper for aliases template class owner_count diff --git a/src/test/jtx/require.h b/src/test/jtx/require.h index bec21235a6..3215ac0abb 100644 --- a/src/test/jtx/require.h +++ b/src/test/jtx/require.h @@ -26,14 +26,12 @@ #include namespace ripple { -namespace test { -namespace jtx { namespace detail { template inline void -require_args(requires_t& vec, Cond const& cond, Args const&... args) +require_args(test::jtx::requires_t& vec, Cond const& cond, Args const&... args) { vec.push_back(cond); if constexpr (sizeof...(args) > 0) @@ -42,6 +40,9 @@ require_args(requires_t& vec, Cond const& cond, Args const&... args) } // namespace detail +namespace test { +namespace jtx { + /** Compose many condition functors into one */ template require_t diff --git a/src/test/jtx/tags.h b/src/test/jtx/tags.h index bb64295f05..4d55929d69 100644 --- a/src/test/jtx/tags.h +++ b/src/test/jtx/tags.h @@ -49,6 +49,16 @@ struct disabled_t }; static disabled_t const disabled; +/** Used for fee() calls that use an owner reserve increment */ +struct increment_t +{ + increment_t() + { + } +}; + +static increment_t const increment; + } // namespace jtx } // namespace test diff --git a/src/test/nodestore/import_test.cpp b/src/test/nodestore/import_test.cpp index 11009ec5be..ea5f23548a 100644 --- a/src/test/nodestore/import_test.cpp +++ b/src/test/nodestore/import_test.cpp @@ -61,7 +61,6 @@ multi(32gb): */ namespace ripple { -namespace NodeStore { namespace detail { @@ -191,6 +190,8 @@ fmtdur(std::chrono::duration const& d) } // namespace detail +namespace NodeStore { + //------------------------------------------------------------------------------ class progress diff --git a/src/test/unit_test/FileDirGuard.h b/src/test/unit_test/FileDirGuard.h index 091bc80d20..d3cabc2092 100644 --- a/src/test/unit_test/FileDirGuard.h +++ b/src/test/unit_test/FileDirGuard.h @@ -29,7 +29,6 @@ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
#include namespace ripple { -namespace test { namespace detail { /** @@ -178,7 +177,6 @@ public: }; } // namespace detail -} // namespace test } // namespace ripple #endif // TEST_UNIT_TEST_DIRGUARD_H diff --git a/src/test/unit_test/multi_runner.cpp b/src/test/unit_test/multi_runner.cpp index 087e37dac2..3755428ee3 100644 --- a/src/test/unit_test/multi_runner.cpp +++ b/src/test/unit_test/multi_runner.cpp @@ -30,7 +30,6 @@ #include namespace ripple { -namespace test { namespace detail { @@ -388,6 +387,8 @@ multi_runner_base::add_failures(std::size_t failures) } // namespace detail +namespace test { + //------------------------------------------------------------------------------ multi_runner_parent::multi_runner_parent() : os_(std::cout) @@ -645,10 +646,11 @@ multi_runner_child::on_log(std::string const& msg) message_queue_send(MessageType::log, s.str()); } +} // namespace test + namespace detail { template class multi_runner_base; template class multi_runner_base; } // namespace detail -} // namespace test } // namespace ripple diff --git a/src/test/unit_test/multi_runner.h b/src/test/unit_test/multi_runner.h index 08512d1882..bce62fb131 100644 --- a/src/test/unit_test/multi_runner.h +++ b/src/test/unit_test/multi_runner.h @@ -40,7 +40,6 @@ #include namespace ripple { -namespace test { namespace detail { @@ -212,6 +211,8 @@ public: } // namespace detail +namespace test { + //------------------------------------------------------------------------------ /** Manager for children running unit tests diff --git a/src/xrpld/app/main/Application.cpp b/src/xrpld/app/main/Application.cpp index beaf85ce2e..05b8f5e5fa 100644 --- a/src/xrpld/app/main/Application.cpp +++ b/src/xrpld/app/main/Application.cpp @@ -304,8 +304,8 @@ public: static_cast(std::thread::hardware_concurrency()); // Be more aggressive about the number of threads to use - // for the job queue if the server is configured as "large" - // or "huge" if there are enough cores. + // for the job queue if the server is configured as + // "large" or "huge" if there are enough cores. if (config->NODE_SIZE >= 4 && count >= 16) count = 6 + std::min(count, 8); else if (config->NODE_SIZE >= 3 && count >= 8) diff --git a/src/xrpld/app/misc/detail/LoadFeeTrack.cpp b/src/xrpld/app/misc/detail/LoadFeeTrack.cpp index 776e9fa50b..4728df9272 100644 --- a/src/xrpld/app/misc/detail/LoadFeeTrack.cpp +++ b/src/xrpld/app/misc/detail/LoadFeeTrack.cpp @@ -22,8 +22,7 @@ #include #include #include -#include -#include +#include #include diff --git a/src/xrpld/app/tx/detail/DeleteAccount.cpp b/src/xrpld/app/tx/detail/DeleteAccount.cpp index deb1743991..02f84adcc3 100644 --- a/src/xrpld/app/tx/detail/DeleteAccount.cpp +++ b/src/xrpld/app/tx/detail/DeleteAccount.cpp @@ -31,10 +31,10 @@ #include #include #include -#include #include #include #include +#include namespace ripple { diff --git a/src/xrpld/app/tx/detail/Escrow.cpp b/src/xrpld/app/tx/detail/Escrow.cpp index ace7437098..f1d1db79a0 100644 --- a/src/xrpld/app/tx/detail/Escrow.cpp +++ b/src/xrpld/app/tx/detail/Escrow.cpp @@ -81,8 +81,8 @@ constexpr HashRouterFlags SF_CF_VALID = HashRouterFlags::PRIVATE6; TxConsequences EscrowCreate::makeTxConsequences(PreflightContext const& ctx) { - return TxConsequences{ - ctx.tx, isXRP(ctx.tx[sfAmount]) ? ctx.tx[sfAmount].xrp() : beast::zero}; + auto const amount = ctx.tx[sfAmount]; + return TxConsequences{ctx.tx, isXRP(amount) ? 
amount.xrp() : beast::zero}; } template diff --git a/src/xrpld/app/tx/detail/InvariantCheck.cpp b/src/xrpld/app/tx/detail/InvariantCheck.cpp index f20a49366b..87ca9ea6c1 100644 --- a/src/xrpld/app/tx/detail/InvariantCheck.cpp +++ b/src/xrpld/app/tx/detail/InvariantCheck.cpp @@ -28,14 +28,88 @@ #include #include #include -#include #include +#include #include #include +#include #include namespace ripple { +/* +assert(enforce) + +There are several asserts (or XRPL_ASSERTs) in this file that check a variable +named `enforce` when an invariant fails. At first glance, those asserts may look +incorrect, but they are not. + +Those asserts take advantage of two facts: +1. `asserts` are not (normally) executed in release builds. +2. Invariants should *never* fail, except in tests that specifically modify + the open ledger to break them. + +This makes `assert(enforce)` sort of a second-layer of invariant enforcement +aimed at _developers_. It's designed to fire if a developer writes code that +violates an invariant, and runs it in unit tests or a develop build that _does +not have the relevant amendments enabled_. It's intentionally a pain in the neck +so that bad code gets caught and fixed as early as possible. +*/ + +enum Privilege { + noPriv = + 0x0000, // The transaction can not do any of the enumerated operations + createAcct = + 0x0001, // The transaction can create a new ACCOUNT_ROOT object. + createPseudoAcct = 0x0002, // The transaction can create a pseudo account, + // which implies createAcct + mustDeleteAcct = + 0x0004, // The transaction must delete an ACCOUNT_ROOT object + mayDeleteAcct = 0x0008, // The transaction may delete an ACCOUNT_ROOT + // object, but does not have to + overrideFreeze = 0x0010, // The transaction can override some freeze rules + changeNFTCounts = 0x0020, // The transaction can mint or burn an NFT + createMPTIssuance = + 0x0040, // The transaction can create a new MPT issuance + destroyMPTIssuance = 0x0080, // The transaction can destroy an MPT issuance + mustAuthorizeMPT = 0x0100, // The transaction MUST create or delete an MPT + // object (except by issuer) + mayAuthorizeMPT = 0x0200, // The transaction MAY create or delete an MPT + // object (except by issuer) + mayDeleteMPT = + 0x0400, // The transaction MAY delete an MPT object. May not create. +}; +constexpr Privilege +operator|(Privilege lhs, Privilege rhs) +{ + return safe_cast( + safe_cast>(lhs) | + safe_cast>(rhs)); +} + +#pragma push_macro("TRANSACTION") +#undef TRANSACTION + +#define TRANSACTION(tag, value, name, delegatable, amendment, privileges, ...) \ + case tag: { \ + return (privileges) & priv; \ + } + +bool +hasPrivilege(STTx const& tx, Privilege priv) +{ + switch (tx.getTxnType()) + { +#include + // Deprecated types + default: + return false; + } +}; + +#undef TRANSACTION +#pragma pop_macro("TRANSACTION") + void TransactionFeeCheck::visitEntry( bool, @@ -379,10 +453,7 @@ AccountRootsNotDeleted::finalize( // transaction when the total AMM LP Tokens balance goes to 0. // A successful AccountDelete or AMMDelete MUST delete exactly // one account root. - if ((tx.getTxnType() == ttACCOUNT_DELETE || - tx.getTxnType() == ttAMM_DELETE || - tx.getTxnType() == ttVAULT_DELETE) && - result == tesSUCCESS) + if (hasPrivilege(tx, mustDeleteAcct) && result == tesSUCCESS) { if (accountsDeleted_ == 1) return true; @@ -399,9 +470,8 @@ AccountRootsNotDeleted::finalize( // A successful AMMWithdraw/AMMClawback MAY delete one account root // when the total AMM LP Tokens balance goes to 0. 
Not every AMM withdraw // deletes the AMM account, accountsDeleted_ is set if it is deleted. - if ((tx.getTxnType() == ttAMM_WITHDRAW || - tx.getTxnType() == ttAMM_CLAWBACK) && - result == tesSUCCESS && accountsDeleted_ == 1) + if (hasPrivilege(tx, mayDeleteAcct) && result == tesSUCCESS && + accountsDeleted_ == 1) return true; if (accountsDeleted_ == 0) @@ -436,7 +506,8 @@ AccountRootsDeletedClean::finalize( // feature is enabled. Enabled, or not, though, a fatal-level message will // be logged [[maybe_unused]] bool const enforce = - view.rules().enabled(featureInvariantsV1_1); + view.rules().enabled(featureInvariantsV1_1) || + view.rules().enabled(featureSingleAssetVault); auto const objectExists = [&view, enforce, &j](auto const& keylet) { (void)enforce; @@ -455,6 +526,8 @@ AccountRootsDeletedClean::finalize( JLOG(j.fatal()) << "Invariant failed: account deletion left behind a " << typeName << " object"; + // The comment above starting with "assert(enforce)" explains this + // assert. XRPL_ASSERT( enforce, "ripple::AccountRootsDeletedClean::finalize::objectExists : " @@ -489,11 +562,16 @@ AccountRootsDeletedClean::finalize( return false; } - // Keys directly stored in the AccountRoot object - if (auto const ammKey = accountSLE->at(~sfAMMID)) + // If the account is a pseudo account, then the linked object must + // also be deleted. e.g. AMM, Vault, etc. + for (auto const& field : getPseudoAccountFields()) { - if (objectExists(keylet::amm(*ammKey)) && enforce) - return false; + if (accountSLE->isFieldPresent(*field)) + { + auto const key = accountSLE->getFieldH256(*field); + if (objectExists(keylet::unchecked(key)) && enforce) + return false; + } } } @@ -513,41 +591,23 @@ LedgerEntryTypesMatch::visitEntry( if (after) { +#pragma push_macro("LEDGER_ENTRY") +#undef LEDGER_ENTRY + +#define LEDGER_ENTRY(tag, ...) case tag: + switch (after->getType()) { - case ltACCOUNT_ROOT: - case ltDELEGATE: - case ltDIR_NODE: - case ltRIPPLE_STATE: - case ltTICKET: - case ltSIGNER_LIST: - case ltOFFER: - case ltLEDGER_HASHES: - case ltAMENDMENTS: - case ltFEE_SETTINGS: - case ltESCROW: - case ltPAYCHAN: - case ltCHECK: - case ltDEPOSIT_PREAUTH: - case ltNEGATIVE_UNL: - case ltNFTOKEN_PAGE: - case ltNFTOKEN_OFFER: - case ltAMM: - case ltBRIDGE: - case ltXCHAIN_OWNED_CLAIM_ID: - case ltXCHAIN_OWNED_CREATE_ACCOUNT_CLAIM_ID: - case ltDID: - case ltORACLE: - case ltMPTOKEN_ISSUANCE: - case ltMPTOKEN: - case ltCREDENTIAL: - case ltPERMISSIONED_DOMAIN: - case ltVAULT: - break; +#include + + break; default: invalidTypeAdded_ = true; break; } + +#undef LEDGER_ENTRY +#pragma pop_macro("LEDGER_ENTRY") } } @@ -713,6 +773,8 @@ TransfersNotFrozen::finalize( // just in case so rippled doesn't crash in release. if (!issuerSle) { + // The comment above starting with "assert(enforce)" explains this + // assert. XRPL_ASSERT( enforce, "ripple::TransfersNotFrozen::finalize : enforce " @@ -901,7 +963,7 @@ TransfersNotFrozen::validateFrozenState( } // AMMClawbacks are allowed to override some freeze rules - if ((!isAMMLine || globalFreeze) && tx.getTxnType() == ttAMM_CLAWBACK) + if ((!isAMMLine || globalFreeze) && hasPrivilege(tx, overrideFreeze)) { JLOG(j.debug()) << "Invariant check allowing funds to be moved " << (change.balanceChangeSign > 0 ? "to" : "from") @@ -912,6 +974,7 @@ TransfersNotFrozen::validateFrozenState( JLOG(j.fatal()) << "Invariant failed: Attempting to move frozen funds for " << tx.getTransactionID(); + // The comment above starting with "assert(enforce)" explains this assert. 
XRPL_ASSERT( enforce, "ripple::TransfersNotFrozen::validateFrozenState : enforce " @@ -961,17 +1024,12 @@ ValidNewAccountRoot::finalize( } // From this point on we know exactly one account was created. - if ((tx.getTxnType() == ttPAYMENT || tx.getTxnType() == ttAMM_CREATE || - tx.getTxnType() == ttVAULT_CREATE || - tx.getTxnType() == ttXCHAIN_ADD_CLAIM_ATTESTATION || - tx.getTxnType() == ttXCHAIN_ADD_ACCOUNT_CREATE_ATTESTATION) && - result == tesSUCCESS) + if (hasPrivilege(tx, createAcct | createPseudoAcct) && result == tesSUCCESS) { bool const pseudoAccount = (pseudoAccount_ && view.rules().enabled(featureSingleAssetVault)); - if (pseudoAccount && tx.getTxnType() != ttAMM_CREATE && - tx.getTxnType() != ttVAULT_CREATE) + if (pseudoAccount && !hasPrivilege(tx, createPseudoAcct)) { JLOG(j.fatal()) << "Invariant failed: pseudo-account created by a " "wrong transaction type"; @@ -1010,7 +1068,7 @@ ValidNewAccountRoot::finalize( JLOG(j.fatal()) << "Invariant failed: account root created illegally"; return false; -} +} // namespace ripple //------------------------------------------------------------------------------ @@ -1205,8 +1263,7 @@ NFTokenCountTracking::finalize( ReadView const& view, beast::Journal const& j) { - if (TxType const txType = tx.getTxnType(); - txType != ttNFTOKEN_MINT && txType != ttNFTOKEN_BURN) + if (!hasPrivilege(tx, changeNFTCounts)) { if (beforeMintedTotal != afterMintedTotal) { @@ -1391,13 +1448,12 @@ ValidMPTIssuance::finalize( STTx const& tx, TER const result, XRPAmount const _fee, - ReadView const& _view, + ReadView const& view, beast::Journal const& j) { if (result == tesSUCCESS) { - if (tx.getTxnType() == ttMPTOKEN_ISSUANCE_CREATE || - tx.getTxnType() == ttVAULT_CREATE) + if (hasPrivilege(tx, createMPTIssuance)) { if (mptIssuancesCreated_ == 0) { @@ -1418,8 +1474,7 @@ ValidMPTIssuance::finalize( return mptIssuancesCreated_ == 1 && mptIssuancesDeleted_ == 0; } - if (tx.getTxnType() == ttMPTOKEN_ISSUANCE_DESTROY || - tx.getTxnType() == ttVAULT_DELETE) + if (hasPrivilege(tx, destroyMPTIssuance)) { if (mptIssuancesDeleted_ == 0) { @@ -1440,8 +1495,17 @@ ValidMPTIssuance::finalize( return mptIssuancesCreated_ == 0 && mptIssuancesDeleted_ == 1; } - if (tx.getTxnType() == ttMPTOKEN_AUTHORIZE || - tx.getTxnType() == ttVAULT_DEPOSIT) + // ttESCROW_FINISH may authorize an MPT, but it can't have the + // mayAuthorizeMPT privilege, because that may cause + // non-amendment-gated side effects. 
+ bool const enforceEscrowFinish = (tx.getTxnType() == ttESCROW_FINISH) && + (view.rules().enabled(featureSingleAssetVault) + /* + TODO: Uncomment when LendingProtocol is defined + || view.rules().enabled(featureLendingProtocol)*/ + ); + if (hasPrivilege(tx, mustAuthorizeMPT | mayAuthorizeMPT) || + enforceEscrowFinish) { bool const submittedByIssuer = tx.isFieldPresent(sfHolder); @@ -1467,7 +1531,7 @@ ValidMPTIssuance::finalize( return false; } else if ( - !submittedByIssuer && (tx.getTxnType() != ttVAULT_DEPOSIT) && + !submittedByIssuer && hasPrivilege(tx, mustAuthorizeMPT) && (mptokensCreated_ + mptokensDeleted_ != 1)) { // if the holder submitted this tx, then a mptoken must be @@ -1480,41 +1544,21 @@ ValidMPTIssuance::finalize( return true; } - - if (tx.getTxnType() == ttMPTOKEN_ISSUANCE_SET) + if (tx.getTxnType() == ttESCROW_FINISH) { - if (mptIssuancesDeleted_ > 0) - { - JLOG(j.fatal()) << "Invariant failed: MPT issuance set " - "succeeded while removing MPT issuances"; - } - else if (mptIssuancesCreated_ > 0) - { - JLOG(j.fatal()) << "Invariant failed: MPT issuance set " - "succeeded while creating MPT issuances"; - } - else if (mptokensDeleted_ > 0) - { - JLOG(j.fatal()) << "Invariant failed: MPT issuance set " - "succeeded while removing MPTokens"; - } - else if (mptokensCreated_ > 0) - { - JLOG(j.fatal()) << "Invariant failed: MPT issuance set " - "succeeded while creating MPTokens"; - } - - return mptIssuancesCreated_ == 0 && mptIssuancesDeleted_ == 0 && - mptokensCreated_ == 0 && mptokensDeleted_ == 0; + // ttESCROW_FINISH may authorize an MPT, but it can't have the + // mayAuthorizeMPT privilege, because that may cause + // non-amendment-gated side effects. + XRPL_ASSERT_PARTS( + !enforceEscrowFinish, + "ripple::ValidMPTIssuance::finalize", + "not escrow finish tx"); + return true; } - if (tx.getTxnType() == ttESCROW_FINISH) - return true; - - if ((tx.getTxnType() == ttVAULT_CLAWBACK || - tx.getTxnType() == ttVAULT_WITHDRAW) && - mptokensDeleted_ == 1 && mptokensCreated_ == 0 && - mptIssuancesCreated_ == 0 && mptIssuancesDeleted_ == 0) + if (hasPrivilege(tx, mayDeleteMPT) && mptokensDeleted_ == 1 && + mptokensCreated_ == 0 && mptIssuancesCreated_ == 0 && + mptIssuancesDeleted_ == 0) return true; } @@ -1640,6 +1684,104 @@ ValidPermissionedDomain::finalize( (sleStatus_[1] ? check(*sleStatus_[1], j) : true); } +//------------------------------------------------------------------------------ + +void +ValidPseudoAccounts::visitEntry( + bool isDelete, + std::shared_ptr const& before, + std::shared_ptr const& after) +{ + if (isDelete) + // Deletion is ignored + return; + + if (after && after->getType() == ltACCOUNT_ROOT) + { + bool const isPseudo = [&]() { + // isPseudoAccount checks that any of the pseudo-account fields are + // set. + if (isPseudoAccount(after)) + return true; + // Not all pseudo-accounts have a zero sequence, but all accounts + // with a zero sequence had better be pseudo-accounts. + if (after->at(sfSequence) == 0) + return true; + + return false; + }(); + if (isPseudo) + { + // Pseudo accounts must have the following properties: + // 1. Exactly one of the pseudo-account fields is set. + // 2. The sequence number is not changed. + // 3. The lsfDisableMaster, lsfDefaultRipple, and lsfDepositAuth + // flags are set. + // 4. The RegularKey is not set. 
+ { + std::vector const& fields = + getPseudoAccountFields(); + + auto const numFields = std::count_if( + fields.begin(), + fields.end(), + [&after](SField const* sf) -> bool { + return after->isFieldPresent(*sf); + }); + if (numFields != 1) + { + std::stringstream error; + error << "pseudo-account has " << numFields + << " pseudo-account fields set"; + errors_.emplace_back(error.str()); + } + } + if (before && before->at(sfSequence) != after->at(sfSequence)) + { + errors_.emplace_back("pseudo-account sequence changed"); + } + if (!after->isFlag( + lsfDisableMaster | lsfDefaultRipple | lsfDepositAuth)) + { + errors_.emplace_back("pseudo-account flags are not set"); + } + if (after->isFieldPresent(sfRegularKey)) + { + errors_.emplace_back("pseudo-account has a regular key"); + } + } + } +} + +bool +ValidPseudoAccounts::finalize( + STTx const& tx, + TER const, + XRPAmount const, + ReadView const& view, + beast::Journal const& j) +{ + bool const enforce = view.rules().enabled(featureSingleAssetVault); + + // The comment above starting with "assert(enforce)" explains this assert. + XRPL_ASSERT( + errors_.empty() || enforce, + "ripple::ValidPseudoAccounts::finalize : no bad " + "changes or enforce invariant"); + if (!errors_.empty()) + { + for (auto const& error : errors_) + { + JLOG(j.fatal()) << "Invariant failed: " << error; + } + if (enforce) + return false; + } + return true; +} + +//------------------------------------------------------------------------------ + void ValidPermissionedDEX::visitEntry( bool, diff --git a/src/xrpld/app/tx/detail/InvariantCheck.h b/src/xrpld/app/tx/detail/InvariantCheck.h index 529c05ce0e..5444f2f3a9 100644 --- a/src/xrpld/app/tx/detail/InvariantCheck.h +++ b/src/xrpld/app/tx/detail/InvariantCheck.h @@ -619,6 +619,34 @@ public: beast::Journal const&); }; +/** + * @brief Invariants: Pseudo-accounts have valid and consistent properties + * + * Pseudo-accounts have certain properties, and some of those properties are + * unique to pseudo-accounts. Check that all pseudo-accounts are following the + * rules, and that only pseudo-accounts look like pseudo-accounts.
+ * + */ +class ValidPseudoAccounts +{ + std::vector errors_; + +public: + void + visitEntry( + bool, + std::shared_ptr const&, + std::shared_ptr const&); + + bool + finalize( + STTx const&, + TER const, + XRPAmount const, + ReadView const&, + beast::Journal const&); +}; + class ValidPermissionedDEX { bool regularOffers_ = false; @@ -725,7 +753,8 @@ using InvariantChecks = std::tuple< ValidMPTIssuance, ValidPermissionedDomain, ValidPermissionedDEX, - ValidAMM>; + ValidAMM, + ValidPseudoAccounts>; /** * @brief get a tuple of all invariant checks diff --git a/src/xrpld/app/tx/detail/Transactor.cpp b/src/xrpld/app/tx/detail/Transactor.cpp index 8f881d7252..fd396e4556 100644 --- a/src/xrpld/app/tx/detail/Transactor.cpp +++ b/src/xrpld/app/tx/detail/Transactor.cpp @@ -206,7 +206,10 @@ preflight2(PreflightContext const& ctx) //------------------------------------------------------------------------------ Transactor::Transactor(ApplyContext& ctx) - : ctx_(ctx), j_(ctx.journal), account_(ctx.tx.getAccountID(sfAccount)) + : ctx_(ctx) + , sink_(ctx.journal, to_short_string(ctx.tx.getTransactionID()) + " ") + , j_(sink_) + , account_(ctx.tx.getAccountID(sfAccount)) { } diff --git a/src/xrpld/app/tx/detail/Transactor.h b/src/xrpld/app/tx/detail/Transactor.h index 42d4861a63..e94b93523d 100644 --- a/src/xrpld/app/tx/detail/Transactor.h +++ b/src/xrpld/app/tx/detail/Transactor.h @@ -24,6 +24,7 @@ #include #include +#include #include #include @@ -138,6 +139,7 @@ class Transactor { protected: ApplyContext& ctx_; + beast::WrappedSink sink_; beast::Journal const j_; AccountID const account_; diff --git a/src/xrpld/app/tx/detail/applySteps.cpp b/src/xrpld/app/tx/detail/applySteps.cpp index 03ef7244f8..543bedcd47 100644 --- a/src/xrpld/app/tx/detail/applySteps.cpp +++ b/src/xrpld/app/tx/detail/applySteps.cpp @@ -18,57 +18,20 @@ //============================================================================== #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#pragma push_macro("TRANSACTION") +#undef TRANSACTION + +// Do nothing +#define TRANSACTION(...) +#define TRANSACTION_INCLUDE 1 + +#include + +#undef TRANSACTION +#pragma pop_macro("TRANSACTION") + +// DO NOT INCLUDE TRANSACTOR HEADER FILES HERE. +// See the instructions at the top of transactions.macro instead. #include diff --git a/src/xrpld/rpc/detail/RPCHelpers.cpp b/src/xrpld/rpc/detail/RPCHelpers.cpp index 4b28d44253..edb91611be 100644 --- a/src/xrpld/rpc/detail/RPCHelpers.cpp +++ b/src/xrpld/rpc/detail/RPCHelpers.cpp @@ -941,7 +941,7 @@ chooseLedgerEntryType(Json::Value const& params) #pragma push_macro("LEDGER_ENTRY") #undef LEDGER_ENTRY -#define LEDGER_ENTRY(tag, value, name, rpcName, fields) \ +#define LEDGER_ENTRY(tag, value, name, rpcName, ...) 
\ {jss::name, jss::rpcName, tag}, #include From ffeabc9642fe9cdd34fbcd479ed528d9b4bdbaa4 Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Thu, 18 Sep 2025 15:04:40 -0400 Subject: [PATCH 198/244] refactor: Simplify STParsedJSON with some helper functions (#5591) - Add code coverage for STParsedJSON edge cases Co-authored-by: Denis Angell --- src/libxrpl/protocol/STParsedJSON.cpp | 293 +++++++++++--------- src/test/protocol/STObject_test.cpp | 215 --------------- src/test/protocol/STParsedJSON_test.cpp | 339 ++++++++++++++++++++++++ 3 files changed, 514 insertions(+), 333 deletions(-) create mode 100644 src/test/protocol/STParsedJSON_test.cpp diff --git a/src/libxrpl/protocol/STParsedJSON.cpp b/src/libxrpl/protocol/STParsedJSON.cpp index bc9aad0a13..02dfde3966 100644 --- a/src/libxrpl/protocol/STParsedJSON.cpp +++ b/src/libxrpl/protocol/STParsedJSON.cpp @@ -202,6 +202,175 @@ non_object_in_array(std::string const& item, Json::UInt index) " is not an object. Arrays may only contain objects."); } +template +static std::optional +parseUnsigned( + SField const& field, + std::string const& json_name, + std::string const& fieldName, + SField const* name, + Json::Value const& value, + Json::Value& error) +{ + std::optional ret; + + try + { + if (value.isString()) + { + ret = detail::make_stvar( + field, + safe_cast( + beast::lexicalCastThrow(value.asString()))); + } + else if (value.isInt()) + { + ret = detail::make_stvar( + field, + to_unsigned(value.asInt())); + } + else if (value.isUInt()) + { + ret = detail::make_stvar( + field, + to_unsigned(value.asUInt())); + } + else + { + error = bad_type(json_name, fieldName); + return ret; + } + } + catch (std::exception const&) + { + error = invalid_data(json_name, fieldName); + return ret; + } + + return ret; +} + +template +static std::optional +parseUint16( + SField const& field, + std::string const& json_name, + std::string const& fieldName, + SField const* name, + Json::Value const& value, + Json::Value& error) +{ + std::optional ret; + + try + { + if (value.isString()) + { + std::string const strValue = value.asString(); + + if (!strValue.empty() && + ((strValue[0] < '0') || (strValue[0] > '9'))) + { + if (field == sfTransactionType) + { + ret = detail::make_stvar( + field, + safe_cast( + static_cast( + TxFormats::getInstance().findTypeByName( + strValue)))); + + if (*name == sfGeneric) + name = &sfTransaction; + } + else if (field == sfLedgerEntryType) + { + ret = detail::make_stvar( + field, + safe_cast( + static_cast( + LedgerFormats::getInstance().findTypeByName( + strValue)))); + + if (*name == sfGeneric) + name = &sfLedgerEntry; + } + else + { + error = invalid_data(json_name, fieldName); + return ret; + } + } + } + if (!ret) + return parseUnsigned( + field, json_name, fieldName, name, value, error); + } + catch (std::exception const&) + { + error = invalid_data(json_name, fieldName); + return ret; + } + + return ret; +} + +template +static std::optional +parseUint32( + SField const& field, + std::string const& json_name, + std::string const& fieldName, + SField const* name, + Json::Value const& value, + Json::Value& error) +{ + std::optional ret; + + try + { + if (value.isString()) + { + if (field == sfPermissionValue) + { + std::string const strValue = value.asString(); + auto const granularPermission = + Permission::getInstance().getGranularValue(strValue); + if (granularPermission) + { + ret = detail::make_stvar( + field, *granularPermission); + } + else + { + auto const& txType = + TxFormats::getInstance().findTypeByName(strValue); + ret = 
detail::make_stvar( + field, + Permission::getInstance().txToPermissionType(txType)); + } + } + else + { + ret = detail::make_stvar( + field, + safe_cast( + beast::lexicalCastThrow(value.asString()))); + } + } + if (!ret) + return parseUnsigned( + field, json_name, fieldName, name, value, error); + } + catch (std::exception const&) + { + error = invalid_data(json_name, fieldName); + return ret; + } + + return ret; +} + // This function is used by parseObject to parse any JSON type that doesn't // recurse. Everything represented here is a leaf-type. static std::optional @@ -302,130 +471,18 @@ parseLeaf( break; case STI_UINT16: - try - { - if (value.isString()) - { - std::string const strValue = value.asString(); - - if (!strValue.empty() && - ((strValue[0] < '0') || (strValue[0] > '9'))) - { - if (field == sfTransactionType) - { - ret = detail::make_stvar( - field, - static_cast( - TxFormats::getInstance().findTypeByName( - strValue))); - - if (*name == sfGeneric) - name = &sfTransaction; - } - else if (field == sfLedgerEntryType) - { - ret = detail::make_stvar( - field, - static_cast( - LedgerFormats::getInstance().findTypeByName( - strValue))); - - if (*name == sfGeneric) - name = &sfLedgerEntry; - } - else - { - error = invalid_data(json_name, fieldName); - return ret; - } - } - else - { - ret = detail::make_stvar( - field, - beast::lexicalCastThrow(strValue)); - } - } - else if (value.isInt()) - { - ret = detail::make_stvar( - field, to_unsigned(value.asInt())); - } - else if (value.isUInt()) - { - ret = detail::make_stvar( - field, to_unsigned(value.asUInt())); - } - else - { - error = bad_type(json_name, fieldName); - return ret; - } - } - catch (std::exception const&) - { - error = invalid_data(json_name, fieldName); + ret = parseUint16( + field, json_name, fieldName, name, value, error); + if (!ret) return ret; - } break; case STI_UINT32: - try - { - if (value.isString()) - { - if (field == sfPermissionValue) - { - std::string const strValue = value.asString(); - auto const granularPermission = - Permission::getInstance().getGranularValue( - strValue); - if (granularPermission) - { - ret = detail::make_stvar( - field, *granularPermission); - } - else - { - auto const& txType = - TxFormats::getInstance().findTypeByName( - strValue); - ret = detail::make_stvar( - field, - Permission::getInstance().txToPermissionType( - txType)); - } - } - else - { - ret = detail::make_stvar( - field, - beast::lexicalCastThrow( - value.asString())); - } - } - else if (value.isInt()) - { - ret = detail::make_stvar( - field, to_unsigned(value.asInt())); - } - else if (value.isUInt()) - { - ret = detail::make_stvar( - field, safe_cast(value.asUInt())); - } - else - { - error = bad_type(json_name, fieldName); - return ret; - } - } - catch (std::exception const&) - { - error = invalid_data(json_name, fieldName); + ret = parseUint32( + field, json_name, fieldName, name, value, error); + if (!ret) return ret; - } break; diff --git a/src/test/protocol/STObject_test.cpp b/src/test/protocol/STObject_test.cpp index e02cfeeea4..47a7ab0ad2 100644 --- a/src/test/protocol/STObject_test.cpp +++ b/src/test/protocol/STObject_test.cpp @@ -19,223 +19,11 @@ #include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - namespace ripple { class STObject_test : public beast::unit_test::suite { public: - bool - parseJSONString(std::string const& json, Json::Value& to) - { - Json::Reader reader; - return reader.parse(json, to) && to.isObject(); - } - - void - 
testParseJSONArrayWithInvalidChildrenObjects() - { - testcase("parse json array invalid children"); - try - { - /* - - STArray/STObject constructs don't really map perfectly to json - arrays/objects. - - STObject is an associative container, mapping fields to value, but - an STObject may also have a Field as its name, stored outside the - associative structure. The name is important, so to maintain - fidelity, it will take TWO json objects to represent them. - - */ - std::string faulty( - "{\"Template\":[{" - "\"ModifiedNode\":{\"Sequence\":1}, " - "\"DeletedNode\":{\"Sequence\":1}" - "}]}"); - - std::unique_ptr so; - Json::Value faultyJson; - bool parsedOK(parseJSONString(faulty, faultyJson)); - unexpected(!parsedOK, "failed to parse"); - STParsedJSONObject parsed("test", faultyJson); - BEAST_EXPECT(!parsed.object); - } - catch (std::runtime_error& e) - { - std::string what(e.what()); - unexpected(what.find("First level children of `Template`") != 0); - } - } - - void - testParseJSONArray() - { - testcase("parse json array"); - std::string const json( - "{\"Template\":[{\"ModifiedNode\":{\"Sequence\":1}}]}"); - - Json::Value jsonObject; - bool parsedOK(parseJSONString(json, jsonObject)); - if (parsedOK) - { - STParsedJSONObject parsed("test", jsonObject); - BEAST_EXPECT(parsed.object); - std::string const& serialized( - to_string(parsed.object->getJson(JsonOptions::none))); - BEAST_EXPECT(serialized == json); - } - else - { - fail("Couldn't parse json: " + json); - } - } - - void - testParseJSONEdgeCases() - { - testcase("parse json object"); - - { - std::string const goodJson(R"({"CloseResolution":19,"Method":250,)" - R"("TransactionResult":"tecFROZEN"})"); - - Json::Value jv; - if (BEAST_EXPECT(parseJSONString(goodJson, jv))) - { - STParsedJSONObject parsed("test", jv); - if (BEAST_EXPECT(parsed.object)) - { - std::string const& serialized( - to_string(parsed.object->getJson(JsonOptions::none))); - BEAST_EXPECT(serialized == goodJson); - } - } - } - - { - std::string const goodJson( - R"({"CloseResolution":19,"Method":"250",)" - R"("TransactionResult":"tecFROZEN"})"); - std::string const expectedJson( - R"({"CloseResolution":19,"Method":250,)" - R"("TransactionResult":"tecFROZEN"})"); - - Json::Value jv; - if (BEAST_EXPECT(parseJSONString(goodJson, jv))) - { - // Integer values are always parsed as int, - // unless they're too big. We want a small uint. 
- jv["CloseResolution"] = Json::UInt(19); - STParsedJSONObject parsed("test", jv); - if (BEAST_EXPECT(parsed.object)) - { - std::string const& serialized( - to_string(parsed.object->getJson(JsonOptions::none))); - BEAST_EXPECT(serialized == expectedJson); - } - } - } - - { - std::string const json(R"({"CloseResolution":19,"Method":250,)" - R"("TransactionResult":"terQUEUED"})"); - - Json::Value jv; - if (BEAST_EXPECT(parseJSONString(json, jv))) - { - STParsedJSONObject parsed("test", jv); - BEAST_EXPECT(!parsed.object); - BEAST_EXPECT(parsed.error); - BEAST_EXPECT(parsed.error[jss::error] == "invalidParams"); - BEAST_EXPECT( - parsed.error[jss::error_message] == - "Field 'test.TransactionResult' is out of range."); - } - } - - { - std::string const json(R"({"CloseResolution":19,"Method":"pony",)" - R"("TransactionResult":"tesSUCCESS"})"); - - Json::Value jv; - if (BEAST_EXPECT(parseJSONString(json, jv))) - { - STParsedJSONObject parsed("test", jv); - BEAST_EXPECT(!parsed.object); - BEAST_EXPECT(parsed.error); - BEAST_EXPECT(parsed.error[jss::error] == "invalidParams"); - BEAST_EXPECT( - parsed.error[jss::error_message] == - "Field 'test.Method' has bad type."); - } - } - - { - std::string const json( - R"({"CloseResolution":19,"Method":3294967296,)" - R"("TransactionResult":"tesSUCCESS"})"); - - Json::Value jv; - if (BEAST_EXPECT(parseJSONString(json, jv))) - { - STParsedJSONObject parsed("test", jv); - BEAST_EXPECT(!parsed.object); - BEAST_EXPECT(parsed.error); - BEAST_EXPECT(parsed.error[jss::error] == "invalidParams"); - BEAST_EXPECT( - parsed.error[jss::error_message] == - "Field 'test.Method' is out of range."); - } - } - - { - std::string const json(R"({"CloseResolution":-10,"Method":42,)" - R"("TransactionResult":"tesSUCCESS"})"); - - Json::Value jv; - if (BEAST_EXPECT(parseJSONString(json, jv))) - { - STParsedJSONObject parsed("test", jv); - BEAST_EXPECT(!parsed.object); - BEAST_EXPECT(parsed.error); - BEAST_EXPECT(parsed.error[jss::error] == "invalidParams"); - BEAST_EXPECT( - parsed.error[jss::error_message] == - "Field 'test.CloseResolution' is out of range."); - } - } - - { - std::string const json( - R"({"CloseResolution":19,"Method":3.141592653,)" - R"("TransactionResult":"tesSUCCESS"})"); - - Json::Value jv; - if (BEAST_EXPECT(parseJSONString(json, jv))) - { - STParsedJSONObject parsed("test", jv); - BEAST_EXPECT(!parsed.object); - BEAST_EXPECT(parsed.error); - BEAST_EXPECT(parsed.error[jss::error] == "invalidParams"); - BEAST_EXPECT( - parsed.error[jss::error_message] == - "Field 'test.Method' has bad type."); - } - } - } - void testSerialization() { @@ -730,9 +518,6 @@ public: testFields(); testSerialization(); - testParseJSONArray(); - testParseJSONArrayWithInvalidChildrenObjects(); - testParseJSONEdgeCases(); testMalformed(); } }; diff --git a/src/test/protocol/STParsedJSON_test.cpp b/src/test/protocol/STParsedJSON_test.cpp new file mode 100644 index 0000000000..bd62196a99 --- /dev/null +++ b/src/test/protocol/STParsedJSON_test.cpp @@ -0,0 +1,339 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2012, 2013 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. 
+ + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include + +#include +#include + +namespace ripple { + +class STParsedJSON_test : public beast::unit_test::suite +{ +public: + bool + parseJSONString(std::string const& json, Json::Value& to) + { + Json::Reader reader; + return reader.parse(json, to) && to.isObject(); + } + + void + testParseJSONArrayWithInvalidChildrenObjects() + { + testcase("parse json array invalid children"); + try + { + /* + + STArray/STObject constructs don't really map perfectly to json + arrays/objects. + + STObject is an associative container, mapping fields to value, but + an STObject may also have a Field as its name, stored outside the + associative structure. The name is important, so to maintain + fidelity, it will take TWO json objects to represent them. + + */ + std::string faulty( + "{\"Template\":[{" + "\"ModifiedNode\":{\"Sequence\":1}, " + "\"DeletedNode\":{\"Sequence\":1}" + "}]}"); + + std::unique_ptr so; + Json::Value faultyJson; + bool parsedOK(parseJSONString(faulty, faultyJson)); + unexpected(!parsedOK, "failed to parse"); + STParsedJSONObject parsed("test", faultyJson); + BEAST_EXPECT(!parsed.object); + } + catch (std::runtime_error& e) + { + std::string what(e.what()); + unexpected(what.find("First level children of `Template`") != 0); + } + } + + void + testParseJSONArray() + { + testcase("parse json array"); + std::string const json( + "{\"Template\":[{\"ModifiedNode\":{\"Sequence\":1}}]}"); + + Json::Value jsonObject; + bool parsedOK(parseJSONString(json, jsonObject)); + if (parsedOK) + { + STParsedJSONObject parsed("test", jsonObject); + BEAST_EXPECT(parsed.object); + std::string const& serialized( + to_string(parsed.object->getJson(JsonOptions::none))); + BEAST_EXPECT(serialized == json); + } + else + { + fail("Couldn't parse json: " + json); + } + } + + void + testParseJSONEdgeCases() + { + testcase("parse json object"); + + { + std::string const goodJson(R"({"CloseResolution":19,"Method":250,)" + R"("TransactionResult":"tecFROZEN"})"); + + Json::Value jv; + if (BEAST_EXPECT(parseJSONString(goodJson, jv))) + { + STParsedJSONObject parsed("test", jv); + if (BEAST_EXPECT(parsed.object)) + { + std::string const& serialized( + to_string(parsed.object->getJson(JsonOptions::none))); + BEAST_EXPECT(serialized == goodJson); + } + } + } + + { + std::string const goodJson( + R"({"CloseResolution":19,"Method":"250",)" + R"("TransactionResult":"tecFROZEN"})"); + std::string const expectedJson( + R"({"CloseResolution":19,"Method":250,)" + R"("TransactionResult":"tecFROZEN"})"); + + Json::Value jv; + if (BEAST_EXPECT(parseJSONString(goodJson, jv))) + { + // Integer values are always parsed as int, + // unless they're too big. We want a small uint. 
+ jv["CloseResolution"] = Json::UInt(19); + STParsedJSONObject parsed("test", jv); + if (BEAST_EXPECT(parsed.object)) + { + std::string const& serialized( + to_string(parsed.object->getJson(JsonOptions::none))); + BEAST_EXPECT(serialized == expectedJson); + } + } + } + + { + std::string const goodJson( + R"({"CloseResolution":"19","Method":"250",)" + R"("TransactionResult":"tecFROZEN"})"); + std::string const expectedJson( + R"({"CloseResolution":19,"Method":250,)" + R"("TransactionResult":"tecFROZEN"})"); + + Json::Value jv; + if (BEAST_EXPECT(parseJSONString(goodJson, jv))) + { + // Integer values are always parsed as int, + // unless they're too big. We want a small uint. + jv["CloseResolution"] = Json::UInt(19); + STParsedJSONObject parsed("test", jv); + if (BEAST_EXPECT(parsed.object)) + { + std::string const& serialized( + to_string(parsed.object->getJson(JsonOptions::none))); + BEAST_EXPECT(serialized == expectedJson); + } + } + } + + { + std::string const json(R"({"CloseResolution":19,"Method":250,)" + R"("TransactionResult":"terQUEUED"})"); + + Json::Value jv; + if (BEAST_EXPECT(parseJSONString(json, jv))) + { + STParsedJSONObject parsed("test", jv); + BEAST_EXPECT(!parsed.object); + BEAST_EXPECT(parsed.error); + BEAST_EXPECT(parsed.error[jss::error] == "invalidParams"); + BEAST_EXPECT( + parsed.error[jss::error_message] == + "Field 'test.TransactionResult' is out of range."); + } + } + + { + std::string const json(R"({"CloseResolution":19,"Method":"pony",)" + R"("TransactionResult":"tesSUCCESS"})"); + + Json::Value jv; + if (BEAST_EXPECT(parseJSONString(json, jv))) + { + STParsedJSONObject parsed("test", jv); + BEAST_EXPECT(!parsed.object); + BEAST_EXPECT(parsed.error); + BEAST_EXPECT(parsed.error[jss::error] == "invalidParams"); + BEAST_EXPECT( + parsed.error[jss::error_message] == + "Field 'test.Method' has bad type."); + } + } + + { + std::string const json( + R"({"CloseResolution":19,"Method":3294967296,)" + R"("TransactionResult":"tesSUCCESS"})"); + + Json::Value jv; + if (BEAST_EXPECT(parseJSONString(json, jv))) + { + STParsedJSONObject parsed("test", jv); + BEAST_EXPECT(!parsed.object); + BEAST_EXPECT(parsed.error); + BEAST_EXPECT(parsed.error[jss::error] == "invalidParams"); + BEAST_EXPECT( + parsed.error[jss::error_message] == + "Field 'test.Method' is out of range."); + } + } + + { + std::string const json(R"({"CloseResolution":-10,"Method":42,)" + R"("TransactionResult":"tesSUCCESS"})"); + + Json::Value jv; + if (BEAST_EXPECT(parseJSONString(json, jv))) + { + STParsedJSONObject parsed("test", jv); + BEAST_EXPECT(!parsed.object); + BEAST_EXPECT(parsed.error); + BEAST_EXPECT(parsed.error[jss::error] == "invalidParams"); + BEAST_EXPECT( + parsed.error[jss::error_message] == + "Field 'test.CloseResolution' is out of range."); + } + } + + { + std::string const json( + R"({"CloseResolution":19,"Method":3.141592653,)" + R"("TransactionResult":"tesSUCCESS"})"); + + Json::Value jv; + if (BEAST_EXPECT(parseJSONString(json, jv))) + { + STParsedJSONObject parsed("test", jv); + BEAST_EXPECT(!parsed.object); + BEAST_EXPECT(parsed.error); + BEAST_EXPECT(parsed.error[jss::error] == "invalidParams"); + BEAST_EXPECT( + parsed.error[jss::error_message] == + "Field 'test.Method' has bad type."); + } + } + + { + std::string const goodJson(R"({"CloseResolution":19,"Method":250,)" + R"("TransferFee":"65535"})"); + std::string const expectedJson( + R"({"CloseResolution":19,"Method":250,)" + R"("TransferFee":65535})"); + + Json::Value jv; + if (BEAST_EXPECT(parseJSONString(goodJson, jv))) + { 
+ STParsedJSONObject parsed("test", jv); + if (BEAST_EXPECT(parsed.object)) + { + std::string const& serialized( + to_string(parsed.object->getJson(JsonOptions::none))); + BEAST_EXPECT(serialized == expectedJson); + } + } + } + + { + std::string const json(R"({"CloseResolution":19,"Method":250,)" + R"("TransferFee":"65536"})"); + + Json::Value jv; + if (BEAST_EXPECT(parseJSONString(json, jv))) + { + STParsedJSONObject parsed("test", jv); + BEAST_EXPECT(!parsed.object); + BEAST_EXPECT(parsed.error); + BEAST_EXPECT(parsed.error[jss::error] == "invalidParams"); + BEAST_EXPECT( + parsed.error[jss::error_message] == + "Field 'test.TransferFee' has invalid data."); + } + } + + { + std::string const json(R"({"CloseResolution":19,"Method":250,)" + R"("TransferFee":"Payment"})"); + + Json::Value jv; + if (BEAST_EXPECT(parseJSONString(json, jv))) + { + STParsedJSONObject parsed("test", jv); + BEAST_EXPECT(!parsed.object); + BEAST_EXPECT(parsed.error); + BEAST_EXPECT(parsed.error[jss::error] == "invalidParams"); + BEAST_EXPECT( + parsed.error[jss::error_message] == + "Field 'test.TransferFee' has invalid data."); + } + } + + { + std::string const json(R"({"CloseResolution":19,"Method":250,)" + R"("TransferFee":true})"); + + Json::Value jv; + if (BEAST_EXPECT(parseJSONString(json, jv))) + { + STParsedJSONObject parsed("test", jv); + BEAST_EXPECT(!parsed.object); + BEAST_EXPECT(parsed.error); + BEAST_EXPECT(parsed.error[jss::error] == "invalidParams"); + BEAST_EXPECT( + parsed.error[jss::error_message] == + "Field 'test.TransferFee' has bad type."); + } + } + } + + void + run() override + { + // Instantiate a jtx::Env so debugLog writes are exercised. + test::jtx::Env env(*this); + testParseJSONArrayWithInvalidChildrenObjects(); + testParseJSONArray(); + testParseJSONEdgeCases(); + } +}; + +BEAST_DEFINE_TESTSUITE(STParsedJSON, protocol, ripple); + +} // namespace ripple From 6b8a5894476cf06b823b2b4075b7d77b54a6ad81 Mon Sep 17 00:00:00 2001 From: Mayukha Vadari Date: Mon, 22 Sep 2025 14:00:31 -0400 Subject: [PATCH 199/244] test: Add STInteger and STParsedJSON tests (#5726) This change is to improve code coverage (and to simplify #5720 and #5725); there is otherwise no change in functionality. The change adds basic tests for `STInteger` and `STParsedJSON`, so it becomes easier to test smaller changes to the types, as well as removes `STParsedJSONArray`, since it is not used anywhere (including in Clio). 
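
For context, the new suites exercise the same round trip the production code
relies on: build a Json::Value, hand it to STParsedJSONObject, and read the
typed field back (or inspect the reported error). A minimal sketch of that
pattern, assuming the usual protocol headers for the SField constants are
already included (illustrative only, not part of this change):

    #include <xrpl/protocol/STParsedJSON.h>
    // sfCloseResolution and the other SField constants come from the
    // protocol headers pulled in by these tests.

    Json::Value j;
    j[sfCloseResolution] = 255;             // a UInt8 field
    STParsedJSONObject parsed("Test", j);   // "Test" names the object in error text
    if (parsed.object)                      // optional STObject on success
    {
        // The value round-trips through the typed accessor.
        auto const v = parsed.object->getFieldU8(sfCloseResolution);
        (void)v;  // v == 255
    }
    else
    {
        // On failure, parsed.error carries an rpcINVALID_PARAMS-style message,
        // e.g. "Field 'Test.CloseResolution' is out of range."
    }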
--- include/xrpl/protocol/Permissions.h | 3 + include/xrpl/protocol/STParsedJSON.h | 28 - src/libxrpl/protocol/Permissions.cpp | 16 + src/libxrpl/protocol/STInteger.cpp | 32 +- src/libxrpl/protocol/STParsedJSON.cpp | 61 +- src/test/protocol/STInteger_test.cpp | 135 ++ src/test/protocol/STParsedJSON_test.cpp | 2018 ++++++++++++++++++++++- 7 files changed, 2162 insertions(+), 131 deletions(-) create mode 100644 src/test/protocol/STInteger_test.cpp diff --git a/include/xrpl/protocol/Permissions.h b/include/xrpl/protocol/Permissions.h index cf49ff7382..2eca441124 100644 --- a/include/xrpl/protocol/Permissions.h +++ b/include/xrpl/protocol/Permissions.h @@ -74,6 +74,9 @@ public: Permission& operator=(Permission const&) = delete; + std::optional + getPermissionName(std::uint32_t const value) const; + std::optional getGranularValue(std::string const& name) const; diff --git a/include/xrpl/protocol/STParsedJSON.h b/include/xrpl/protocol/STParsedJSON.h index d655969030..9c770fe94d 100644 --- a/include/xrpl/protocol/STParsedJSON.h +++ b/include/xrpl/protocol/STParsedJSON.h @@ -54,34 +54,6 @@ public: Json::Value error; }; -/** Holds the serialized result of parsing an input JSON array. - This does validation and checking on the provided JSON. -*/ -class STParsedJSONArray -{ -public: - /** Parses and creates an STParsedJSON array. - The result of the parsing is stored in array and error. - Exceptions: - Does not throw. - @param name The name of the JSON field, used in diagnostics. - @param json The JSON-RPC to parse. - */ - STParsedJSONArray(std::string const& name, Json::Value const& json); - - STParsedJSONArray() = delete; - STParsedJSONArray(STParsedJSONArray const&) = delete; - STParsedJSONArray& - operator=(STParsedJSONArray const&) = delete; - ~STParsedJSONArray() = default; - - /** The STArray if the parse was successful. */ - std::optional array; - - /** On failure, an appropriate set of error values. 
*/ - Json::Value error; -}; - } // namespace ripple #endif diff --git a/src/libxrpl/protocol/Permissions.cpp b/src/libxrpl/protocol/Permissions.cpp index 781799f128..6a4b0678e0 100644 --- a/src/libxrpl/protocol/Permissions.cpp +++ b/src/libxrpl/protocol/Permissions.cpp @@ -101,6 +101,22 @@ Permission::getInstance() return instance; } +std::optional +Permission::getPermissionName(std::uint32_t const value) const +{ + auto const permissionValue = static_cast(value); + if (auto const granular = getGranularName(permissionValue)) + return *granular; + + // not a granular permission, check if it maps to a transaction type + auto const txType = permissionToTxType(value); + if (auto const* item = TxFormats::getInstance().findByType(txType); + item != nullptr) + return item->getName(); + + return std::nullopt; +} + std::optional Permission::getGranularValue(std::string const& name) const { diff --git a/src/libxrpl/protocol/STInteger.cpp b/src/libxrpl/protocol/STInteger.cpp index a90e21491c..5d6c1802cc 100644 --- a/src/libxrpl/protocol/STInteger.cpp +++ b/src/libxrpl/protocol/STInteger.cpp @@ -62,8 +62,10 @@ STUInt8::getText() const if (transResultInfo(TER::fromInt(value_), token, human)) return human; + // LCOV_EXCL_START JLOG(debugLog().error()) << "Unknown result code in metadata: " << value_; + // LCOV_EXCL_STOP } return std::to_string(value_); @@ -80,8 +82,10 @@ STUInt8::getJson(JsonOptions) const if (transResultInfo(TER::fromInt(value_), token, human)) return token; + // LCOV_EXCL_START JLOG(debugLog().error()) << "Unknown result code in metadata: " << value_; + // LCOV_EXCL_STOP } return value_; @@ -171,6 +175,13 @@ template <> std::string STUInt32::getText() const { + if (getFName() == sfPermissionValue) + { + auto const permissionName = + Permission::getInstance().getPermissionName(value_); + if (permissionName) + return *permissionName; + } return std::to_string(value_); } @@ -180,23 +191,10 @@ STUInt32::getJson(JsonOptions) const { if (getFName() == sfPermissionValue) { - auto const permissionValue = - static_cast(value_); - auto const granular = - Permission::getInstance().getGranularName(permissionValue); - - if (granular) - { - return *granular; - } - else - { - auto const txType = - Permission::getInstance().permissionToTxType(value_); - auto item = TxFormats::getInstance().findByType(txType); - if (item != nullptr) - return item->getName(); - } + auto const permissionName = + Permission::getInstance().getPermissionName(value_); + if (permissionName) + return *permissionName; } return value_; diff --git a/src/libxrpl/protocol/STParsedJSON.cpp b/src/libxrpl/protocol/STParsedJSON.cpp index 02dfde3966..9fbe5e7f91 100644 --- a/src/libxrpl/protocol/STParsedJSON.cpp +++ b/src/libxrpl/protocol/STParsedJSON.cpp @@ -83,7 +83,8 @@ constexpr std:: return static_cast(value); } -static std::string +// LCOV_EXCL_START +static inline std::string make_name(std::string const& object, std::string const& field) { if (field.empty()) @@ -92,7 +93,7 @@ make_name(std::string const& object, std::string const& field) return object + "." 
+ field; } -static Json::Value +static inline Json::Value not_an_object(std::string const& object, std::string const& field) { return RPC::make_error( @@ -100,20 +101,20 @@ not_an_object(std::string const& object, std::string const& field) "Field '" + make_name(object, field) + "' is not a JSON object."); } -static Json::Value +static inline Json::Value not_an_object(std::string const& object) { return not_an_object(object, ""); } -static Json::Value +static inline Json::Value not_an_array(std::string const& object) { return RPC::make_error( rpcINVALID_PARAMS, "Field '" + object + "' is not a JSON array."); } -static Json::Value +static inline Json::Value unknown_field(std::string const& object, std::string const& field) { return RPC::make_error( @@ -121,7 +122,7 @@ unknown_field(std::string const& object, std::string const& field) "Field '" + make_name(object, field) + "' is unknown."); } -static Json::Value +static inline Json::Value out_of_range(std::string const& object, std::string const& field) { return RPC::make_error( @@ -129,7 +130,7 @@ out_of_range(std::string const& object, std::string const& field) "Field '" + make_name(object, field) + "' is out of range."); } -static Json::Value +static inline Json::Value bad_type(std::string const& object, std::string const& field) { return RPC::make_error( @@ -137,7 +138,7 @@ bad_type(std::string const& object, std::string const& field) "Field '" + make_name(object, field) + "' has bad type."); } -static Json::Value +static inline Json::Value invalid_data(std::string const& object, std::string const& field) { return RPC::make_error( @@ -145,13 +146,13 @@ invalid_data(std::string const& object, std::string const& field) "Field '" + make_name(object, field) + "' has invalid data."); } -static Json::Value +static inline Json::Value invalid_data(std::string const& object) { return invalid_data(object, ""); } -static Json::Value +static inline Json::Value array_expected(std::string const& object, std::string const& field) { return RPC::make_error( @@ -159,7 +160,7 @@ array_expected(std::string const& object, std::string const& field) "Field '" + make_name(object, field) + "' must be a JSON array."); } -static Json::Value +static inline Json::Value string_expected(std::string const& object, std::string const& field) { return RPC::make_error( @@ -167,7 +168,7 @@ string_expected(std::string const& object, std::string const& field) "Field '" + make_name(object, field) + "' must be a string."); } -static Json::Value +static inline Json::Value too_deep(std::string const& object) { return RPC::make_error( @@ -175,7 +176,7 @@ too_deep(std::string const& object) "Field '" + object + "' exceeds nesting depth limit."); } -static Json::Value +static inline Json::Value singleton_expected(std::string const& object, unsigned int index) { return RPC::make_error( @@ -184,7 +185,7 @@ singleton_expected(std::string const& object, unsigned int index) "]' must be an object with a single key/object value."); } -static Json::Value +static inline Json::Value template_mismatch(SField const& sField) { return RPC::make_error( @@ -193,7 +194,7 @@ template_mismatch(SField const& sField) "' contents did not meet requirements for that type."); } -static Json::Value +static inline Json::Value non_object_in_array(std::string const& item, Json::UInt index) { return RPC::make_error( @@ -201,6 +202,7 @@ non_object_in_array(std::string const& item, Json::UInt index) "Item '" + item + "' at index " + std::to_string(index) + " is not an object. 
Arrays may only contain objects."); } +// LCOV_EXCL_STOP template static std::optional @@ -385,10 +387,13 @@ parseLeaf( auto const& field = SField::getField(fieldName); + // checked in parseObject if (field == sfInvalid) { + // LCOV_EXCL_START error = unknown_field(json_name, fieldName); return ret; + // LCOV_EXCL_STOP } switch (field.fieldType) @@ -760,6 +765,12 @@ parseLeaf( AccountID uAccount, uIssuer; Currency uCurrency; + if (!account && !currency && !issuer) + { + error = invalid_data(element_name); + return ret; + } + if (account) { // human account id @@ -1153,24 +1164,4 @@ STParsedJSONObject::STParsedJSONObject( object = parseObject(name, json, sfGeneric, 0, error); } -//------------------------------------------------------------------------------ - -STParsedJSONArray::STParsedJSONArray( - std::string const& name, - Json::Value const& json) -{ - using namespace STParsedJSONDetail; - auto arr = parseArray(name, json, sfGeneric, 0, error); - if (!arr) - array.reset(); - else - { - auto p = dynamic_cast(&arr->get()); - if (p == nullptr) - array.reset(); - else - array = std::move(*p); - } -} - } // namespace ripple diff --git a/src/test/protocol/STInteger_test.cpp b/src/test/protocol/STInteger_test.cpp new file mode 100644 index 0000000000..f4572e49bd --- /dev/null +++ b/src/test/protocol/STInteger_test.cpp @@ -0,0 +1,135 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2025 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include +#include + +namespace ripple { + +struct STInteger_test : public beast::unit_test::suite +{ + void + testUInt8() + { + STUInt8 u8(255); + BEAST_EXPECT(u8.value() == 255); + BEAST_EXPECT(u8.getText() == "255"); + BEAST_EXPECT(u8.getSType() == STI_UINT8); + BEAST_EXPECT(u8.getJson(JsonOptions::none) == 255); + + // there is some special handling for sfTransactionResult + STUInt8 tr(sfTransactionResult, 0); + BEAST_EXPECT(tr.value() == 0); + BEAST_EXPECT( + tr.getText() == + "The transaction was applied. 
Only final in a validated ledger."); + BEAST_EXPECT(tr.getSType() == STI_UINT8); + BEAST_EXPECT(tr.getJson(JsonOptions::none) == "tesSUCCESS"); + + // invalid transaction result + STUInt8 tr2(sfTransactionResult, 255); + BEAST_EXPECT(tr2.value() == 255); + BEAST_EXPECT(tr2.getText() == "255"); + BEAST_EXPECT(tr2.getSType() == STI_UINT8); + BEAST_EXPECT(tr2.getJson(JsonOptions::none) == 255); + } + + void + testUInt16() + { + STUInt16 u16(65535); + BEAST_EXPECT(u16.value() == 65535); + BEAST_EXPECT(u16.getText() == "65535"); + BEAST_EXPECT(u16.getSType() == STI_UINT16); + BEAST_EXPECT(u16.getJson(JsonOptions::none) == 65535); + + // there is some special handling for sfLedgerEntryType + STUInt16 let(sfLedgerEntryType, ltACCOUNT_ROOT); + BEAST_EXPECT(let.value() == ltACCOUNT_ROOT); + BEAST_EXPECT(let.getText() == "AccountRoot"); + BEAST_EXPECT(let.getSType() == STI_UINT16); + BEAST_EXPECT(let.getJson(JsonOptions::none) == "AccountRoot"); + + // there is some special handling for sfTransactionType + STUInt16 tlt(sfTransactionType, ttPAYMENT); + BEAST_EXPECT(tlt.value() == ttPAYMENT); + BEAST_EXPECT(tlt.getText() == "Payment"); + BEAST_EXPECT(tlt.getSType() == STI_UINT16); + BEAST_EXPECT(tlt.getJson(JsonOptions::none) == "Payment"); + } + + void + testUInt32() + { + STUInt32 u32(4'294'967'295u); + BEAST_EXPECT(u32.value() == 4'294'967'295u); + BEAST_EXPECT(u32.getText() == "4294967295"); + BEAST_EXPECT(u32.getSType() == STI_UINT32); + BEAST_EXPECT(u32.getJson(JsonOptions::none) == 4'294'967'295u); + + // there is some special handling for sfPermissionValue + STUInt32 pv(sfPermissionValue, ttPAYMENT + 1); + BEAST_EXPECT(pv.value() == ttPAYMENT + 1); + BEAST_EXPECT(pv.getText() == "Payment"); + BEAST_EXPECT(pv.getSType() == STI_UINT32); + BEAST_EXPECT(pv.getJson(JsonOptions::none) == "Payment"); + STUInt32 pv2(sfPermissionValue, PaymentMint); + BEAST_EXPECT(pv2.value() == PaymentMint); + BEAST_EXPECT(pv2.getText() == "PaymentMint"); + BEAST_EXPECT(pv2.getSType() == STI_UINT32); + BEAST_EXPECT(pv2.getJson(JsonOptions::none) == "PaymentMint"); + } + + void + testUInt64() + { + STUInt64 u64(0xFFFFFFFFFFFFFFFFull); + BEAST_EXPECT(u64.value() == 0xFFFFFFFFFFFFFFFFull); + BEAST_EXPECT(u64.getText() == "18446744073709551615"); + BEAST_EXPECT(u64.getSType() == STI_UINT64); + + // By default, getJson returns hex string + auto jsonVal = u64.getJson(JsonOptions::none); + BEAST_EXPECT(jsonVal.isString()); + BEAST_EXPECT(jsonVal.asString() == "ffffffffffffffff"); + + STUInt64 u64_2(sfMaximumAmount, 0xFFFFFFFFFFFFFFFFull); + BEAST_EXPECT(u64_2.value() == 0xFFFFFFFFFFFFFFFFull); + BEAST_EXPECT(u64_2.getText() == "18446744073709551615"); + BEAST_EXPECT(u64_2.getSType() == STI_UINT64); + BEAST_EXPECT( + u64_2.getJson(JsonOptions::none) == "18446744073709551615"); + } + + void + run() override + { + testUInt8(); + testUInt16(); + testUInt32(); + testUInt64(); + } +}; + +BEAST_DEFINE_TESTSUITE(STInteger, protocol, ripple); + +} // namespace ripple diff --git a/src/test/protocol/STParsedJSON_test.cpp b/src/test/protocol/STParsedJSON_test.cpp index bd62196a99..9ecb4c0365 100644 --- a/src/test/protocol/STParsedJSON_test.cpp +++ b/src/test/protocol/STParsedJSON_test.cpp @@ -1,7 +1,7 @@ //------------------------------------------------------------------------------ /* This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2013 Ripple Labs Inc. + Copyright (c) 2025 Ripple Labs Inc. 
Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above @@ -19,14 +19,17 @@ #include +#include #include +#include +#include +#include #include namespace ripple { class STParsedJSON_test : public beast::unit_test::suite { -public: bool parseJSONString(std::string const& json, Json::Value& to) { @@ -35,69 +38,1965 @@ public: } void - testParseJSONArrayWithInvalidChildrenObjects() + testUInt8() { - testcase("parse json array invalid children"); - try + testcase("UInt8"); { - /* - - STArray/STObject constructs don't really map perfectly to json - arrays/objects. - - STObject is an associative container, mapping fields to value, but - an STObject may also have a Field as its name, stored outside the - associative structure. The name is important, so to maintain - fidelity, it will take TWO json objects to represent them. - - */ - std::string faulty( - "{\"Template\":[{" - "\"ModifiedNode\":{\"Sequence\":1}, " - "\"DeletedNode\":{\"Sequence\":1}" - "}]}"); - - std::unique_ptr so; - Json::Value faultyJson; - bool parsedOK(parseJSONString(faulty, faultyJson)); - unexpected(!parsedOK, "failed to parse"); - STParsedJSONObject parsed("test", faultyJson); - BEAST_EXPECT(!parsed.object); + Json::Value j; + j[sfCloseResolution] = 255; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfCloseResolution)); + BEAST_EXPECT(obj.object->getFieldU8(sfCloseResolution) == 255); } - catch (std::runtime_error& e) + + // test with uint value { - std::string what(e.what()); - unexpected(what.find("First level children of `Template`") != 0); + Json::Value j; + j[sfCloseResolution] = 255u; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfCloseResolution)); + BEAST_EXPECT(obj.object->getFieldU8(sfCloseResolution) == 255); + } + + // Test with string value + { + Json::Value j; + j[sfCloseResolution] = "255"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfCloseResolution)); + BEAST_EXPECT(obj.object->getFieldU8(sfCloseResolution) == 255); + } + + // Test min value for uint8 + { + Json::Value j; + j[sfCloseResolution] = 0; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->getFieldU8(sfCloseResolution) == 0); + } + + // Test out of range value for UInt8 (negative) + { + Json::Value j; + j[sfCloseResolution] = -1; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test out of range value for UInt8 (too large) + { + Json::Value j; + j[sfCloseResolution] = 256; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test bad_type (not a string/int/uint) + { + Json::Value j; + j[sfCloseResolution] = Json::Value(Json::arrayValue); + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test bad_type (not a string/int/uint) + { + Json::Value j; + j[sfCloseResolution] = Json::Value(Json::objectValue); + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); } } void - testParseJSONArray() + testUInt16() { - testcase("parse json array"); - std::string const json( - "{\"Template\":[{\"ModifiedNode\":{\"Sequence\":1}}]}"); - - Json::Value jsonObject; - bool parsedOK(parseJSONString(json, jsonObject)); - if (parsedOK) + testcase("UInt16"); + // Test with int 
value { - STParsedJSONObject parsed("test", jsonObject); - BEAST_EXPECT(parsed.object); - std::string const& serialized( - to_string(parsed.object->getJson(JsonOptions::none))); - BEAST_EXPECT(serialized == json); + Json::Value j; + j[sfLedgerEntryType] = 65535; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfLedgerEntryType)); + BEAST_EXPECT(obj.object->getFieldU16(sfLedgerEntryType) == 65535); } - else + + // Test with uint value { - fail("Couldn't parse json: " + json); + Json::Value j; + j[sfLedgerEntryType] = 65535u; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfLedgerEntryType)); + BEAST_EXPECT(obj.object->getFieldU16(sfLedgerEntryType) == 65535); + } + + // Test with string value + { + Json::Value j; + j[sfLedgerEntryType] = "65535"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfLedgerEntryType)); + BEAST_EXPECT(obj.object->getFieldU16(sfLedgerEntryType) == 65535); + } + + // Test min value for uint16 + { + Json::Value j; + j[sfLedgerEntryType] = 0; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->getFieldU16(sfLedgerEntryType) == 0); + } + + // Test out of range value for UInt16 (negative) + { + Json::Value j; + j[sfLedgerEntryType] = -1; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test out of range value for UInt16 (too large) + { + Json::Value j; + j[sfLedgerEntryType] = 65536; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test string value out of range + { + Json::Value j; + j[sfLedgerEntryType] = "65536"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test bad_type (not a string/int/uint) + { + Json::Value j; + j[sfLedgerEntryType] = Json::Value(Json::arrayValue); + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test bad_type (not a string/int/uint) + { + Json::Value j; + j[sfLedgerEntryType] = Json::Value(Json::objectValue); + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Invalid input for other field + { + Json::Value j; + j[sfTransferFee] = "Payment"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); } } void - testParseJSONEdgeCases() + testUInt32() { - testcase("parse json object"); + testcase("UInt32"); + { + Json::Value j; + j[sfNetworkID] = 4294967295u; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfNetworkID)); + BEAST_EXPECT(obj.object->getFieldU32(sfNetworkID) == 4294967295u); + } + + // Test with string value + { + Json::Value j; + j[sfNetworkID] = "4294967295"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfNetworkID)); + BEAST_EXPECT(obj.object->getFieldU32(sfNetworkID) == 4294967295u); + } + + // Test min value for uint32 + { + Json::Value j; + j[sfNetworkID] = 0; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->getFieldU32(sfNetworkID) == 0); + } + + // Test out of range value for uint32 (negative) + { + Json::Value j; + j[sfNetworkID] = -1; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test string value out of 
range + { + Json::Value j; + j[sfNetworkID] = "4294967296"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test bad_type (arrayValue) + { + Json::Value j; + j[sfNetworkID] = Json::Value(Json::arrayValue); + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test bad_type (objectValue) + { + Json::Value j; + j[sfNetworkID] = Json::Value(Json::objectValue); + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + } + + void + testUInt64() + { + testcase("UInt64"); + { + Json::Value j; + j[sfIndexNext] = "ffffffffffffffff"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfIndexNext)); + BEAST_EXPECT( + obj.object->getFieldU64(sfIndexNext) == + 18446744073709551615ull); + } + + // Test min value for uint64 + { + Json::Value j; + j[sfIndexNext] = 0; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->getFieldU64(sfIndexNext) == 0ull); + } + + // Test out of range value for uint64 (negative) + { + Json::Value j; + j[sfIndexNext] = -1; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // NOTE: the JSON parser doesn't support > UInt32, so those values must + // be in hex + // Test string value out of range + // string is interpreted as hex + { + Json::Value j; + j[sfIndexNext] = "10000000000000000"; // uint64 max + 1 (in hex) + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test hex string value with 0x prefix (should fail) + { + Json::Value j; + j[sfIndexNext] = "0xabcdefabcdef"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test hex string value with invalid characters + { + Json::Value j; + j[sfIndexNext] = "abcdefga"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // test arrayValue + { + Json::Value j; + j[sfIndexNext] = Json::Value(Json::arrayValue); + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // test objectValue + { + Json::Value j; + j[sfIndexNext] = Json::Value(Json::objectValue); + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + } + + void + testUInt128() + { + testcase("UInt128"); + { + Json::Value j; + j[sfEmailHash] = "0123456789ABCDEF0123456789ABCDEF"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfEmailHash)); + BEAST_EXPECT(obj.object->getFieldH128(sfEmailHash).size() == 16); + std::array expected = { + 0x01, + 0x23, + 0x45, + 0x67, + 0x89, + 0xAB, + 0xCD, + 0xEF, + 0x01, + 0x23, + 0x45, + 0x67, + 0x89, + 0xAB, + 0xCD, + 0xEF}; + BEAST_EXPECT( + obj.object->getFieldH128(sfEmailHash) == uint128{expected}); + } + + // Valid lowercase hex string for UInt128 + { + Json::Value j; + j[sfEmailHash] = "0123456789abcdef0123456789abcdef"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfEmailHash)); + BEAST_EXPECT(obj.object->getFieldH128(sfEmailHash).size() == 16); + } + + // Empty string for UInt128 (should be valid, all zero) + { + Json::Value j; + j[sfEmailHash] = ""; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfEmailHash)); + auto const& h128 = obj.object->getFieldH128(sfEmailHash); + BEAST_EXPECT(h128.size() == 
16); + bool allZero = std::all_of( + h128.begin(), h128.end(), [](auto b) { return b == 0; }); + BEAST_EXPECT(allZero); + } + + // Odd-length hex string for UInt128 (should fail) + { + Json::Value j; + j[sfEmailHash] = "0123456789ABCDEF0123456789ABCDE"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Non-hex string for UInt128 (should fail) + { + Json::Value j; + j[sfEmailHash] = "nothexstring"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Hex string too short for UInt128 (should fail) + { + Json::Value j; + j[sfEmailHash] = "01234567"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Hex string too long for UInt128 (should fail) + { + Json::Value j; + j[sfEmailHash] = "0123456789ABCDEF0123456789ABCDEF00"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Array value for UInt128 (should fail) + { + Json::Value j; + j[sfEmailHash] = Json::Value(Json::arrayValue); + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Object value for UInt128 (should fail) + { + Json::Value j; + j[sfEmailHash] = Json::Value(Json::objectValue); + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + } + + void + testUInt160() + { + testcase("UInt160"); + { + Json::Value j; + j[sfTakerPaysCurrency] = "0123456789ABCDEF0123456789ABCDEF01234567"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfTakerPaysCurrency)); + BEAST_EXPECT( + obj.object->getFieldH160(sfTakerPaysCurrency).size() == 20); + std::array expected = { + 0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0x01, 0x23, + 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0x01, 0x23, 0x45, 0x67}; + BEAST_EXPECT( + obj.object->getFieldH160(sfTakerPaysCurrency) == + uint160{expected}); + } + // Valid lowercase hex string for UInt160 + { + Json::Value j; + j[sfTakerPaysCurrency] = "0123456789abcdef0123456789abcdef01234567"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfTakerPaysCurrency)); + BEAST_EXPECT( + obj.object->getFieldH160(sfTakerPaysCurrency).size() == 20); + } + + // Empty string for UInt160 (should be valid, all zero) + { + Json::Value j; + j[sfTakerPaysCurrency] = ""; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfTakerPaysCurrency)); + auto const& h160 = obj.object->getFieldH160(sfTakerPaysCurrency); + BEAST_EXPECT(h160.size() == 20); + bool allZero = std::all_of( + h160.begin(), h160.end(), [](auto b) { return b == 0; }); + BEAST_EXPECT(allZero); + } + + // Non-hex string for UInt160 (should fail) + { + Json::Value j; + j[sfTakerPaysCurrency] = "nothexstring"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Hex string too short for UInt160 (should fail) + { + Json::Value j; + j[sfTakerPaysCurrency] = "01234567"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Hex string too long for UInt160 (should fail) + { + Json::Value j; + j[sfTakerPaysCurrency] = + "0123456789ABCDEF0123456789ABCDEF0123456789"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Array value for UInt160 (should fail) + { + Json::Value j; + j[sfTakerPaysCurrency] = Json::Value(Json::arrayValue); + STParsedJSONObject obj("Test", j); + 
BEAST_EXPECT(!obj.object.has_value()); + } + + // Object value for UInt160 (should fail) + { + Json::Value j; + j[sfTakerPaysCurrency] = Json::Value(Json::objectValue); + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + } + + void + testUInt192() + { + testcase("UInt192"); + { + Json::Value j; + j[sfMPTokenIssuanceID] = + "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfMPTokenIssuanceID)); + BEAST_EXPECT( + obj.object->getFieldH192(sfMPTokenIssuanceID).size() == 24); + std::array expected = { + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; + BEAST_EXPECT( + obj.object->getFieldH192(sfMPTokenIssuanceID) == + uint192{expected}); + } + + // Valid lowercase hex string for UInt192 + { + Json::Value j; + j[sfMPTokenIssuanceID] = + "ffffffffffffffffffffffffffffffffffffffffffffffff"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfMPTokenIssuanceID)); + BEAST_EXPECT( + obj.object->getFieldH192(sfMPTokenIssuanceID).size() == 24); + } + + // Empty string for UInt192 (should be valid, all zero) + { + Json::Value j; + j[sfMPTokenIssuanceID] = ""; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfMPTokenIssuanceID)); + auto const& h192 = obj.object->getFieldH192(sfMPTokenIssuanceID); + BEAST_EXPECT(h192.size() == 24); + bool allZero = std::all_of( + h192.begin(), h192.end(), [](auto b) { return b == 0; }); + BEAST_EXPECT(allZero); + } + + // Odd-length hex string for UInt192 (should fail) + { + Json::Value j; + j[sfMPTokenIssuanceID] = + "0123456789ABCDEF0123456789ABCDEF0123456789ABCDE"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Non-hex string for UInt192 (should fail) + { + Json::Value j; + j[sfMPTokenIssuanceID] = "nothexstring"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Hex string too short for UInt192 (should fail) + { + Json::Value j; + j[sfMPTokenIssuanceID] = "01234567"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Hex string too long for UInt192 (should fail) + { + Json::Value j; + j[sfMPTokenIssuanceID] = + "0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF00"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Array value for UInt192 (should fail) + { + Json::Value j; + j[sfMPTokenIssuanceID] = Json::Value(Json::arrayValue); + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Object value for UInt192 (should fail) + { + Json::Value j; + j[sfMPTokenIssuanceID] = Json::Value(Json::objectValue); + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + } + + void + testUInt256() + { + testcase("UInt256"); + // Test with valid hex string for UInt256 + { + Json::Value j; + j[sfLedgerHash] = + "0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCD" + "EF"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfLedgerHash)); + BEAST_EXPECT(obj.object->getFieldH256(sfLedgerHash).size() == 32); + std::array expected = { + 0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, + 0x01, 0x23, 0x45, 0x67, 
0x89, 0xAB, 0xCD, 0xEF, + 0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, + 0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF}; + BEAST_EXPECT( + obj.object->getFieldH256(sfLedgerHash) == uint256{expected}); + } + // Valid lowercase hex string for UInt256 + { + Json::Value j; + j[sfLedgerHash] = + "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcd" + "ef"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfLedgerHash)); + BEAST_EXPECT(obj.object->getFieldH256(sfLedgerHash).size() == 32); + } + + // Empty string for UInt256 (should be valid, all zero) + { + Json::Value j; + j[sfLedgerHash] = ""; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfLedgerHash)); + auto const& h256 = obj.object->getFieldH256(sfLedgerHash); + BEAST_EXPECT(h256.size() == 32); + bool allZero = std::all_of( + h256.begin(), h256.end(), [](auto b) { return b == 0; }); + BEAST_EXPECT(allZero); + } + + // Odd-length hex string for UInt256 (should fail) + { + Json::Value j; + j[sfLedgerHash] = + "0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCD" + "E"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Non-hex string for UInt256 (should fail) + { + Json::Value j; + j[sfLedgerHash] = "nothexstring"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Hex string too short for UInt256 (should fail) + { + Json::Value j; + j[sfLedgerHash] = "01234567"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Hex string too long for UInt256 (should fail) + { + Json::Value j; + j[sfLedgerHash] = + "0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCD" + "EF00"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Array value for UInt256 (should fail) + { + Json::Value j; + j[sfLedgerHash] = Json::Value(Json::arrayValue); + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Object value for UInt256 (should fail) + { + Json::Value j; + j[sfLedgerHash] = Json::Value(Json::objectValue); + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + } + + void + testBlob() + { + testcase("Blob"); + // Test with valid hex string for blob + { + Json::Value j; + j[sfPublicKey] = "DEADBEEF"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfPublicKey)); + auto const& blob = obj.object->getFieldVL(sfPublicKey); + BEAST_EXPECT(blob.size() == 4); + BEAST_EXPECT(blob[0] == 0xDE); + BEAST_EXPECT(blob[1] == 0xAD); + BEAST_EXPECT(blob[2] == 0xBE); + BEAST_EXPECT(blob[3] == 0xEF); + } + + // Test empty string for blob (should be valid, size 0) + { + Json::Value j; + j[sfPublicKey] = ""; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfPublicKey)); + auto const& blob = obj.object->getFieldVL(sfPublicKey); + BEAST_EXPECT(blob.size() == 0); + } + + // Test lowercase hex string for blob + { + Json::Value j; + j[sfPublicKey] = "deadbeef"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfPublicKey)); + auto const& blob = obj.object->getFieldVL(sfPublicKey); + BEAST_EXPECT(blob.size() == 4); + BEAST_EXPECT(blob[0] == 0xDE); + BEAST_EXPECT(blob[1] == 0xAD); + 
BEAST_EXPECT(blob[2] == 0xBE); + BEAST_EXPECT(blob[3] == 0xEF); + } + + // Test non-hex string for blob (should fail) + { + Json::Value j; + j[sfPublicKey] = "XYZ123"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test array value for blob (should fail) + { + Json::Value j; + j[sfPublicKey] = Json::Value(Json::arrayValue); + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test object value for blob (should fail) + { + Json::Value j; + j[sfPublicKey] = Json::Value(Json::objectValue); + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + } + + void + testVector256() + { + testcase("Vector256"); + // Test with valid array of hex strings for Vector256 + { + Json::Value j; + Json::Value arr(Json::arrayValue); + arr.append( + "0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCD" + "EF"); + arr.append( + "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA98765432" + "10"); + j[sfHashes] = arr; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfHashes)); + auto const& vec = obj.object->getFieldV256(sfHashes); + BEAST_EXPECT(vec.size() == 2); + BEAST_EXPECT(to_string(vec[0]) == arr[0u].asString()); + BEAST_EXPECT(to_string(vec[1]) == arr[1u].asString()); + } + // Test empty array for Vector256 (should be valid, size 0) + { + Json::Value j; + Json::Value arr(Json::arrayValue); + j[sfHashes] = arr; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfHashes)); + auto const& vec = obj.object->getFieldV256(sfHashes); + BEAST_EXPECT(vec.size() == 0); + } + + // Test array with invalid hex string (should fail) + { + Json::Value j; + Json::Value arr(Json::arrayValue); + arr.append("nothexstring"); + j[sfHashes] = arr; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test array with string of wrong length (should fail) + { + Json::Value j; + Json::Value arr(Json::arrayValue); + arr.append("0123456789ABCDEF"); // too short for uint256 + j[sfHashes] = arr; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test array with non-string element (should fail) + { + Json::Value j; + Json::Value arr(Json::arrayValue); + arr.append(12345); + j[sfHashes] = arr; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test non-array value for Vector256 (should fail) + { + Json::Value j; + j[sfHashes] = "notanarray"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test array with object element (should fail) + { + Json::Value j; + Json::Value arr(Json::arrayValue); + Json::Value objElem(Json::objectValue); + objElem["foo"] = "bar"; + arr.append(objElem); + j[sfHashes] = arr; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + } + + void + testAccount() + { + testcase("Account"); + // Test with valid base58 string for AccountID + { + Json::Value j; + j[sfAccount] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfAccount)); + auto const& acct = obj.object->getAccountID(sfAccount); + BEAST_EXPECT(acct.size() == 20); + BEAST_EXPECT( + toBase58(acct) == "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"); + } + + // Valid hex string for AccountID + { + Json::Value j; + j[sfAccount] = 
"000102030405060708090A0B0C0D0E0F10111213"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfAccount)); + auto const& acct = obj.object->getAccountID(sfAccount); + BEAST_EXPECT(acct.size() == 20); + } + + // Invalid base58 string for AccountID + { + Json::Value j; + j[sfAccount] = "notAValidBase58Account"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Invalid hex string for AccountID (too short) + { + Json::Value j; + j[sfAccount] = "001122334455"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Invalid hex string for AccountID (too long) + { + Json::Value j; + j[sfAccount] = "000102030405060708090A0B0C0D0E0F101112131415"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Invalid hex string for AccountID (bad chars) + { + Json::Value j; + j[sfAccount] = "000102030405060708090A0B0C0D0E0F1011121G"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Empty string for AccountID (should fail) + { + Json::Value j; + j[sfAccount] = ""; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Array value for AccountID (should fail) + { + Json::Value j; + j[sfAccount] = Json::Value(Json::arrayValue); + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Object value for AccountID (should fail) + { + Json::Value j; + j[sfAccount] = Json::Value(Json::objectValue); + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + } + + void + testCurrency() + { + testcase("Currency"); + // Test with valid ISO code for currency + { + Json::Value j; + j[sfBaseAsset] = "USD"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfBaseAsset)); + auto const& curr = obj.object->getFieldCurrency(sfBaseAsset); + BEAST_EXPECT(curr.currency().size() == 20); + } + + // Valid ISO code + { + Json::Value j; + j[sfBaseAsset] = "EUR"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfBaseAsset)); + auto const& curr = obj.object->getFieldCurrency(sfBaseAsset); + BEAST_EXPECT(curr.currency().size() == 20); + } + + // Valid hex string for currency + { + Json::Value j; + j[sfBaseAsset] = "0123456789ABCDEF01230123456789ABCDEF0123"; + STParsedJSONObject obj("Test", j); + if (BEAST_EXPECT(obj.object.has_value())) + { + BEAST_EXPECT(obj.object->isFieldPresent(sfBaseAsset)); + auto const& curr = obj.object->getFieldCurrency(sfBaseAsset); + BEAST_EXPECT(curr.currency().size() == 20); + } + } + + // Invalid ISO code (too long) + { + Json::Value j; + j[sfBaseAsset] = "USDD"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // lowercase ISO code + { + Json::Value j; + j[sfBaseAsset] = "usd"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfBaseAsset)); + auto const& curr = obj.object->getFieldCurrency(sfBaseAsset); + BEAST_EXPECT(curr.currency().size() == 20); + } + + // Invalid hex string (too short) + { + Json::Value j; + j[sfBaseAsset] = "0123456789AB"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Invalid hex string (too long) + { + Json::Value j; + j[sfBaseAsset] = "0123456789ABCDEF0123456789"; + STParsedJSONObject obj("Test", j); 
+ BEAST_EXPECT(!obj.object.has_value()); + } + + // Empty string for currency (should fail) + { + Json::Value j; + j[sfBaseAsset] = ""; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfBaseAsset)); + auto const& curr = obj.object->getFieldCurrency(sfBaseAsset); + BEAST_EXPECT(curr.currency().size() == 20); + } + + // Array value for currency (should fail) + { + Json::Value j; + j[sfBaseAsset] = Json::Value(Json::arrayValue); + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Object value for currency (should fail) + { + Json::Value j; + j[sfBaseAsset] = Json::Value(Json::objectValue); + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + } + + void + testAmount() + { + testcase("Amount"); + // Test with string value for Amount + { + Json::Value j; + j[sfAmount] = "100000000000000000"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfAmount)); + BEAST_EXPECT( + obj.object->getFieldAmount(sfAmount) == + STAmount(100000000000000000ull)); + } + + // Test with int value for Amount + { + Json::Value j; + j[sfAmount] = 4294967295u; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfAmount)); + BEAST_EXPECT( + obj.object->getFieldAmount(sfAmount) == STAmount(4294967295u)); + } + + // Test with decimal string for Amount (should fail) + { + Json::Value j; + j[sfAmount] = "123.45"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test with empty string for Amount (should fail) + { + Json::Value j; + j[sfAmount] = ""; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test with non-numeric string for Amount (should fail) + { + Json::Value j; + j[sfAmount] = "notanumber"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test with object value for Amount (should fail) + { + Json::Value j; + j[sfAmount] = Json::Value(Json::objectValue); + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + } + + void + testPathSet() + { + testcase("PathSet"); + // Valid test: single path with single element + { + Json::Value j; + Json::Value path(Json::arrayValue); + Json::Value elem(Json::objectValue); + elem["account"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + elem["currency"] = "USD"; + elem["issuer"] = "rPT1Sjq2YGrBMTttX4GZHjKu9dyfzbpAYe"; + path.append(elem); + Json::Value pathset(Json::arrayValue); + pathset.append(path); + j[sfPaths] = pathset; + STParsedJSONObject obj("Test", j); + if (BEAST_EXPECT(obj.object.has_value())) + { + BEAST_EXPECT(obj.object->isFieldPresent(sfPaths)); + auto const& ps = obj.object->getFieldPathSet(sfPaths); + BEAST_EXPECT(!ps.empty()); + BEAST_EXPECT(ps.size() == 1); + BEAST_EXPECT(ps[0].size() == 1); + BEAST_EXPECT( + ps[0][0].getAccountID() == + parseBase58( + "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh")); + BEAST_EXPECT(to_string(ps[0][0].getCurrency()) == "USD"); + BEAST_EXPECT( + ps[0][0].getIssuerID() == + parseBase58( + "rPT1Sjq2YGrBMTttX4GZHjKu9dyfzbpAYe")); + } + } + + // Valid test: non-standard currency code + { + Json::Value j; + Json::Value path(Json::arrayValue); + Json::Value elem(Json::objectValue); + elem["account"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + elem["currency"] = "0123456789ABCDEF01230123456789ABCDEF0123"; + elem["issuer"] = 
"rPT1Sjq2YGrBMTttX4GZHjKu9dyfzbpAYe"; + path.append(elem); + Json::Value pathset(Json::arrayValue); + pathset.append(path); + j[sfPaths] = pathset; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfPaths)); + auto const& ps = obj.object->getFieldPathSet(sfPaths); + BEAST_EXPECT(!ps.empty()); + } + + // Test with non-array value for PathSet (should fail) + { + Json::Value j; + j[sfPaths] = "notanarray"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test with array containing non-array element (should fail) + { + Json::Value j; + Json::Value pathset(Json::arrayValue); + pathset.append("notanarray"); + j[sfPaths] = pathset; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test with array containing array with non-object element (should + // fail) + { + Json::Value j; + Json::Value path(Json::arrayValue); + path.append("notanobject"); + Json::Value pathset(Json::arrayValue); + pathset.append(path); + j[sfPaths] = pathset; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test with array containing array with object missing required keys + // (should fail) + { + Json::Value j; + Json::Value path(Json::arrayValue); + Json::Value elem(Json::objectValue); + elem["foo"] = "bar"; // not a valid path element key + path.append(elem); + Json::Value pathset(Json::arrayValue); + pathset.append(path); + j[sfPaths] = pathset; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test with array containing array with object with invalid account + // value (should fail) + { + Json::Value j; + Json::Value path(Json::arrayValue); + Json::Value elem(Json::objectValue); + elem["account"] = "notAValidBase58Account"; + path.append(elem); + Json::Value pathset(Json::arrayValue); + pathset.append(path); + j[sfPaths] = pathset; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test with account not string (should fail) + { + Json::Value j; + Json::Value path(Json::arrayValue); + Json::Value elem(Json::objectValue); + elem["account"] = 12345; + path.append(elem); + Json::Value pathset(Json::arrayValue); + pathset.append(path); + j[sfPaths] = pathset; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test with currency not string (should fail) + { + Json::Value j; + Json::Value path(Json::arrayValue); + Json::Value elem(Json::objectValue); + elem["currency"] = 12345; + path.append(elem); + Json::Value pathset(Json::arrayValue); + pathset.append(path); + j[sfPaths] = pathset; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test with non-standard currency not hex (should fail) + { + Json::Value j; + Json::Value path(Json::arrayValue); + Json::Value elem(Json::objectValue); + elem["currency"] = "notAValidCurrency"; + path.append(elem); + Json::Value pathset(Json::arrayValue); + pathset.append(path); + j[sfPaths] = pathset; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test with issuer not string (should fail) + { + Json::Value j; + Json::Value path(Json::arrayValue); + Json::Value elem(Json::objectValue); + elem["issuer"] = 12345; + path.append(elem); + Json::Value pathset(Json::arrayValue); + pathset.append(path); + j[sfPaths] = pathset; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test with 
issuer not base58 (should fail) + { + Json::Value j; + Json::Value path(Json::arrayValue); + Json::Value elem(Json::objectValue); + elem["issuer"] = "notAValidBase58Account"; + path.append(elem); + Json::Value pathset(Json::arrayValue); + pathset.append(path); + j[sfPaths] = pathset; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + } + + void + testIssue() + { + testcase("Issue"); + // Valid Issue: currency and issuer as base58 + { + Json::Value j; + Json::Value issueJson(Json::objectValue); + issueJson["currency"] = "USD"; + issueJson["issuer"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + j[sfAsset] = issueJson; + STParsedJSONObject obj("Test", j); + if (BEAST_EXPECTS( + obj.object.has_value(), obj.error.toStyledString())) + { + BEAST_EXPECT(obj.object->isFieldPresent(sfAsset)); + auto const& issueField = (*obj.object)[sfAsset]; + auto const issue = issueField.value().get(); + BEAST_EXPECT(issue.currency.size() == 20); + BEAST_EXPECT(to_string(issue.currency) == "USD"); + BEAST_EXPECT(issue.account.size() == 20); + BEAST_EXPECT( + issue.account == + parseBase58( + "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh")); + } + } + + // Valid Issue: currency as hex + { + Json::Value j; + Json::Value issueJson(Json::objectValue); + issueJson["currency"] = "0123456789ABCDEF01230123456789ABCDEF0123"; + issueJson["issuer"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + j[sfAsset] = issueJson; + STParsedJSONObject obj("Test", j); + if (BEAST_EXPECT(obj.object.has_value())) + { + BEAST_EXPECT(obj.object->isFieldPresent(sfAsset)); + auto const& issueField = (*obj.object)[sfAsset]; + auto const issue = issueField.value().get(); + BEAST_EXPECT(issue.currency.size() == 20); + BEAST_EXPECT(issue.account.size() == 20); + } + } + + // Valid Issue: MPTID + { + Json::Value j; + Json::Value issueJson(Json::objectValue); + issueJson["mpt_issuance_id"] = + "0000000000000000000000004D5054494431323334234234"; + j[sfAsset] = issueJson; + STParsedJSONObject obj("Test", j); + if (BEAST_EXPECT(obj.object.has_value())) + { + BEAST_EXPECT(obj.object->isFieldPresent(sfAsset)); + auto const& issueField = (*obj.object)[sfAsset]; + auto const issue = issueField.value().get(); + BEAST_EXPECT(issue.getMptID().size() == 24); + } + } + + // Invalid Issue: missing currency + { + Json::Value j; + Json::Value issue(Json::objectValue); + issue["issuer"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + j[sfAsset] = issue; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Invalid Issue: missing issuer + { + Json::Value j; + Json::Value issue(Json::objectValue); + issue["currency"] = "USD"; + j[sfAsset] = issue; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Invalid Issue: currency too long + { + Json::Value j; + Json::Value issue(Json::objectValue); + issue["currency"] = "USDD"; + issue["issuer"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + j[sfAsset] = issue; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Invalid Issue: issuer not base58 or hex + { + Json::Value j; + Json::Value issue(Json::objectValue); + issue["currency"] = "USD"; + issue["issuer"] = "notAValidIssuer"; + j[sfAsset] = issue; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Invalid Issue: currency not string + { + Json::Value j; + Json::Value issue(Json::objectValue); + issue["currency"] = Json::Value(Json::arrayValue); + issue["issuer"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + j[sfAsset] = issue; + 
STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Invalid Issue: issuer not string + { + Json::Value j; + Json::Value issue(Json::objectValue); + issue["currency"] = "USD"; + issue["issuer"] = Json::Value(Json::objectValue); + j[sfAsset] = issue; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Invalid Issue: not an object + { + Json::Value j; + j[sfAsset] = "notanobject"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + } + + void + testXChainBridge() + { + testcase("XChainBridge"); + // Valid XChainBridge + { + Json::Value j; + Json::Value bridge(Json::objectValue); + Json::Value issuingChainIssue(Json::objectValue); + issuingChainIssue["currency"] = "USD"; + issuingChainIssue["issuer"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + Json::Value lockingChainIssue(Json::objectValue); + lockingChainIssue["currency"] = "EUR"; + lockingChainIssue["issuer"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + bridge["LockingChainIssue"] = lockingChainIssue; + bridge["IssuingChainIssue"] = issuingChainIssue; + bridge["LockingChainDoor"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + bridge["IssuingChainDoor"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + j[sfXChainBridge] = bridge; + STParsedJSONObject obj("Test", j); + if (BEAST_EXPECT(obj.object.has_value())) + { + BEAST_EXPECT(obj.object->isFieldPresent(sfXChainBridge)); + auto const& bridgeField = (*obj.object)[sfXChainBridge]; + BEAST_EXPECT( + bridgeField->lockingChainIssue().currency.size() == 20); + BEAST_EXPECT( + bridgeField->issuingChainIssue().currency.size() == 20); + } + } + + // Valid XChainBridge: issues as hex currency + { + Json::Value j; + Json::Value bridge(Json::objectValue); + Json::Value issuingChainIssue(Json::objectValue); + issuingChainIssue["currency"] = + "0123456789ABCDEF01230123456789ABCDEF0123"; + issuingChainIssue["issuer"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + Json::Value lockingChainIssue(Json::objectValue); + lockingChainIssue["currency"] = + "0123456789ABCDEF01230123456789ABCDEF0123"; + lockingChainIssue["issuer"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + bridge["LockingChainIssue"] = lockingChainIssue; + bridge["IssuingChainIssue"] = issuingChainIssue; + bridge["LockingChainDoor"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + bridge["IssuingChainDoor"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + j[sfXChainBridge] = bridge; + STParsedJSONObject obj("Test", j); + if (BEAST_EXPECT(obj.object.has_value())) + { + BEAST_EXPECT(obj.object->isFieldPresent(sfXChainBridge)); + auto const& bridgeField = (*obj.object)[sfXChainBridge]; + BEAST_EXPECT( + bridgeField->lockingChainIssue().currency.size() == 20); + BEAST_EXPECT( + bridgeField->issuingChainIssue().currency.size() == 20); + } + } + + // Invalid XChainBridge: missing LockingChainIssue + { + Json::Value j; + Json::Value bridge(Json::objectValue); + Json::Value issuingChainIssue(Json::objectValue); + issuingChainIssue["currency"] = "USD"; + issuingChainIssue["issuer"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + bridge["IssuingChainIssue"] = issuingChainIssue; + bridge["LockingChainDoor"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + bridge["IssuingChainDoor"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + j[sfXChainBridge] = bridge; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Invalid XChainBridge: missing IssuingChainIssue + { + Json::Value j; + Json::Value bridge(Json::objectValue); + Json::Value lockingChainIssue(Json::objectValue); + 
lockingChainIssue["currency"] = "EUR"; + lockingChainIssue["issuer"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + bridge["LockingChainIssue"] = lockingChainIssue; + bridge["LockingChainDoor"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + bridge["IssuingChainDoor"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + j[sfXChainBridge] = bridge; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Invalid XChainBridge: missing LockingChainDoor + { + Json::Value j; + Json::Value bridge(Json::objectValue); + Json::Value issuingChainIssue(Json::objectValue); + issuingChainIssue["currency"] = "USD"; + issuingChainIssue["issuer"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + bridge["IssuingChainIssue"] = issuingChainIssue; + Json::Value lockingChainIssue(Json::objectValue); + lockingChainIssue["currency"] = "EUR"; + lockingChainIssue["issuer"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + bridge["LockingChainIssue"] = lockingChainIssue; + bridge["IssuingChainDoor"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + j[sfXChainBridge] = bridge; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Invalid XChainBridge: missing IssuingChainDoor + { + Json::Value j; + Json::Value bridge(Json::objectValue); + Json::Value issuingChainIssue(Json::objectValue); + issuingChainIssue["currency"] = "USD"; + issuingChainIssue["issuer"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + bridge["IssuingChainIssue"] = issuingChainIssue; + Json::Value lockingChainIssue(Json::objectValue); + lockingChainIssue["currency"] = "EUR"; + lockingChainIssue["issuer"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + bridge["LockingChainIssue"] = lockingChainIssue; + bridge["LockingChainDoor"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + j[sfXChainBridge] = bridge; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Invalid XChainBridge: IssuingChainIssue not an object + { + Json::Value j; + Json::Value bridge(Json::objectValue); + bridge["LockingChainIssue"] = "notanobject"; + bridge["IssuingChainIssue"] = "notanobject"; + j[sfXChainBridge] = bridge; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Invalid XChainBridge: IssuingChainIssue missing currency + { + Json::Value j; + Json::Value bridge(Json::objectValue); + Json::Value asset(Json::objectValue); + asset["issuer"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + Json::Value lockingChainIssue(Json::objectValue); + lockingChainIssue["currency"] = "EUR"; + lockingChainIssue["issuer"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + bridge["LockingChainIssue"] = lockingChainIssue; + bridge["IssuingChainIssue"] = asset; + j[sfXChainBridge] = bridge; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Invalid XChainBridge: asset missing issuer + { + Json::Value j; + Json::Value bridge(Json::objectValue); + Json::Value asset(Json::objectValue); + asset["currency"] = "USD"; + Json::Value lockingChainIssue(Json::objectValue); + lockingChainIssue["currency"] = "EUR"; + lockingChainIssue["issuer"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + bridge["LockingChainIssue"] = lockingChainIssue; + bridge["IssuingChainIssue"] = asset; + j[sfXChainBridge] = bridge; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Invalid XChainBridge: asset issuer not base58 + { + Json::Value j; + Json::Value bridge(Json::objectValue); + Json::Value asset(Json::objectValue); + asset["currency"] = "USD"; + asset["issuer"] = "notAValidBase58Account"; 
+ Json::Value lockingChainIssue(Json::objectValue); + lockingChainIssue["currency"] = "EUR"; + lockingChainIssue["issuer"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + bridge["LockingChainIssue"] = lockingChainIssue; + bridge["IssuingChainIssue"] = asset; + j[sfXChainBridge] = bridge; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Invalid XChainBridge: not an object + { + Json::Value j; + j[sfXChainBridge] = "notanobject"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + } + + void + testNumber() + { + testcase("Number"); + // Valid integer value for STNumber + { + Json::Value j; + j[sfNumber] = 12345; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfNumber)); + BEAST_EXPECT( + obj.object->getFieldNumber(sfNumber).value() == + Number(12345, 0)); + } + + // Valid uint value for STNumber + { + Json::Value j; + j[sfNumber] = 12345u; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfNumber)); + BEAST_EXPECT( + obj.object->getFieldNumber(sfNumber).value() == + Number(12345, 0)); + } + + // Valid string integer value for STNumber + { + Json::Value j; + j[sfNumber] = "67890"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfNumber)); + BEAST_EXPECT( + obj.object->getFieldNumber(sfNumber).value() == + Number(67890, 0)); + } + + // Valid negative integer value for STNumber + { + Json::Value j; + j[sfNumber] = -42; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfNumber)); + BEAST_EXPECT( + obj.object->getFieldNumber(sfNumber).value() == Number(-42, 0)); + } + + // Valid string negative integer value for STNumber + { + Json::Value j; + j[sfNumber] = "-123"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfNumber)); + BEAST_EXPECT( + obj.object->getFieldNumber(sfNumber).value() == + Number(-123, 0)); + } + + // Valid floating point value for STNumber + { + Json::Value j; + j[sfNumber] = "3.14159"; + STParsedJSONObject obj("Test", j); + if (BEAST_EXPECT(obj.object.has_value())) + { + BEAST_EXPECT(obj.object->isFieldPresent(sfNumber)); + BEAST_EXPECT( + obj.object->getFieldNumber(sfNumber).value() == + Number(314159, -5)); + } + } + + // Invalid string value for STNumber (not a number) + { + Json::Value j; + j[sfNumber] = "notanumber"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Invalid array value for STNumber + { + Json::Value j; + j[sfNumber] = Json::Value(Json::arrayValue); + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Invalid object value for STNumber + { + Json::Value j; + j[sfNumber] = Json::Value(Json::objectValue); + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Empty string for STNumber (should fail) + { + Json::Value j; + j[sfNumber] = ""; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + } + + void + testObject() + { + testcase("Object"); + // Test with valid object for Object + { + Json::Value j; + Json::Value objVal(Json::objectValue); + objVal[sfTransactionResult] = 1; + j[sfTransactionMetaData] = objVal; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + 
BEAST_EXPECT(obj.object->isFieldPresent(sfTransactionMetaData)); + auto const& result = + obj.object->peekFieldObject(sfTransactionMetaData); + BEAST_EXPECT(result.getFieldU8(sfTransactionResult) == 1); + } + + // Test with non-object value for Object (should fail) + { + Json::Value j; + j[sfTransactionMetaData] = "notanobject"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test with array value for Object (should fail) + { + Json::Value j; + Json::Value arr(Json::arrayValue); + arr.append(1); + j[sfTransactionMetaData] = arr; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test with null value for Object (should fail) + { + Json::Value j; + j[sfTransactionMetaData] = Json::Value(Json::nullValue); + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test with max depth (should succeed) + // max depth is 64 + { + Json::Value j; + Json::Value obj(Json::objectValue); + Json::Value* current = &obj; + for (int i = 0; i < 63; ++i) + { + Json::Value next(Json::objectValue); + (*current)[sfTransactionMetaData] = next; + current = &((*current)[sfTransactionMetaData]); + } + (*current)[sfTransactionResult.getJsonName()] = 1; + j[sfTransactionMetaData] = obj; + STParsedJSONObject parsed("Test", j); + BEAST_EXPECT(parsed.object.has_value()); + BEAST_EXPECT(parsed.object->isFieldPresent(sfTransactionMetaData)); + } + + // Test with depth exceeding maxDepth (should fail) + { + Json::Value j; + Json::Value obj(Json::objectValue); + Json::Value* current = &obj; + for (int i = 0; i < 64; ++i) + { + Json::Value next(Json::objectValue); + (*current)[sfTransactionMetaData] = next; + current = &((*current)[sfTransactionMetaData]); + } + (*current)[sfTransactionResult.getJsonName()] = 1; + j[sfTransactionMetaData] = obj; + STParsedJSONObject parsed("Test", j); + BEAST_EXPECT(!parsed.object.has_value()); + } + } + + void + testArray() + { + testcase("Array"); + // Test with valid array for Array + { + Json::Value j; + Json::Value arr(Json::arrayValue); + Json::Value elem(Json::objectValue); + elem[sfTransactionResult] = 2; + Json::Value elem2(Json::objectValue); + elem2[sfTransactionMetaData] = elem; + arr.append(elem2); + j[sfSignerEntries] = arr; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfSignerEntries)); + auto const& result = obj.object->getFieldArray(sfSignerEntries); + if (BEAST_EXPECT(result.size() == 1)) + { + BEAST_EXPECT(result[0].getFName() == sfTransactionMetaData); + BEAST_EXPECT(result[0].getJson(0) == elem); + } + } + + // Test with array containing non-object element (should fail) + { + Json::Value j; + Json::Value arr(Json::arrayValue); + arr.append("notanobject"); + j[sfSignerEntries] = arr; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test with array containing object with invalid field (should fail) + { + Json::Value j; + Json::Value arr(Json::arrayValue); + Json::Value elem(Json::objectValue); + elem["invalidField"] = 1; + arr.append(elem); + j[sfSignerEntries] = arr; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test with array containing object with multiple keys (should fail) + { + Json::Value j; + Json::Value arr(Json::arrayValue); + Json::Value elem(Json::objectValue); + elem[sfTransactionResult] = 2; + elem[sfNetworkID] = 3; + arr.append(elem); + j[sfSignerEntries] = arr; + STParsedJSONObject obj("Test", 
j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test with non-array value for Array (should fail) + { + Json::Value j; + j[sfSignerEntries] = "notanarray"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test with array containing object with valid field but invalid value + // (should fail) + { + Json::Value j; + Json::Value arr(Json::arrayValue); + Json::Value elem(Json::objectValue); + elem[sfTransactionResult] = "notanint"; + arr.append(elem); + j[sfSignerEntries] = arr; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test with empty array for Array (should be valid) + { + Json::Value j; + Json::Value arr(Json::arrayValue); + j[sfSignerEntries] = arr; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + BEAST_EXPECT(obj.object->isFieldPresent(sfSignerEntries)); + } + + // Test with object provided but not object SField + { + Json::Value j; + Json::Value obj(Json::arrayValue); + obj.append(Json::Value(Json::objectValue)); + obj[0u][sfTransactionResult] = 1; + j[sfSignerEntries] = obj; + STParsedJSONObject parsed("Test", j); + BEAST_EXPECT(!parsed.object.has_value()); + } + + // Test invalid children + { + try + { + /* + + STArray/STObject constructs don't really map perfectly to json + arrays/objects. + + STObject is an associative container, mapping fields to value, + but an STObject may also have a Field as its name, stored + outside the associative structure. The name is important, so to + maintain fidelity, it will take TWO json objects to represent + them. + + */ + std::string faulty( + "{\"Template\":[{" + "\"ModifiedNode\":{\"Sequence\":1}, " + "\"DeletedNode\":{\"Sequence\":1}" + "}]}"); + + std::unique_ptr so; + Json::Value faultyJson; + bool parsedOK(parseJSONString(faulty, faultyJson)); + unexpected(!parsedOK, "failed to parse"); + STParsedJSONObject parsed("test", faultyJson); + BEAST_EXPECT(!parsed.object); + } + catch (std::runtime_error& e) + { + std::string what(e.what()); + unexpected( + what.find("First level children of `Template`") != 0); + } + } + } + + void + testEdgeCases() + { + testcase("General Invalid Cases"); + + { + Json::Value j; + j[sfLedgerEntry] = 1; // not a valid SField for STParsedJSON + } { std::string const goodJson(R"({"CloseResolution":19,"Method":250,)" @@ -328,9 +2227,26 @@ public: { // Instantiate a jtx::Env so debugLog writes are exercised. test::jtx::Env env(*this); - testParseJSONArrayWithInvalidChildrenObjects(); - testParseJSONArray(); - testParseJSONEdgeCases(); + testUInt8(); + testUInt16(); + testUInt32(); + testUInt64(); + testUInt128(); + testUInt160(); + testUInt192(); + testUInt256(); + testBlob(); + testVector256(); + testAccount(); + testCurrency(); + testAmount(); + testPathSet(); + testIssue(); + testXChainBridge(); + testNumber(); + testObject(); + testArray(); + testEdgeCases(); } }; From 08b136528e078a691ece1033a072132234f52943 Mon Sep 17 00:00:00 2001 From: Bart Date: Mon, 22 Sep 2025 20:27:02 +0200 Subject: [PATCH 200/244] Revert "Update Conan dependencies: OpenSSL" (#5807) This change reverts #5617, because it will require extensive testing that will take up more time than we have before the next scheduled release. Reverting this change does not mean we are abandoning it. We aim to pick it back up once there's a sufficient time window to allow for testing on multiple distros running a mixture of OpenSSL 1.x and 3.x. 
--- cmake/RippledCompiler.cmake | 17 +++++++---------- conan.lock | 2 +- conanfile.py | 2 +- 3 files changed, 9 insertions(+), 12 deletions(-) diff --git a/cmake/RippledCompiler.cmake b/cmake/RippledCompiler.cmake index 4d16222cbe..bc3a62a48c 100644 --- a/cmake/RippledCompiler.cmake +++ b/cmake/RippledCompiler.cmake @@ -16,16 +16,13 @@ set(CMAKE_CXX_EXTENSIONS OFF) target_compile_definitions (common INTERFACE $<$:DEBUG _DEBUG> - #[===[ - NOTE: CMAKE release builds already have NDEBUG defined, so no need to add it - explicitly except for the special case of (profile ON) and (assert OFF). - Presumably this is because we don't want profile builds asserting unless - asserts were specifically requested. - ]===] - $<$,$>>:NDEBUG> - # TODO: Remove once we have migrated functions from OpenSSL 1.x to 3.x. - OPENSSL_SUPPRESS_DEPRECATED -) + $<$,$>>:NDEBUG>) + # ^^^^ NOTE: CMAKE release builds already have NDEBUG + # defined, so no need to add it explicitly except for + # this special case of (profile ON) and (assert OFF) + # -- presumably this is because we don't want profile + # builds asserting unless asserts were specifically + # requested if (MSVC) # remove existing exception flag since we set it to -EHa diff --git a/conan.lock b/conan.lock index 0f11f086b4..ec790e16ce 100644 --- a/conan.lock +++ b/conan.lock @@ -9,7 +9,7 @@ "rocksdb/10.0.1#85537f46e538974d67da0c3977de48ac%1756234304.347", "re2/20230301#dfd6e2bf050eb90ddd8729cfb4c844a4%1756234257.976", "protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1756234251.614", - "openssl/3.5.2#0c5a5e15ae569f45dff57adcf1770cf7%1756234259.61", + "openssl/1.1.1w#a8f0792d7c5121b954578a7149d23e03%1756223730.729", "nudb/2.0.9#c62cfd501e57055a7e0d8ee3d5e5427d%1756234237.107", "lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1756234228.999", "libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1756223727.64", diff --git a/conanfile.py b/conanfile.py index 01f61c5d4e..3146b887e0 100644 --- a/conanfile.py +++ b/conanfile.py @@ -27,7 +27,7 @@ class Xrpl(ConanFile): 'grpc/1.50.1', 'libarchive/3.8.1', 'nudb/2.0.9', - 'openssl/3.5.2', + 'openssl/1.1.1w', 'soci/4.0.3', 'zlib/1.3.1', ] From 73ff54143d0a9b378cf3612b9ba9ac3751928551 Mon Sep 17 00:00:00 2001 From: Valentin Balaschenko <13349202+vlntb@users.noreply.github.com> Date: Tue, 23 Sep 2025 12:26:26 +0100 Subject: [PATCH 201/244] docs: Add warning about using std::counting_semaphore (#5595) This adds a comment to avoid using `std::counting_semaphore` until the minimum compiler versions of GCC and Clang have been updated to no longer contain the bug that is present in older compilers. --- src/xrpld/core/detail/semaphore.h | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/src/xrpld/core/detail/semaphore.h b/src/xrpld/core/detail/semaphore.h index 3b64265bb1..fbeb79c66a 100644 --- a/src/xrpld/core/detail/semaphore.h +++ b/src/xrpld/core/detail/semaphore.h @@ -17,6 +17,34 @@ */ //============================================================================== +/** + * + * TODO: Remove ripple::basic_semaphore (and this file) and use + * std::counting_semaphore. 
+ * + * Background: + * - PR: https://github.com/XRPLF/rippled/pull/5512/files + * - std::counting_semaphore had a bug fixed in both GCC and Clang: + * * GCC PR 104928: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=104928 + * * LLVM PR 79265: https://github.com/llvm/llvm-project/pull/79265 + * + * GCC: + * According to GCC Bugzilla PR104928 + * (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=104928#c15), the fix is + * scheduled for inclusion in GCC 16.0 (see comment #15, Target + * Milestone: 16.0). It is not included in GCC 14.x or earlier, and there is no + * indication that it will be backported to GCC 13.x or 14.x branches. + * + * Clang: + * The fix for is included in Clang 19.1.0+ + * + * Once the minimum compiler version is updated to > GCC 16.0 or Clang 19.1.0, + * we can remove this file. + * + * WARNING: Avoid using std::counting_semaphore until the minimum compiler + * version is updated. + */ + #ifndef RIPPLE_CORE_SEMAPHORE_H_INCLUDED #define RIPPLE_CORE_SEMAPHORE_H_INCLUDED From 51c5f2bfc9db56a18d8ede1b3d9f9fde75a9f62d Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Thu, 25 Sep 2025 15:14:29 +0100 Subject: [PATCH 202/244] Improve ValidatorList invalid UNL manifest logging (#5804) This change raises logging severity from `INFO` to `WARN` when handling UNL manifest signed with an unexpected / invalid key. It also changes the internal error code for an invalid format of UNL manifest to `invalid` (from `untrusted`). This is a follow up to problems experienced by an UNL node due to old manifest key configured in `validators.txt`, which would be easier to diagnose with improved logging. It also replaces a log line with `UNREACHABLE` for an impossible situation when we match UNL manifest key against a configured key which has an invalid type (we cannot configure such a key because of checks when loading configured keys). 
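For readers who don't track the ValidatorList internals, the practical effect of the error-code change is easiest to see side by side: a blob whose manifest cannot be deserialized at all is now classified as `invalid`, while a well-formed manifest signed by an unknown publisher stays `untrusted`. The toy below is a self-contained sketch of that split, not rippled code; only the two `ListDisposition` values correspond to the real enum, and the parsing helper is a stand-in for `deserializeManifest`.

    #include <optional>
    #include <set>
    #include <string>

    enum class ListDisposition { accepted, untrusted, invalid };

    // Stand-in for deserializeManifest(): empty or mangled input yields no
    // manifest at all.
    std::optional<std::string>
    parseMasterKey(std::string const& blob)
    {
        if (blob.rfind("manifest:", 0) != 0)
            return std::nullopt;
        return blob.substr(9);
    }

    ListDisposition
    classify(
        std::string const& blob,
        std::set<std::string> const& trustedPublishers)
    {
        auto const key = parseMasterKey(blob);
        if (!key)
            return ListDisposition::invalid;   // cannot be parsed: now logged at WARN
        if (trustedPublishers.count(*key) == 0)
            return ListDisposition::untrusted; // parses, but publisher not configured
        return ListDisposition::accepted;
    }

In the real code path (see `applyList` and `verify` in the diff below), deserialization now happens once in `applyList`, so `verify` receives an already-parsed `Manifest` and only answers the trust question.
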
--- src/test/app/ValidatorList_test.cpp | 18 ++++++++++ src/xrpld/app/misc/ValidatorList.h | 2 +- src/xrpld/app/misc/detail/ValidatorList.cpp | 40 +++++++++++++-------- 3 files changed, 44 insertions(+), 16 deletions(-) diff --git a/src/test/app/ValidatorList_test.cpp b/src/test/app/ValidatorList_test.cpp index a3b62bd4f7..2b004c3b52 100644 --- a/src/test/app/ValidatorList_test.cpp +++ b/src/test/app/ValidatorList_test.cpp @@ -768,6 +768,24 @@ private: expectUntrusted(lists.at(7)); expectTrusted(lists.at(2)); + // try empty or mangled manifest + checkResult( + trustedKeys->applyLists( + "", version, {{blob7, sig7, {}}, {blob6, sig6, {}}}, siteUri), + publisherPublic, + ListDisposition::invalid, + ListDisposition::invalid); + + checkResult( + trustedKeys->applyLists( + base64_encode("not a manifest"), + version, + {{blob7, sig7, {}}, {blob6, sig6, {}}}, + siteUri), + publisherPublic, + ListDisposition::invalid, + ListDisposition::invalid); + // do not use list from untrusted publisher auto const untrustedManifest = base64_encode(makeManifestString( randomMasterKey(), diff --git a/src/xrpld/app/misc/ValidatorList.h b/src/xrpld/app/misc/ValidatorList.h index 1f5d728824..9a2018cbd4 100644 --- a/src/xrpld/app/misc/ValidatorList.h +++ b/src/xrpld/app/misc/ValidatorList.h @@ -877,7 +877,7 @@ private: verify( lock_guard const&, Json::Value& list, - std::string const& manifest, + Manifest manifest, std::string const& blob, std::string const& signature); diff --git a/src/xrpld/app/misc/detail/ValidatorList.cpp b/src/xrpld/app/misc/detail/ValidatorList.cpp index 1ddb51c9dd..2b45cec3be 100644 --- a/src/xrpld/app/misc/detail/ValidatorList.cpp +++ b/src/xrpld/app/misc/detail/ValidatorList.cpp @@ -1149,21 +1149,33 @@ ValidatorList::applyList( Json::Value list; auto const& manifest = localManifest ? 
*localManifest : globalManifest; - auto [result, pubKeyOpt] = verify(lock, list, manifest, blob, signature); + auto m = deserializeManifest(base64_decode(manifest)); + if (!m) + { + JLOG(j_.warn()) << "UNL manifest cannot be deserialized"; + return PublisherListStats{ListDisposition::invalid}; + } + + auto [result, pubKeyOpt] = + verify(lock, list, std::move(*m), blob, signature); if (!pubKeyOpt) { - JLOG(j_.info()) << "ValidatorList::applyList unable to retrieve the " - "master public key from the verify function\n"; + JLOG(j_.warn()) + << "UNL manifest is signed with an unrecognized master public key"; return PublisherListStats{result}; } if (!publicKeyType(*pubKeyOpt)) - { - JLOG(j_.info()) << "ValidatorList::applyList Invalid Public Key type" - " retrieved from the verify function\n "; + { // LCOV_EXCL_START + // This is an impossible situation because we will never load an + // invalid public key type (see checks in `ValidatorList::load`) however + // we can only arrive here if the key used by the manifest matched one of + // the loaded keys + UNREACHABLE( + "ripple::ValidatorList::applyList : invalid public key type"); return PublisherListStats{result}; - } + } // LCOV_EXCL_STOP PublicKey pubKey = *pubKeyOpt; if (result > ListDisposition::pending) @@ -1356,19 +1368,17 @@ std::pair> ValidatorList::verify( ValidatorList::lock_guard const& lock, Json::Value& list, - std::string const& manifest, + Manifest manifest, std::string const& blob, std::string const& signature) { - auto m = deserializeManifest(base64_decode(manifest)); - - if (!m || !publisherLists_.count(m->masterKey)) + if (!publisherLists_.count(manifest.masterKey)) return {ListDisposition::untrusted, {}}; - PublicKey masterPubKey = m->masterKey; - auto const revoked = m->revoked(); + PublicKey masterPubKey = manifest.masterKey; + auto const revoked = manifest.revoked(); - auto const result = publisherManifests_.applyManifest(std::move(*m)); + auto const result = publisherManifests_.applyManifest(std::move(manifest)); if (revoked && result == ManifestDisposition::accepted) { @@ -1796,7 +1806,7 @@ ValidatorList::getAvailable( if (!keyBlob || !publicKeyType(makeSlice(*keyBlob))) { - JLOG(j_.info()) << "Invalid requested validator list publisher key: " + JLOG(j_.warn()) << "Invalid requested validator list publisher key: " << pubKey; return {}; } From a12f5de68d0acd2641829fdc144404e1ad1ff9e3 Mon Sep 17 00:00:00 2001 From: Bart Date: Thu, 25 Sep 2025 18:08:07 +0200 Subject: [PATCH 203/244] chore: Pin all CI Docker tags (#5813) To avoid surprises and ensure reproducibility, this change pins all CI Docker image tags to the latest version in the XRPLF/CI repo. 
--- .github/workflows/build-test.yml | 2 +- .github/workflows/notify-clio.yml | 2 +- .github/workflows/pre-commit.yml | 3 ++- .github/workflows/publish-docs.yml | 2 +- .github/workflows/upload-conan-deps.yml | 2 +- 5 files changed, 6 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml index 634ed42690..2197e88a42 100644 --- a/.github/workflows/build-test.yml +++ b/.github/workflows/build-test.yml @@ -63,7 +63,7 @@ jobs: matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }} max-parallel: 10 runs-on: ${{ matrix.architecture.runner }} - container: ${{ inputs.os == 'linux' && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version) || null }} + container: ${{ inputs.os == 'linux' && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-5dd7158', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version) || null }} steps: - name: Check strategy matrix run: | diff --git a/.github/workflows/notify-clio.yml b/.github/workflows/notify-clio.yml index 692904ff12..2d6fa63796 100644 --- a/.github/workflows/notify-clio.yml +++ b/.github/workflows/notify-clio.yml @@ -40,7 +40,7 @@ jobs: upload: if: ${{ github.event.pull_request.head.repo.full_name == github.repository }} runs-on: ubuntu-latest - container: ghcr.io/xrplf/ci/ubuntu-noble:gcc-13 + container: ghcr.io/xrplf/ci/ubuntu-noble:gcc-13-sha-5dd7158 steps: - name: Checkout repository uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml index ead137308d..9b85a3bd11 100644 --- a/.github/workflows/pre-commit.yml +++ b/.github/workflows/pre-commit.yml @@ -7,8 +7,9 @@ on: workflow_dispatch: jobs: + # Call the workflow in the XRPLF/actions repo that runs the pre-commit hooks. 
run-hooks: uses: XRPLF/actions/.github/workflows/pre-commit.yml@af1b0f0d764cda2e5435f5ac97b240d4bd4d95d3 with: runs_on: ubuntu-latest - container: '{ "image": "ghcr.io/xrplf/ci/tools-rippled-pre-commit" }' + container: '{ "image": "ghcr.io/xrplf/ci/tools-rippled-pre-commit:sha-d1496b8" }' diff --git a/.github/workflows/publish-docs.yml b/.github/workflows/publish-docs.yml index 2fcdd581d1..efd89a5b22 100644 --- a/.github/workflows/publish-docs.yml +++ b/.github/workflows/publish-docs.yml @@ -27,7 +27,7 @@ env: jobs: publish: runs-on: ubuntu-latest - container: ghcr.io/xrplf/ci/tools-rippled-documentation + container: ghcr.io/xrplf/ci/tools-rippled-documentation:sha-d1496b8 permissions: contents: write steps: diff --git a/.github/workflows/upload-conan-deps.yml b/.github/workflows/upload-conan-deps.yml index c52b3c89d3..98db52a436 100644 --- a/.github/workflows/upload-conan-deps.yml +++ b/.github/workflows/upload-conan-deps.yml @@ -56,7 +56,7 @@ jobs: matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }} max-parallel: 10 runs-on: ${{ matrix.architecture.runner }} - container: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version) || null }} + container: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-5dd7158', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version) || null }} steps: - name: Cleanup workspace From 2c3024716b5de465a59c2e48572add6cc3321d22 Mon Sep 17 00:00:00 2001 From: tequ Date: Fri, 26 Sep 2025 20:07:48 +0900 Subject: [PATCH 204/244] change `fixPriceOracleOrder` to `Supported::yes` (#5749) --- include/xrpl/protocol/detail/features.macro | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/xrpl/protocol/detail/features.macro b/include/xrpl/protocol/detail/features.macro index 9dc40dc8e5..ce9583dace 100644 --- a/include/xrpl/protocol/detail/features.macro +++ b/include/xrpl/protocol/detail/features.macro @@ -36,7 +36,7 @@ XRPL_FIX (IncludeKeyletFields, Supported::no, VoteBehavior::DefaultNo XRPL_FEATURE(DynamicMPT, Supported::no, VoteBehavior::DefaultNo) XRPL_FIX (TokenEscrowV1, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (DelegateV1_1, Supported::no, VoteBehavior::DefaultNo) -XRPL_FIX (PriceOracleOrder, Supported::no, VoteBehavior::DefaultNo) +XRPL_FIX (PriceOracleOrder, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (MPTDeliveredAmount, Supported::no, VoteBehavior::DefaultNo) XRPL_FIX (AMMClawbackRounding, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(TokenEscrow, Supported::yes, VoteBehavior::DefaultNo) From cfd26f444cddb297fe6273f956b653997abf6de7 Mon Sep 17 00:00:00 2001 From: Jingchen Date: Fri, 26 Sep 2025 12:40:43 +0100 Subject: [PATCH 205/244] fix: Address http header case sensitivity (#5767) This change makes the regex in `HttpClient.cpp` that matches the content-length http header case insensitive to improve compatibility, as http headers are case insensitive. 
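A minimal, self-contained illustration of what the added `boost::regex::icase` flag changes (the pattern is copied from the diff below; the sample response and the `main` wrapper are only for demonstration): without the flag the pattern matches only the canonical `Content-Length` spelling, with it any casing is accepted.

    #include <boost/regex.hpp>
    #include <iostream>
    #include <string>

    int main()
    {
        // A response whose server sends the header in lowercase.
        std::string const response =
            "HTTP/1.1 200 OK\r\ncontent-length: 12\r\n\r\nHello World!";

        boost::regex const caseSensitive{
            "\\`.*\\r\\nContent-Length:\\s+([0-9]+).*\\'"};
        boost::regex const caseInsensitive{
            "\\`.*\\r\\nContent-Length:\\s+([0-9]+).*\\'", boost::regex::icase};

        boost::smatch m;
        std::cout << std::boolalpha
                  << boost::regex_match(response, m, caseSensitive) << '\n'    // false
                  << boost::regex_match(response, m, caseInsensitive) << '\n'; // true: m[1] == "12"
    }

The new `tests/libxrpl/net/HTTPClient.cpp` suite added below exercises the same behaviour end to end by spinning up a small Beast server that emits `content-length`, `CONTENT-LENGTH`, and other spellings of the header.
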
--- .../scripts/levelization/results/ordering.txt | 1 + src/libxrpl/net/HTTPClient.cpp | 2 +- src/tests/libxrpl/CMakeLists.txt | 2 + src/tests/libxrpl/net/HTTPClient.cpp | 346 ++++++++++++++++++ src/tests/libxrpl/net/main.cpp | 21 ++ 5 files changed, 371 insertions(+), 1 deletion(-) create mode 100644 src/tests/libxrpl/net/HTTPClient.cpp create mode 100644 src/tests/libxrpl/net/main.cpp diff --git a/.github/scripts/levelization/results/ordering.txt b/.github/scripts/levelization/results/ordering.txt index 13de36e2a5..55df4c2672 100644 --- a/.github/scripts/levelization/results/ordering.txt +++ b/.github/scripts/levelization/results/ordering.txt @@ -138,6 +138,7 @@ test.toplevel > test.csf test.toplevel > xrpl.json test.unit_test > xrpl.basics tests.libxrpl > xrpl.basics +tests.libxrpl > xrpl.net xrpl.json > xrpl.basics xrpl.ledger > xrpl.basics xrpl.ledger > xrpl.protocol diff --git a/src/libxrpl/net/HTTPClient.cpp b/src/libxrpl/net/HTTPClient.cpp index 964be32dd8..74b8b61ca6 100644 --- a/src/libxrpl/net/HTTPClient.cpp +++ b/src/libxrpl/net/HTTPClient.cpp @@ -383,7 +383,7 @@ public: static boost::regex reStatus{ "\\`HTTP/1\\S+ (\\d{3}) .*\\'"}; // HTTP/1.1 200 OK static boost::regex reSize{ - "\\`.*\\r\\nContent-Length:\\s+([0-9]+).*\\'"}; + "\\`.*\\r\\nContent-Length:\\s+([0-9]+).*\\'", boost::regex::icase}; static boost::regex reBody{"\\`.*\\r\\n\\r\\n(.*)\\'"}; boost::smatch smMatch; diff --git a/src/tests/libxrpl/CMakeLists.txt b/src/tests/libxrpl/CMakeLists.txt index 68c6fa6cb3..f97283c955 100644 --- a/src/tests/libxrpl/CMakeLists.txt +++ b/src/tests/libxrpl/CMakeLists.txt @@ -12,3 +12,5 @@ xrpl_add_test(basics) target_link_libraries(xrpl.test.basics PRIVATE xrpl.imports.test) xrpl_add_test(crypto) target_link_libraries(xrpl.test.crypto PRIVATE xrpl.imports.test) +xrpl_add_test(net) +target_link_libraries(xrpl.test.net PRIVATE xrpl.imports.test) diff --git a/src/tests/libxrpl/net/HTTPClient.cpp b/src/tests/libxrpl/net/HTTPClient.cpp new file mode 100644 index 0000000000..4d50c47220 --- /dev/null +++ b/src/tests/libxrpl/net/HTTPClient.cpp @@ -0,0 +1,346 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2024 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +using namespace ripple; + +namespace { + +// Simple HTTP server using Beast for testing +class TestHTTPServer +{ +private: + boost::asio::io_context ioc_; + boost::asio::ip::tcp::acceptor acceptor_; + boost::asio::ip::tcp::endpoint endpoint_; + std::atomic running_{true}; + unsigned short port_; + + // Custom headers to return + std::map custom_headers_; + std::string response_body_; + unsigned int status_code_{200}; + +public: + TestHTTPServer() : acceptor_(ioc_), port_(0) + { + // Bind to any available port + endpoint_ = {boost::asio::ip::tcp::v4(), 0}; + acceptor_.open(endpoint_.protocol()); + acceptor_.set_option(boost::asio::socket_base::reuse_address(true)); + acceptor_.bind(endpoint_); + acceptor_.listen(); + + // Get the actual port that was assigned + port_ = acceptor_.local_endpoint().port(); + + accept(); + } + + ~TestHTTPServer() + { + stop(); + } + + boost::asio::io_context& + ioc() + { + return ioc_; + } + + unsigned short + port() const + { + return port_; + } + + void + setHeader(std::string const& name, std::string const& value) + { + custom_headers_[name] = value; + } + + void + setResponseBody(std::string const& body) + { + response_body_ = body; + } + + void + setStatusCode(unsigned int code) + { + status_code_ = code; + } + +private: + void + stop() + { + running_ = false; + acceptor_.close(); + } + + void + accept() + { + if (!running_) + return; + + acceptor_.async_accept( + ioc_, + endpoint_, + [&](boost::system::error_code const& error, + boost::asio::ip::tcp::socket peer) { + if (!running_) + return; + + if (!error) + { + handleConnection(std::move(peer)); + } + }); + } + + void + handleConnection(boost::asio::ip::tcp::socket socket) + { + try + { + // Read the HTTP request + boost::beast::flat_buffer buffer; + boost::beast::http::request req; + boost::beast::http::read(socket, buffer, req); + + // Create response + boost::beast::http::response res; + res.version(req.version()); + res.result(status_code_); + res.set(boost::beast::http::field::server, "TestServer"); + + // Add custom headers + for (auto const& [name, value] : custom_headers_) + { + res.set(name, value); + } + + // Set body and prepare payload first + res.body() = response_body_; + res.prepare_payload(); + + // Override Content-Length with custom headers after prepare_payload + // This allows us to test case-insensitive header parsing + for (auto const& [name, value] : custom_headers_) + { + if (boost::iequals(name, "Content-Length")) + { + res.erase(boost::beast::http::field::content_length); + res.set(name, value); + } + } + + // Send response + boost::beast::http::write(socket, res); + + // Shutdown socket gracefully + boost::system::error_code ec; + socket.shutdown(boost::asio::ip::tcp::socket::shutdown_send, ec); + } + catch (std::exception const&) + { + // Connection handling errors are expected + } + + if (running_) + accept(); + } +}; + +// Helper function to run HTTP client test +bool +runHTTPTest( + TestHTTPServer& server, + std::string const& path, + std::atomic& completed, + std::atomic& result_status, + std::string& result_data, + boost::system::error_code& result_error) +{ + // Create a null journal for testing + beast::Journal j{beast::Journal::getNullSink()}; + + // Initialize HTTPClient SSL context + HTTPClient::initializeSSLContext("", "", false, j); + + HTTPClient::get( + 
false, // no SSL + server.ioc(), + "127.0.0.1", + server.port(), + path, + 1024, // max response size + std::chrono::seconds(5), + [&](boost::system::error_code const& ec, + int status, + std::string const& data) -> bool { + result_error = ec; + result_status = status; + result_data = data; + completed = true; + return false; // don't retry + }, + j); + + // Run the IO context until completion + auto start = std::chrono::steady_clock::now(); + while (!completed && + std::chrono::steady_clock::now() - start < std::chrono::seconds(10)) + { + if (server.ioc().run_one() == 0) + { + break; + } + } + + return completed; +} + +} // anonymous namespace + +TEST_CASE("HTTPClient case insensitive Content-Length") +{ + // Test different cases of Content-Length header + std::vector header_cases = { + "Content-Length", // Standard case + "content-length", // Lowercase - this tests the regex icase fix + "CONTENT-LENGTH", // Uppercase + "Content-length", // Mixed case + "content-Length" // Mixed case 2 + }; + + for (auto const& header_name : header_cases) + { + TestHTTPServer server; + std::string test_body = "Hello World!"; + server.setResponseBody(test_body); + server.setHeader(header_name, std::to_string(test_body.size())); + + std::atomic completed{false}; + std::atomic result_status{0}; + std::string result_data; + boost::system::error_code result_error; + + bool test_completed = runHTTPTest( + server, + "/test", + completed, + result_status, + result_data, + result_error); + + // Verify results + CHECK(test_completed); + CHECK(!result_error); + CHECK(result_status == 200); + CHECK(result_data == test_body); + } +} + +TEST_CASE("HTTPClient basic HTTP request") +{ + TestHTTPServer server; + std::string test_body = "Test response body"; + server.setResponseBody(test_body); + server.setHeader("Content-Type", "text/plain"); + + std::atomic completed{false}; + std::atomic result_status{0}; + std::string result_data; + boost::system::error_code result_error; + + bool test_completed = runHTTPTest( + server, "/basic", completed, result_status, result_data, result_error); + + CHECK(test_completed); + CHECK(!result_error); + CHECK(result_status == 200); + CHECK(result_data == test_body); +} + +TEST_CASE("HTTPClient empty response") +{ + TestHTTPServer server; + server.setResponseBody(""); // Empty body + server.setHeader("Content-Length", "0"); + + std::atomic completed{false}; + std::atomic result_status{0}; + std::string result_data; + boost::system::error_code result_error; + + bool test_completed = runHTTPTest( + server, "/empty", completed, result_status, result_data, result_error); + + CHECK(test_completed); + CHECK(!result_error); + CHECK(result_status == 200); + CHECK(result_data.empty()); +} + +TEST_CASE("HTTPClient different status codes") +{ + std::vector status_codes = {200, 404, 500}; + + for (auto status : status_codes) + { + TestHTTPServer server; + server.setStatusCode(status); + server.setResponseBody("Status " + std::to_string(status)); + + std::atomic completed{false}; + std::atomic result_status{0}; + std::string result_data; + boost::system::error_code result_error; + + bool test_completed = runHTTPTest( + server, + "/status", + completed, + result_status, + result_data, + result_error); + + CHECK(test_completed); + CHECK(!result_error); + CHECK(result_status == static_cast(status)); + } +} diff --git a/src/tests/libxrpl/net/main.cpp b/src/tests/libxrpl/net/main.cpp new file mode 100644 index 0000000000..be9fc14bbf --- /dev/null +++ b/src/tests/libxrpl/net/main.cpp @@ -0,0 +1,21 @@ 
+//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2024 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN +#include From d02c306f1e3f954cbeeedba40da85db125f4986b Mon Sep 17 00:00:00 2001 From: Mayukha Vadari Date: Fri, 26 Sep 2025 13:40:19 -0400 Subject: [PATCH 206/244] test: add more comprehensive tests for `FeeVote` (#5746) This change adds more comprehensive tests for the `FeeVote` module, which previously only checked the basics, and not the more comprehensive flows in that class. --- src/test/app/FeeVote_test.cpp | 699 ++++++++++++++++++++++++++++++++++ 1 file changed, 699 insertions(+) diff --git a/src/test/app/FeeVote_test.cpp b/src/test/app/FeeVote_test.cpp index ba3d379219..4fe0a62e3b 100644 --- a/src/test/app/FeeVote_test.cpp +++ b/src/test/app/FeeVote_test.cpp @@ -19,11 +19,203 @@ #include +#include +#include +#include + #include +#include +#include +#include +#include +#include +#include namespace ripple { namespace test { +struct FeeSettingsFields +{ + std::optional baseFee = std::nullopt; + std::optional reserveBase = std::nullopt; + std::optional reserveIncrement = std::nullopt; + std::optional referenceFeeUnits = std::nullopt; + std::optional baseFeeDrops = std::nullopt; + std::optional reserveBaseDrops = std::nullopt; + std::optional reserveIncrementDrops = std::nullopt; +}; + +STTx +createFeeTx( + Rules const& rules, + std::uint32_t seq, + FeeSettingsFields const& fields) +{ + auto fill = [&](auto& obj) { + obj.setAccountID(sfAccount, AccountID()); + obj.setFieldU32(sfLedgerSequence, seq); + + if (rules.enabled(featureXRPFees)) + { + // New XRPFees format - all three fields are REQUIRED + obj.setFieldAmount( + sfBaseFeeDrops, + fields.baseFeeDrops ? *fields.baseFeeDrops : XRPAmount{0}); + obj.setFieldAmount( + sfReserveBaseDrops, + fields.reserveBaseDrops ? *fields.reserveBaseDrops + : XRPAmount{0}); + obj.setFieldAmount( + sfReserveIncrementDrops, + fields.reserveIncrementDrops ? *fields.reserveIncrementDrops + : XRPAmount{0}); + } + else + { + // Legacy format - all four fields are REQUIRED + obj.setFieldU64(sfBaseFee, fields.baseFee ? *fields.baseFee : 0); + obj.setFieldU32( + sfReserveBase, fields.reserveBase ? *fields.reserveBase : 0); + obj.setFieldU32( + sfReserveIncrement, + fields.reserveIncrement ? *fields.reserveIncrement : 0); + obj.setFieldU32( + sfReferenceFeeUnits, + fields.referenceFeeUnits ? 
*fields.referenceFeeUnits : 0); + } + }; + return STTx(ttFEE, fill); +} + +STTx +createInvalidFeeTx( + Rules const& rules, + std::uint32_t seq, + bool missingRequiredFields = true, + bool wrongFeatureFields = false, + std::uint32_t uniqueValue = 42) +{ + auto fill = [&](auto& obj) { + obj.setAccountID(sfAccount, AccountID()); + obj.setFieldU32(sfLedgerSequence, seq); + + if (wrongFeatureFields) + { + if (rules.enabled(featureXRPFees)) + { + obj.setFieldU64(sfBaseFee, 10 + uniqueValue); + obj.setFieldU32(sfReserveBase, 200000); + obj.setFieldU32(sfReserveIncrement, 50000); + obj.setFieldU32(sfReferenceFeeUnits, 10); + } + else + { + obj.setFieldAmount(sfBaseFeeDrops, XRPAmount{10 + uniqueValue}); + obj.setFieldAmount(sfReserveBaseDrops, XRPAmount{200000}); + obj.setFieldAmount(sfReserveIncrementDrops, XRPAmount{50000}); + } + } + else if (!missingRequiredFields) + { + // Create valid transaction (all required fields present) + if (rules.enabled(featureXRPFees)) + { + obj.setFieldAmount(sfBaseFeeDrops, XRPAmount{10 + uniqueValue}); + obj.setFieldAmount(sfReserveBaseDrops, XRPAmount{200000}); + obj.setFieldAmount(sfReserveIncrementDrops, XRPAmount{50000}); + } + else + { + obj.setFieldU64(sfBaseFee, 10 + uniqueValue); + obj.setFieldU32(sfReserveBase, 200000); + obj.setFieldU32(sfReserveIncrement, 50000); + obj.setFieldU32(sfReferenceFeeUnits, 10); + } + } + // If missingRequiredFields is true, we don't add the required fields + // (default behavior) + }; + return STTx(ttFEE, fill); +} + +bool +applyFeeAndTestResult(jtx::Env& env, OpenView& view, STTx const& tx) +{ + auto const res = + apply(env.app(), view, tx, ApplyFlags::tapNONE, env.journal); + return res.ter == tesSUCCESS; +} + +bool +verifyFeeObject( + std::shared_ptr const& ledger, + Rules const& rules, + FeeSettingsFields const& expected) +{ + auto const feeObject = ledger->read(keylet::fees()); + if (!feeObject) + return false; + + auto checkEquality = [&](auto const& field, auto const& expected) { + if (!feeObject->isFieldPresent(field)) + return false; + return feeObject->at(field) == expected; + }; + + if (rules.enabled(featureXRPFees)) + { + if (feeObject->isFieldPresent(sfBaseFee) || + feeObject->isFieldPresent(sfReserveBase) || + feeObject->isFieldPresent(sfReserveIncrement) || + feeObject->isFieldPresent(sfReferenceFeeUnits)) + return false; + + if (!checkEquality( + sfBaseFeeDrops, expected.baseFeeDrops.value_or(XRPAmount{0}))) + return false; + if (!checkEquality( + sfReserveBaseDrops, + expected.reserveBaseDrops.value_or(XRPAmount{0}))) + return false; + if (!checkEquality( + sfReserveIncrementDrops, + expected.reserveIncrementDrops.value_or(XRPAmount{0}))) + return false; + } + else + { + if (feeObject->isFieldPresent(sfBaseFeeDrops) || + feeObject->isFieldPresent(sfReserveBaseDrops) || + feeObject->isFieldPresent(sfReserveIncrementDrops)) + return false; + + // Read sfBaseFee as a hex string and compare to expected.baseFee + if (!checkEquality(sfBaseFee, expected.baseFee)) + return false; + if (!checkEquality(sfReserveBase, expected.reserveBase)) + return false; + if (!checkEquality(sfReserveIncrement, expected.reserveIncrement)) + return false; + if (!checkEquality(sfReferenceFeeUnits, expected.referenceFeeUnits)) + return false; + } + + return true; +} + +std::vector +getTxs(std::shared_ptr const& txSet) +{ + std::vector txs; + for (auto i = txSet->begin(); i != txSet->end(); ++i) + { + auto const data = i->slice(); + auto serialIter = SerialIter(data); + txs.push_back(STTx(serialIter)); + } + return txs; +}; + class 
FeeVote_test : public beast::unit_test::suite { void @@ -93,10 +285,517 @@ class FeeVote_test : public beast::unit_test::suite } } + void + testBasic() + { + testcase("Basic SetFee transaction"); + + // Test with XRPFees disabled (legacy format) + { + jtx::Env env(*this, jtx::testable_amendments() - featureXRPFees); + auto ledger = std::make_shared( + create_genesis, + env.app().config(), + std::vector{}, + env.app().getNodeFamily()); + + // Create the next ledger to apply transaction to + ledger = std::make_shared( + *ledger, env.app().timeKeeper().closeTime()); + + // Test successful fee transaction with legacy fields + + FeeSettingsFields fields{ + .baseFee = 10, + .reserveBase = 200000, + .reserveIncrement = 50000, + .referenceFeeUnits = 10}; + auto feeTx = createFeeTx(ledger->rules(), ledger->seq(), fields); + + OpenView accum(ledger.get()); + BEAST_EXPECT(applyFeeAndTestResult(env, accum, feeTx)); + accum.apply(*ledger); + + // Verify fee object was created/updated correctly + BEAST_EXPECT(verifyFeeObject(ledger, ledger->rules(), fields)); + } + + // Test with XRPFees enabled (new format) + { + jtx::Env env(*this, jtx::testable_amendments() | featureXRPFees); + auto ledger = std::make_shared( + create_genesis, + env.app().config(), + std::vector{}, + env.app().getNodeFamily()); + + // Create the next ledger to apply transaction to + ledger = std::make_shared( + *ledger, env.app().timeKeeper().closeTime()); + + FeeSettingsFields fields{ + .baseFeeDrops = XRPAmount{10}, + .reserveBaseDrops = XRPAmount{200000}, + .reserveIncrementDrops = XRPAmount{50000}}; + // Test successful fee transaction with new fields + auto feeTx = createFeeTx(ledger->rules(), ledger->seq(), fields); + + OpenView accum(ledger.get()); + BEAST_EXPECT(applyFeeAndTestResult(env, accum, feeTx)); + accum.apply(*ledger); + + // Verify fee object was created/updated correctly + BEAST_EXPECT(verifyFeeObject(ledger, ledger->rules(), fields)); + } + } + + void + testTransactionValidation() + { + testcase("Fee Transaction Validation"); + + { + jtx::Env env(*this, jtx::testable_amendments() - featureXRPFees); + auto ledger = std::make_shared( + create_genesis, + env.app().config(), + std::vector{}, + env.app().getNodeFamily()); + + // Create the next ledger to apply transaction to + ledger = std::make_shared( + *ledger, env.app().timeKeeper().closeTime()); + + // Test transaction with missing required legacy fields + auto invalidTx = createInvalidFeeTx( + ledger->rules(), ledger->seq(), true, false, 1); + OpenView accum(ledger.get()); + BEAST_EXPECT(!applyFeeAndTestResult(env, accum, invalidTx)); + + // Test transaction with new format fields when XRPFees is disabled + auto disallowedTx = createInvalidFeeTx( + ledger->rules(), ledger->seq(), false, true, 2); + BEAST_EXPECT(!applyFeeAndTestResult(env, accum, disallowedTx)); + } + + { + jtx::Env env(*this, jtx::testable_amendments() | featureXRPFees); + auto ledger = std::make_shared( + create_genesis, + env.app().config(), + std::vector{}, + env.app().getNodeFamily()); + + // Create the next ledger to apply transaction to + ledger = std::make_shared( + *ledger, env.app().timeKeeper().closeTime()); + + // Test transaction with missing required new fields + auto invalidTx = createInvalidFeeTx( + ledger->rules(), ledger->seq(), true, false, 3); + OpenView accum(ledger.get()); + BEAST_EXPECT(!applyFeeAndTestResult(env, accum, invalidTx)); + + // Test transaction with legacy fields when XRPFees is enabled + auto disallowedTx = createInvalidFeeTx( + ledger->rules(), ledger->seq(), 
false, true, 4); + BEAST_EXPECT(!applyFeeAndTestResult(env, accum, disallowedTx)); + } + } + + void + testPseudoTransactionProperties() + { + testcase("Pseudo Transaction Properties"); + + jtx::Env env(*this, jtx::testable_amendments()); + auto ledger = std::make_shared( + create_genesis, + env.app().config(), + std::vector{}, + env.app().getNodeFamily()); + + // Create the next ledger to apply transaction to + ledger = std::make_shared( + *ledger, env.app().timeKeeper().closeTime()); + + auto feeTx = createFeeTx( + ledger->rules(), + ledger->seq(), + {.baseFeeDrops = XRPAmount{10}, + .reserveBaseDrops = XRPAmount{200000}, + .reserveIncrementDrops = XRPAmount{50000}}); + + // Verify pseudo-transaction properties + BEAST_EXPECT(feeTx.getAccountID(sfAccount) == AccountID()); + BEAST_EXPECT(feeTx.getFieldAmount(sfFee) == XRPAmount{0}); + BEAST_EXPECT(feeTx.getSigningPubKey().empty()); + BEAST_EXPECT(feeTx.getSignature().empty()); + BEAST_EXPECT(!feeTx.isFieldPresent(sfSigners)); + BEAST_EXPECT(feeTx.getFieldU32(sfSequence) == 0); + BEAST_EXPECT(!feeTx.isFieldPresent(sfPreviousTxnID)); + + // But can be applied to a closed ledger + { + OpenView closedAccum(ledger.get()); + BEAST_EXPECT(applyFeeAndTestResult(env, closedAccum, feeTx)); + } + } + + void + testMultipleFeeUpdates() + { + testcase("Multiple Fee Updates"); + + jtx::Env env(*this, jtx::testable_amendments() | featureXRPFees); + auto ledger = std::make_shared( + create_genesis, + env.app().config(), + std::vector{}, + env.app().getNodeFamily()); + + ledger = std::make_shared( + *ledger, env.app().timeKeeper().closeTime()); + + FeeSettingsFields fields1{ + .baseFeeDrops = XRPAmount{10}, + .reserveBaseDrops = XRPAmount{200000}, + .reserveIncrementDrops = XRPAmount{50000}}; + auto feeTx1 = createFeeTx(ledger->rules(), ledger->seq(), fields1); + + { + OpenView accum(ledger.get()); + BEAST_EXPECT(applyFeeAndTestResult(env, accum, feeTx1)); + accum.apply(*ledger); + } + + BEAST_EXPECT(verifyFeeObject(ledger, ledger->rules(), fields1)); + + // Apply second fee transaction with different values + ledger = std::make_shared( + *ledger, env.app().timeKeeper().closeTime()); + + FeeSettingsFields fields2{ + .baseFeeDrops = XRPAmount{20}, + .reserveBaseDrops = XRPAmount{300000}, + .reserveIncrementDrops = XRPAmount{75000}}; + auto feeTx2 = createFeeTx(ledger->rules(), ledger->seq(), fields2); + + { + OpenView accum(ledger.get()); + BEAST_EXPECT(applyFeeAndTestResult(env, accum, feeTx2)); + accum.apply(*ledger); + } + + // Verify second update overwrote the first + BEAST_EXPECT(verifyFeeObject(ledger, ledger->rules(), fields2)); + } + + void + testWrongLedgerSequence() + { + testcase("Wrong Ledger Sequence"); + + jtx::Env env(*this, jtx::testable_amendments() | featureXRPFees); + auto ledger = std::make_shared( + create_genesis, + env.app().config(), + std::vector{}, + env.app().getNodeFamily()); + + ledger = std::make_shared( + *ledger, env.app().timeKeeper().closeTime()); + + // Test transaction with wrong ledger sequence + auto feeTx = createFeeTx( + ledger->rules(), + ledger->seq() + 5, // Wrong sequence (should be ledger->seq()) + {.baseFeeDrops = XRPAmount{10}, + .reserveBaseDrops = XRPAmount{200000}, + .reserveIncrementDrops = XRPAmount{50000}}); + + OpenView accum(ledger.get()); + + // The transaction should still succeed as long as other fields are + // valid + // The ledger sequence field is only used for informational purposes + BEAST_EXPECT(applyFeeAndTestResult(env, accum, feeTx)); + } + + void + testPartialFieldUpdates() + { + 
testcase("Partial Field Updates"); + + jtx::Env env(*this, jtx::testable_amendments() | featureXRPFees); + auto ledger = std::make_shared( + create_genesis, + env.app().config(), + std::vector{}, + env.app().getNodeFamily()); + + ledger = std::make_shared( + *ledger, env.app().timeKeeper().closeTime()); + + FeeSettingsFields fields1{ + .baseFeeDrops = XRPAmount{10}, + .reserveBaseDrops = XRPAmount{200000}, + .reserveIncrementDrops = XRPAmount{50000}}; + auto feeTx1 = createFeeTx(ledger->rules(), ledger->seq(), fields1); + + { + OpenView accum(ledger.get()); + BEAST_EXPECT(applyFeeAndTestResult(env, accum, feeTx1)); + accum.apply(*ledger); + } + + BEAST_EXPECT(verifyFeeObject(ledger, ledger->rules(), fields1)); + + ledger = std::make_shared( + *ledger, env.app().timeKeeper().closeTime()); + + // Apply partial update (only some fields) + FeeSettingsFields fields2{ + .baseFeeDrops = XRPAmount{20}, + .reserveBaseDrops = XRPAmount{200000}}; + auto feeTx2 = createFeeTx(ledger->rules(), ledger->seq(), fields2); + + { + OpenView accum(ledger.get()); + BEAST_EXPECT(applyFeeAndTestResult(env, accum, feeTx2)); + accum.apply(*ledger); + } + + // Verify the partial update worked + BEAST_EXPECT(verifyFeeObject(ledger, ledger->rules(), fields2)); + } + + void + testSingleInvalidTransaction() + { + testcase("Single Invalid Transaction"); + + jtx::Env env(*this, jtx::testable_amendments() | featureXRPFees); + auto ledger = std::make_shared( + create_genesis, + env.app().config(), + std::vector{}, + env.app().getNodeFamily()); + + ledger = std::make_shared( + *ledger, env.app().timeKeeper().closeTime()); + + // Test invalid transaction with non-zero account - this should fail + // validation + auto invalidTx = STTx(ttFEE, [&](auto& obj) { + obj.setAccountID( + sfAccount, + AccountID(1)); // Should be zero (this makes it invalid) + obj.setFieldU32(sfLedgerSequence, ledger->seq()); + obj.setFieldAmount(sfBaseFeeDrops, XRPAmount{10}); + obj.setFieldAmount(sfReserveBaseDrops, XRPAmount{200000}); + obj.setFieldAmount(sfReserveIncrementDrops, XRPAmount{50000}); + }); + + OpenView accum(ledger.get()); + BEAST_EXPECT(!applyFeeAndTestResult(env, accum, invalidTx)); + } + + void + testDoValidation() + { + testcase("doValidation"); + + using namespace jtx; + + FeeSetup setup; + setup.reference_fee = 42; + setup.account_reserve = 1234567; + setup.owner_reserve = 7654321; + + // Test with XRPFees enabled + { + Env env(*this, testable_amendments() | featureXRPFees); + auto feeVote = make_FeeVote(setup, env.app().journal("FeeVote")); + + auto ledger = std::make_shared( + create_genesis, + env.app().config(), + std::vector{}, + env.app().getNodeFamily()); + + auto sec = randomSecretKey(); + auto pub = derivePublicKey(KeyType::secp256k1, sec); + + auto val = std::make_shared( + env.app().timeKeeper().now(), + pub, + sec, + calcNodeID(pub), + [](STValidation& v) { + v.setFieldU32(sfLedgerSequence, 12345); + }); + + // Use the current ledger's fees as the "current" fees for + // doValidation + auto const& currentFees = ledger->fees(); + + feeVote->doValidation(currentFees, ledger->rules(), *val); + + BEAST_EXPECT(val->isFieldPresent(sfBaseFeeDrops)); + BEAST_EXPECT( + val->getFieldAmount(sfBaseFeeDrops) == + XRPAmount(setup.reference_fee)); + } + + // Test with XRPFees disabled (legacy format) + { + Env env(*this, testable_amendments() - featureXRPFees); + auto feeVote = make_FeeVote(setup, env.app().journal("FeeVote")); + + auto ledger = std::make_shared( + create_genesis, + env.app().config(), + std::vector{}, + 
env.app().getNodeFamily()); + + auto sec = randomSecretKey(); + auto pub = derivePublicKey(KeyType::secp256k1, sec); + + auto val = std::make_shared( + env.app().timeKeeper().now(), + pub, + sec, + calcNodeID(pub), + [](STValidation& v) { + v.setFieldU32(sfLedgerSequence, 12345); + }); + + auto const& currentFees = ledger->fees(); + + feeVote->doValidation(currentFees, ledger->rules(), *val); + + // In legacy mode, should vote using legacy fields + BEAST_EXPECT(val->isFieldPresent(sfBaseFee)); + BEAST_EXPECT(val->getFieldU64(sfBaseFee) == setup.reference_fee); + } + } + + void + testDoVoting() + { + testcase("doVoting"); + + using namespace jtx; + + FeeSetup setup; + setup.reference_fee = 42; + setup.account_reserve = 1234567; + setup.owner_reserve = 7654321; + + Env env(*this, testable_amendments() | featureXRPFees); + + // establish what the current fees are + BEAST_EXPECT( + env.current()->fees().base == XRPAmount{UNIT_TEST_REFERENCE_FEE}); + BEAST_EXPECT(env.current()->fees().reserve == XRPAmount{200'000'000}); + BEAST_EXPECT(env.current()->fees().increment == XRPAmount{50'000'000}); + + auto feeVote = make_FeeVote(setup, env.app().journal("FeeVote")); + auto ledger = std::make_shared( + create_genesis, + env.app().config(), + std::vector{}, + env.app().getNodeFamily()); + + // doVoting requires a flag ledger (every 256th ledger) + // We need to create a ledger at sequence 256 to make it a flag ledger + for (int i = 0; i < 256 - 1; ++i) + { + ledger = std::make_shared( + *ledger, env.app().timeKeeper().closeTime()); + } + BEAST_EXPECT(ledger->isFlagLedger()); + + // Create some mock validations with fee votes + std::vector> validations; + + for (int i = 0; i < 5; i++) + { + auto sec = randomSecretKey(); + auto pub = derivePublicKey(KeyType::secp256k1, sec); + + auto val = std::make_shared( + env.app().timeKeeper().now(), + pub, + sec, + calcNodeID(pub), + [&](STValidation& v) { + v.setFieldU32(sfLedgerSequence, ledger->seq()); + // Vote for different fees than current + v.setFieldAmount( + sfBaseFeeDrops, XRPAmount{setup.reference_fee}); + v.setFieldAmount( + sfReserveBaseDrops, XRPAmount{setup.account_reserve}); + v.setFieldAmount( + sfReserveIncrementDrops, + XRPAmount{setup.owner_reserve}); + }); + if (i % 2) + val->setTrusted(); + validations.push_back(val); + } + + auto txSet = std::make_shared( + SHAMapType::TRANSACTION, env.app().getNodeFamily()); + + // This should not throw since we have a flag ledger + feeVote->doVoting(ledger, validations, txSet); + + auto const txs = getTxs(txSet); + BEAST_EXPECT(txs.size() == 1); + auto const& feeTx = txs[0]; + + BEAST_EXPECT(feeTx.getTxnType() == ttFEE); + + BEAST_EXPECT(feeTx.getAccountID(sfAccount) == AccountID()); + BEAST_EXPECT(feeTx.getFieldU32(sfLedgerSequence) == ledger->seq() + 1); + + BEAST_EXPECT(feeTx.isFieldPresent(sfBaseFeeDrops)); + BEAST_EXPECT(feeTx.isFieldPresent(sfReserveBaseDrops)); + BEAST_EXPECT(feeTx.isFieldPresent(sfReserveIncrementDrops)); + + // The legacy fields should NOT be present + BEAST_EXPECT(!feeTx.isFieldPresent(sfBaseFee)); + BEAST_EXPECT(!feeTx.isFieldPresent(sfReserveBase)); + BEAST_EXPECT(!feeTx.isFieldPresent(sfReserveIncrement)); + BEAST_EXPECT(!feeTx.isFieldPresent(sfReferenceFeeUnits)); + + // Check the values + BEAST_EXPECT( + feeTx.getFieldAmount(sfBaseFeeDrops) == + XRPAmount{setup.reference_fee}); + BEAST_EXPECT( + feeTx.getFieldAmount(sfReserveBaseDrops) == + XRPAmount{setup.account_reserve}); + BEAST_EXPECT( + feeTx.getFieldAmount(sfReserveIncrementDrops) == + 
XRPAmount{setup.owner_reserve}); + } + void run() override { testSetup(); + testBasic(); + testTransactionValidation(); + testPseudoTransactionProperties(); + testMultipleFeeUpdates(); + testWrongLedgerSequence(); + testPartialFieldUpdates(); + testSingleInvalidTransaction(); + testDoValidation(); + testDoVoting(); } }; From 19c4226d3d8c5c9ed47930cfb96c731f8d4959f2 Mon Sep 17 00:00:00 2001 From: Ayaz Salikhov Date: Fri, 26 Sep 2025 19:33:42 +0100 Subject: [PATCH 207/244] ci: Call all reusable workflows reusable (#5818) --- .github/scripts/levelization/README.md | 6 +++--- .github/workflows/on-pr.yml | 12 ++++++------ .github/workflows/on-trigger.yml | 8 ++++---- .../{build-test.yml => reusable-build-test.yml} | 0 ...elization.yml => reusable-check-levelization.yml} | 0 ...ommits.yml => reusable-check-missing-commits.yml} | 0 .../{notify-clio.yml => reusable-notify-clio.yml} | 0 7 files changed, 13 insertions(+), 13 deletions(-) rename .github/workflows/{build-test.yml => reusable-build-test.yml} (100%) rename .github/workflows/{check-levelization.yml => reusable-check-levelization.yml} (100%) rename .github/workflows/{check-missing-commits.yml => reusable-check-missing-commits.yml} (100%) rename .github/workflows/{notify-clio.yml => reusable-notify-clio.yml} (100%) diff --git a/.github/scripts/levelization/README.md b/.github/scripts/levelization/README.md index 31c6d34b6b..f3ba1e2518 100644 --- a/.github/scripts/levelization/README.md +++ b/.github/scripts/levelization/README.md @@ -72,15 +72,15 @@ It generates many files of [results](results): desired as described above. In a perfect repo, this file will be empty. This file is committed to the repo, and is used by the [levelization - Github workflow](../../workflows/check-levelization.yml) to validate + Github workflow](../../workflows/reusable-check-levelization.yml) to validate that nothing changed. - [`ordering.txt`](results/ordering.txt): A list showing relationships between modules where there are no loops as they actually exist, as opposed to how they are desired as described above. This file is committed to the repo, and is used by the [levelization - Github workflow](../../workflows/check-levelization.yml) to validate + Github workflow](../../workflows/reusable-check-levelization.yml) to validate that nothing changed. -- [`levelization.yml`](../../workflows/check-levelization.yml) +- [`levelization.yml`](../../workflows/reusable-check-levelization.yml) Github Actions workflow to test that levelization loops haven't changed. Unfortunately, if changes are detected, it can't tell if they are improvements or not, so if you have resolved any issues or diff --git a/.github/workflows/on-pr.yml b/.github/workflows/on-pr.yml index 9befd31e71..a206bbf041 100644 --- a/.github/workflows/on-pr.yml +++ b/.github/workflows/on-pr.yml @@ -50,8 +50,8 @@ jobs: files: | # These paths are unique to `on-pr.yml`. .github/scripts/levelization/** - .github/workflows/check-levelization.yml - .github/workflows/notify-clio.yml + .github/workflows/reusable-check-levelization.yml + .github/workflows/reusable-notify-clio.yml .github/workflows/on-pr.yml # Keep the paths below in sync with those in `on-trigger.yml`. 
@@ -59,7 +59,7 @@ jobs: .github/actions/build-test/** .github/actions/setup-conan/** .github/scripts/strategy-matrix/** - .github/workflows/build-test.yml + .github/workflows/reusable-build-test.yml .github/workflows/reusable-strategy-matrix.yml .codecov.yml cmake/** @@ -93,12 +93,12 @@ jobs: check-levelization: needs: should-run if: ${{ needs.should-run.outputs.go == 'true' }} - uses: ./.github/workflows/check-levelization.yml + uses: ./.github/workflows/reusable-check-levelization.yml build-test: needs: should-run if: ${{ needs.should-run.outputs.go == 'true' }} - uses: ./.github/workflows/build-test.yml + uses: ./.github/workflows/reusable-build-test.yml strategy: matrix: os: [linux, macos, windows] @@ -112,7 +112,7 @@ jobs: - should-run - build-test if: ${{ needs.should-run.outputs.go == 'true' && contains(fromJSON('["release", "master"]'), github.ref_name) }} - uses: ./.github/workflows/notify-clio.yml + uses: ./.github/workflows/reusable-notify-clio.yml secrets: clio_notify_token: ${{ secrets.CLIO_NOTIFY_TOKEN }} conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }} diff --git a/.github/workflows/on-trigger.yml b/.github/workflows/on-trigger.yml index 06abbd3f17..7b5bda021f 100644 --- a/.github/workflows/on-trigger.yml +++ b/.github/workflows/on-trigger.yml @@ -14,7 +14,7 @@ on: - master paths: # These paths are unique to `on-trigger.yml`. - - ".github/workflows/check-missing-commits.yml" + - ".github/workflows/reusable-check-missing-commits.yml" - ".github/workflows/on-trigger.yml" - ".github/workflows/publish-docs.yml" @@ -23,7 +23,7 @@ on: - ".github/actions/build-test/**" - ".github/actions/setup-conan/**" - ".github/scripts/strategy-matrix/**" - - ".github/workflows/build-test.yml" + - ".github/workflows/reusable-build-test.yml" - ".github/workflows/reusable-strategy-matrix.yml" - ".codecov.yml" - "cmake/**" @@ -71,10 +71,10 @@ defaults: jobs: check-missing-commits: if: ${{ github.event_name == 'push' && github.ref_type == 'branch' && contains(fromJSON('["develop", "release"]'), github.ref_name) }} - uses: ./.github/workflows/check-missing-commits.yml + uses: ./.github/workflows/reusable-check-missing-commits.yml build-test: - uses: ./.github/workflows/build-test.yml + uses: ./.github/workflows/reusable-build-test.yml strategy: matrix: os: [linux, macos, windows] diff --git a/.github/workflows/build-test.yml b/.github/workflows/reusable-build-test.yml similarity index 100% rename from .github/workflows/build-test.yml rename to .github/workflows/reusable-build-test.yml diff --git a/.github/workflows/check-levelization.yml b/.github/workflows/reusable-check-levelization.yml similarity index 100% rename from .github/workflows/check-levelization.yml rename to .github/workflows/reusable-check-levelization.yml diff --git a/.github/workflows/check-missing-commits.yml b/.github/workflows/reusable-check-missing-commits.yml similarity index 100% rename from .github/workflows/check-missing-commits.yml rename to .github/workflows/reusable-check-missing-commits.yml diff --git a/.github/workflows/notify-clio.yml b/.github/workflows/reusable-notify-clio.yml similarity index 100% rename from .github/workflows/notify-clio.yml rename to .github/workflows/reusable-notify-clio.yml From 807462b191a76a7c939743238c1f7ddbd125c24e Mon Sep 17 00:00:00 2001 From: Mayukha Vadari Date: Fri, 26 Sep 2025 16:13:15 -0400 Subject: [PATCH 208/244] Add `STInt32` as a new `SType` (#5788) This change adds `STInt32` as a new `SType` under the `STInteger` umbrella, with `SType` value `12`. 
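For illustration, a minimal sketch of how the new type and the `STObject`
accessors added below can be used (it assumes the test-only `sfDummyInt32`
field defined in this change; the helper name is illustrative only):

    #include <xrpl/protocol/SField.h>
    #include <xrpl/protocol/STInteger.h>
    #include <xrpl/protocol/STObject.h>

    ripple::STObject
    exampleInt32()
    {
        using namespace ripple;
        STInt32 const i32(-42);        // negative values are representable
        // i32.getText() == "-42", i32.getSType() == STI_INT32

        STObject obj(sfGeneric);       // any generic container field name works here
        obj.setFieldI32(sfDummyInt32, i32.value());
        // obj.getFieldI32(sfDummyInt32) == -42
        return obj;
    }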
This is the first and only `STInteger` type that supports negative values.
---
 include/xrpl/protocol/SField.h             |   7 +-
 include/xrpl/protocol/STInteger.h          |   2 +
 include/xrpl/protocol/STObject.h           |   4 +
 include/xrpl/protocol/detail/sfields.macro |   6 ++
 src/libxrpl/protocol/STInteger.cpp         |  29 ++++++
 src/libxrpl/protocol/STObject.cpp          |  12 +++
 src/libxrpl/protocol/STParsedJSON.cpp      |  97 ++++++++++++++-----
 src/libxrpl/protocol/STVar.cpp             |   3 +
 src/libxrpl/protocol/Serializer.cpp        |   6 ++
 src/test/protocol/STAccount_test.cpp       |  17 ++++
 src/test/protocol/STInteger_test.cpp       |  28 ++++++
 src/test/protocol/STParsedJSON_test.cpp    | 105 ++++++++++++++++++++-
 src/test/protocol/types_test.cpp           |  52 ----------
 13 files changed, 287 insertions(+), 81 deletions(-)
 delete mode 100644 src/test/protocol/types_test.cpp

diff --git a/include/xrpl/protocol/SField.h b/include/xrpl/protocol/SField.h
index 2f85cf3b7c..b6ae98b48f 100644
--- a/include/xrpl/protocol/SField.h
+++ b/include/xrpl/protocol/SField.h
@@ -72,8 +72,10 @@ class STCurrency;
     STYPE(STI_VL, 7)          \
     STYPE(STI_ACCOUNT, 8)     \
     STYPE(STI_NUMBER, 9)      \
+    STYPE(STI_INT32, 10)      \
+    STYPE(STI_INT64, 11)      \
                               \
-    /* 10-13 are reserved */  \
+    /* 12-13 are reserved */  \
     STYPE(STI_OBJECT, 14)     \
     STYPE(STI_ARRAY, 15)      \
                               \
@@ -356,6 +358,9 @@ using SF_UINT256 = TypedField<STBitString<256>>;
 using SF_UINT384 = TypedField<STBitString<384>>;
 using SF_UINT512 = TypedField<STBitString<512>>;
 
+using SF_INT32 = TypedField<STInteger<std::int32_t>>;
+using SF_INT64 = TypedField<STInteger<std::int64_t>>;
+
 using SF_ACCOUNT = TypedField<STAccount>;
 using SF_AMOUNT = TypedField<STAmount>;
 using SF_ISSUE = TypedField<STIssue>;
diff --git a/include/xrpl/protocol/STInteger.h b/include/xrpl/protocol/STInteger.h
index b259638774..154ee7f203 100644
--- a/include/xrpl/protocol/STInteger.h
+++ b/include/xrpl/protocol/STInteger.h
@@ -81,6 +81,8 @@ using STUInt16 = STInteger<std::uint16_t>;
 using STUInt32 = STInteger<std::uint32_t>;
 using STUInt64 = STInteger<std::uint64_t>;
 
+using STInt32 = STInteger<std::int32_t>;
+
 template <typename Integer>
 inline STInteger<Integer>::STInteger(Integer v) : value_(v)
 {
diff --git a/include/xrpl/protocol/STObject.h b/include/xrpl/protocol/STObject.h
index b3cb561390..1c22b08aba 100644
--- a/include/xrpl/protocol/STObject.h
+++ b/include/xrpl/protocol/STObject.h
@@ -231,6 +231,8 @@ public:
     getFieldH192(SField const& field) const;
     uint256
     getFieldH256(SField const& field) const;
+    std::int32_t
+    getFieldI32(SField const& field) const;
     AccountID
     getAccountID(SField const& field) const;
 
@@ -365,6 +367,8 @@ public:
     setFieldH256(SField const& field, uint256 const&);
     void
+    setFieldI32(SField const& field, std::int32_t);
+    void
     setFieldVL(SField const& field, Blob const&);
     void
     setFieldVL(SField const& field, Slice const&);
 
diff --git a/include/xrpl/protocol/detail/sfields.macro b/include/xrpl/protocol/detail/sfields.macro
index 10fe015dac..f932ae2328 100644
--- a/include/xrpl/protocol/detail/sfields.macro
+++ b/include/xrpl/protocol/detail/sfields.macro
@@ -208,6 +208,12 @@ TYPED_SFIELD(sfAssetsMaximum, NUMBER, 3)
 TYPED_SFIELD(sfAssetsTotal, NUMBER, 4)
 TYPED_SFIELD(sfLossUnrealized, NUMBER, 5)
 
+// int32
+// NOTE: Do not use `sfDummyInt32`. It's so far the only use of INT32
+// in this file and has been defined here for test only.
+// TODO: Replace `sfDummyInt32` with actually useful field.
+TYPED_SFIELD(sfDummyInt32, INT32, 1) // for tests only + // currency amount (common) TYPED_SFIELD(sfAmount, AMOUNT, 1) TYPED_SFIELD(sfBalance, AMOUNT, 2) diff --git a/src/libxrpl/protocol/STInteger.cpp b/src/libxrpl/protocol/STInteger.cpp index 5d6c1802cc..355fa4c113 100644 --- a/src/libxrpl/protocol/STInteger.cpp +++ b/src/libxrpl/protocol/STInteger.cpp @@ -249,4 +249,33 @@ STUInt64::getJson(JsonOptions) const return convertToString(value_, 16); // Convert to base 16 } +//------------------------------------------------------------------------------ + +template <> +STInteger::STInteger(SerialIter& sit, SField const& name) + : STInteger(name, sit.get32()) +{ +} + +template <> +SerializedTypeID +STInt32::getSType() const +{ + return STI_INT32; +} + +template <> +std::string +STInt32::getText() const +{ + return std::to_string(value_); +} + +template <> +Json::Value +STInt32::getJson(JsonOptions) const +{ + return value_; +} + } // namespace ripple diff --git a/src/libxrpl/protocol/STObject.cpp b/src/libxrpl/protocol/STObject.cpp index 9c23898a74..77e5fd1ad9 100644 --- a/src/libxrpl/protocol/STObject.cpp +++ b/src/libxrpl/protocol/STObject.cpp @@ -647,6 +647,12 @@ STObject::getFieldH256(SField const& field) const return getFieldByValue(field); } +std::int32_t +STObject::getFieldI32(SField const& field) const +{ + return getFieldByValue(field); +} + AccountID STObject::getAccountID(SField const& field) const { @@ -761,6 +767,12 @@ STObject::setFieldH256(SField const& field, uint256 const& v) setFieldUsingSetValue(field, v); } +void +STObject::setFieldI32(SField const& field, std::int32_t v) +{ + setFieldUsingSetValue(field, v); +} + void STObject::setFieldV256(SField const& field, STVector256 const& v) { diff --git a/src/libxrpl/protocol/STParsedJSON.cpp b/src/libxrpl/protocol/STParsedJSON.cpp index 9fbe5e7f91..f99fec6b87 100644 --- a/src/libxrpl/protocol/STParsedJSON.cpp +++ b/src/libxrpl/protocol/STParsedJSON.cpp @@ -563,30 +563,6 @@ parseLeaf( break; } - case STI_UINT192: { - if (!value.isString()) - { - error = bad_type(json_name, fieldName); - return ret; - } - - uint192 num; - - if (auto const s = value.asString(); !num.parseHex(s)) - { - if (!s.empty()) - { - error = invalid_data(json_name, fieldName); - return ret; - } - - num.zero(); - } - - ret = detail::make_stvar(field, num); - break; - } - case STI_UINT160: { if (!value.isString()) { @@ -611,6 +587,30 @@ parseLeaf( break; } + case STI_UINT192: { + if (!value.isString()) + { + error = bad_type(json_name, fieldName); + return ret; + } + + uint192 num; + + if (auto const s = value.asString(); !num.parseHex(s)) + { + if (!s.empty()) + { + error = invalid_data(json_name, fieldName); + return ret; + } + + num.zero(); + } + + ret = detail::make_stvar(field, num); + break; + } + case STI_UINT256: { if (!value.isString()) { @@ -635,6 +635,52 @@ parseLeaf( break; } + case STI_INT32: + try + { + if (value.isString()) + { + ret = detail::make_stvar( + field, + beast::lexicalCastThrow( + value.asString())); + } + else if (value.isInt()) + { + // future-proofing - a static assert failure if the JSON + // library ever supports larger ints + // In such case, we will need additional bounds checks here + static_assert( + std::is_same_v); + ret = detail::make_stvar(field, value.asInt()); + } + else if (value.isUInt()) + { + auto const uintValue = value.asUInt(); + if (uintValue > + static_cast( + std::numeric_limits::max())) + { + error = out_of_range(json_name, fieldName); + return ret; + } + ret = detail::make_stvar( + field, 
static_cast(uintValue)); + } + else + { + error = bad_type(json_name, fieldName); + return ret; + } + } + catch (std::exception const&) + { + error = invalid_data(json_name, fieldName); + return ret; + } + + break; + case STI_VL: if (!value.isString()) { @@ -1120,8 +1166,7 @@ parseArray( Json::Value const objectFields(json[i][objectName]); std::stringstream ss; - ss << json_name << "." - << "[" << i << "]." << objectName; + ss << json_name << "." << "[" << i << "]." << objectName; auto ret = parseObject( ss.str(), objectFields, nameField, depth + 1, error); diff --git a/src/libxrpl/protocol/STVar.cpp b/src/libxrpl/protocol/STVar.cpp index 24954c4add..c46effb47e 100644 --- a/src/libxrpl/protocol/STVar.cpp +++ b/src/libxrpl/protocol/STVar.cpp @@ -208,6 +208,9 @@ STVar::constructST(SerializedTypeID id, int depth, Args&&... args) case STI_UINT256: construct(std::forward(args)...); return; + case STI_INT32: + construct(std::forward(args)...); + return; case STI_VECTOR256: construct(std::forward(args)...); return; diff --git a/src/libxrpl/protocol/Serializer.cpp b/src/libxrpl/protocol/Serializer.cpp index b8a68d28b8..098e68d2b2 100644 --- a/src/libxrpl/protocol/Serializer.cpp +++ b/src/libxrpl/protocol/Serializer.cpp @@ -83,6 +83,12 @@ Serializer::addInteger(std::uint64_t i) { return add64(i); } +template <> +int +Serializer::addInteger(std::int32_t i) +{ + return add32(i); +} int Serializer::addRaw(Blob const& vector) diff --git a/src/test/protocol/STAccount_test.cpp b/src/test/protocol/STAccount_test.cpp index 9476a47c5e..cc318b4458 100644 --- a/src/test/protocol/STAccount_test.cpp +++ b/src/test/protocol/STAccount_test.cpp @@ -122,10 +122,27 @@ struct STAccount_test : public beast::unit_test::suite } } + void + testAccountID() + { + auto const s = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; + if (auto const parsed = parseBase58(s); BEAST_EXPECT(parsed)) + { + BEAST_EXPECT(toBase58(*parsed) == s); + } + + { + auto const s = + "âabcd1rNxp4h8apvRis6mJf9Sh8C6iRxfrDWNâabcdAVâ\xc2\x80\xc2\x8f"; + BEAST_EXPECT(!parseBase58(s)); + } + } + void run() override { testSTAccount(); + testAccountID(); } }; diff --git a/src/test/protocol/STInteger_test.cpp b/src/test/protocol/STInteger_test.cpp index f4572e49bd..6c4cdd6fcf 100644 --- a/src/test/protocol/STInteger_test.cpp +++ b/src/test/protocol/STInteger_test.cpp @@ -30,6 +30,7 @@ struct STInteger_test : public beast::unit_test::suite void testUInt8() { + testcase("UInt8"); STUInt8 u8(255); BEAST_EXPECT(u8.value() == 255); BEAST_EXPECT(u8.getText() == "255"); @@ -56,6 +57,7 @@ struct STInteger_test : public beast::unit_test::suite void testUInt16() { + testcase("UInt16"); STUInt16 u16(65535); BEAST_EXPECT(u16.value() == 65535); BEAST_EXPECT(u16.getText() == "65535"); @@ -80,6 +82,7 @@ struct STInteger_test : public beast::unit_test::suite void testUInt32() { + testcase("UInt32"); STUInt32 u32(4'294'967'295u); BEAST_EXPECT(u32.value() == 4'294'967'295u); BEAST_EXPECT(u32.getText() == "4294967295"); @@ -102,6 +105,7 @@ struct STInteger_test : public beast::unit_test::suite void testUInt64() { + testcase("UInt64"); STUInt64 u64(0xFFFFFFFFFFFFFFFFull); BEAST_EXPECT(u64.value() == 0xFFFFFFFFFFFFFFFFull); BEAST_EXPECT(u64.getText() == "18446744073709551615"); @@ -120,6 +124,29 @@ struct STInteger_test : public beast::unit_test::suite u64_2.getJson(JsonOptions::none) == "18446744073709551615"); } + void + testInt32() + { + testcase("Int32"); + { + int const minInt32 = -2147483648; + STInt32 i32(minInt32); + BEAST_EXPECT(i32.value() == minInt32); + 
BEAST_EXPECT(i32.getText() == "-2147483648"); + BEAST_EXPECT(i32.getSType() == STI_INT32); + BEAST_EXPECT(i32.getJson(JsonOptions::none) == minInt32); + } + + { + int const maxInt32 = 2147483647; + STInt32 i32(maxInt32); + BEAST_EXPECT(i32.value() == maxInt32); + BEAST_EXPECT(i32.getText() == "2147483647"); + BEAST_EXPECT(i32.getSType() == STI_INT32); + BEAST_EXPECT(i32.getJson(JsonOptions::none) == maxInt32); + } + } + void run() override { @@ -127,6 +154,7 @@ struct STInteger_test : public beast::unit_test::suite testUInt16(); testUInt32(); testUInt64(); + testInt32(); } }; diff --git a/src/test/protocol/STParsedJSON_test.cpp b/src/test/protocol/STParsedJSON_test.cpp index 9ecb4c0365..1e1e1fb9f4 100644 --- a/src/test/protocol/STParsedJSON_test.cpp +++ b/src/test/protocol/STParsedJSON_test.cpp @@ -736,6 +736,107 @@ class STParsedJSON_test : public beast::unit_test::suite } } + void + testInt32() + { + testcase("Int32"); + { + Json::Value j; + int const minInt32 = -2147483648; + j[sfDummyInt32] = minInt32; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + if (BEAST_EXPECT(obj.object->isFieldPresent(sfDummyInt32))) + BEAST_EXPECT(obj.object->getFieldI32(sfDummyInt32) == minInt32); + } + + // max value + { + Json::Value j; + int const maxInt32 = 2147483647; + j[sfDummyInt32] = maxInt32; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + if (BEAST_EXPECT(obj.object->isFieldPresent(sfDummyInt32))) + BEAST_EXPECT(obj.object->getFieldI32(sfDummyInt32) == maxInt32); + } + + // max uint value + { + Json::Value j; + unsigned int const maxUInt32 = 2147483647u; + j[sfDummyInt32] = maxUInt32; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + if (BEAST_EXPECT(obj.object->isFieldPresent(sfDummyInt32))) + BEAST_EXPECT( + obj.object->getFieldI32(sfDummyInt32) == + static_cast(maxUInt32)); + } + + // Test with string value + { + Json::Value j; + j[sfDummyInt32] = "2147483647"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + if (BEAST_EXPECT(obj.object->isFieldPresent(sfDummyInt32))) + BEAST_EXPECT( + obj.object->getFieldI32(sfDummyInt32) == 2147483647u); + } + + // Test with string negative value + { + Json::Value j; + int value = -2147483648; + j[sfDummyInt32] = std::to_string(value); + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(obj.object.has_value()); + if (BEAST_EXPECT(obj.object->isFieldPresent(sfDummyInt32))) + BEAST_EXPECT(obj.object->getFieldI32(sfDummyInt32) == value); + } + + // Test out of range value for int32 (negative) + { + Json::Value j; + j[sfDummyInt32] = "-2147483649"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test out of range value for int32 (positive) + { + Json::Value j; + j[sfDummyInt32] = 2147483648u; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test string value out of range + { + Json::Value j; + j[sfDummyInt32] = "2147483648"; + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test bad_type (arrayValue) + { + Json::Value j; + j[sfDummyInt32] = Json::Value(Json::arrayValue); + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + + // Test bad_type (objectValue) + { + Json::Value j; + j[sfDummyInt32] = Json::Value(Json::objectValue); + STParsedJSONObject obj("Test", j); + BEAST_EXPECT(!obj.object.has_value()); + } + } + void testBlob() { @@ -1338,8 +1439,7 @@ class STParsedJSON_test : public 
beast::unit_test::suite issueJson["issuer"] = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; j[sfAsset] = issueJson; STParsedJSONObject obj("Test", j); - if (BEAST_EXPECTS( - obj.object.has_value(), obj.error.toStyledString())) + if (BEAST_EXPECT(obj.object.has_value())) { BEAST_EXPECT(obj.object->isFieldPresent(sfAsset)); auto const& issueField = (*obj.object)[sfAsset]; @@ -2235,6 +2335,7 @@ class STParsedJSON_test : public beast::unit_test::suite testUInt160(); testUInt192(); testUInt256(); + testInt32(); testBlob(); testVector256(); testAccount(); diff --git a/src/test/protocol/types_test.cpp b/src/test/protocol/types_test.cpp deleted file mode 100644 index 8257d9c649..0000000000 --- a/src/test/protocol/types_test.cpp +++ /dev/null @@ -1,52 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2013 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#include -#include - -namespace ripple { - -struct types_test : public beast::unit_test::suite -{ - void - testAccountID() - { - auto const s = "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"; - if (auto const parsed = parseBase58(s); BEAST_EXPECT(parsed)) - { - BEAST_EXPECT(toBase58(*parsed) == s); - } - - { - auto const s = - "âabcd1rNxp4h8apvRis6mJf9Sh8C6iRxfrDWNâabcdAVâ\xc2\x80\xc2\x8f"; - BEAST_EXPECT(!parseBase58(s)); - } - } - - void - run() override - { - testAccountID(); - } -}; - -BEAST_DEFINE_TESTSUITE(types, protocol, ripple); - -} // namespace ripple From 0fd2f715bbe9d45a81bc6163cc24f25447dadbf3 Mon Sep 17 00:00:00 2001 From: Mayukha Vadari Date: Sat, 27 Sep 2025 03:04:04 -0400 Subject: [PATCH 209/244] switch `fixIncludeKeyletFields` to `Supported::yes` (#5819) --- include/xrpl/protocol/detail/features.macro | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/xrpl/protocol/detail/features.macro b/include/xrpl/protocol/detail/features.macro index ce9583dace..d25d33b663 100644 --- a/include/xrpl/protocol/detail/features.macro +++ b/include/xrpl/protocol/detail/features.macro @@ -32,7 +32,7 @@ // If you add an amendment here, then do not forget to increment `numFeatures` // in include/xrpl/protocol/Feature.h. 
-XRPL_FIX (IncludeKeyletFields, Supported::no, VoteBehavior::DefaultNo) +XRPL_FIX (IncludeKeyletFields, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(DynamicMPT, Supported::no, VoteBehavior::DefaultNo) XRPL_FIX (TokenEscrowV1, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (DelegateV1_1, Supported::no, VoteBehavior::DefaultNo) From d67dcfe3c41218d6e09ce7dc73121c908ecc9cca Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Mon, 29 Sep 2025 17:31:42 -0400 Subject: [PATCH 210/244] refactor: Restructure Transactor::preflight to reduce boilerplate (#5592) * Restructures `Transactor::preflight` to create several functions that will remove the need for error-prone boilerplate code in derived classes' implementations of `preflight`. --- include/xrpl/protocol/Permissions.h | 3 + include/xrpl/protocol/TER.h | 3 +- src/libxrpl/protocol/Permissions.cpp | 21 ++- src/test/app/AMM_test.cpp | 6 +- src/test/rpc/AccountSet_test.cpp | 29 +++ src/xrpld/app/tx/detail/AMMBid.cpp | 20 +- src/xrpld/app/tx/detail/AMMBid.h | 3 + src/xrpld/app/tx/detail/AMMClawback.cpp | 20 +- src/xrpld/app/tx/detail/AMMClawback.h | 3 + src/xrpld/app/tx/detail/AMMCreate.cpp | 22 +-- src/xrpld/app/tx/detail/AMMCreate.h | 3 + src/xrpld/app/tx/detail/AMMDelete.cpp | 20 +- src/xrpld/app/tx/detail/AMMDelete.h | 3 + src/xrpld/app/tx/detail/AMMDeposit.cpp | 26 +-- src/xrpld/app/tx/detail/AMMDeposit.h | 6 + src/xrpld/app/tx/detail/AMMVote.cpp | 20 +- src/xrpld/app/tx/detail/AMMVote.h | 3 + src/xrpld/app/tx/detail/AMMWithdraw.cpp | 25 +-- src/xrpld/app/tx/detail/AMMWithdraw.h | 6 + src/xrpld/app/tx/detail/Batch.cpp | 43 +++-- src/xrpld/app/tx/detail/Batch.h | 6 + src/xrpld/app/tx/detail/CancelCheck.cpp | 16 +- src/xrpld/app/tx/detail/CancelOffer.cpp | 14 +- src/xrpld/app/tx/detail/CashCheck.cpp | 16 +- src/xrpld/app/tx/detail/Change.cpp | 7 +- src/xrpld/app/tx/detail/Change.h | 3 - src/xrpld/app/tx/detail/Clawback.cpp | 17 +- src/xrpld/app/tx/detail/Clawback.h | 3 + src/xrpld/app/tx/detail/CreateCheck.cpp | 15 +- src/xrpld/app/tx/detail/CreateOffer.cpp | 36 ++-- src/xrpld/app/tx/detail/CreateOffer.h | 6 + src/xrpld/app/tx/detail/CreateTicket.cpp | 11 +- src/xrpld/app/tx/detail/Credentials.cpp | 76 +++----- src/xrpld/app/tx/detail/Credentials.h | 9 + src/xrpld/app/tx/detail/DID.cpp | 22 +-- src/xrpld/app/tx/detail/DelegateSet.cpp | 8 +- src/xrpld/app/tx/detail/DeleteAccount.cpp | 22 +-- src/xrpld/app/tx/detail/DeleteAccount.h | 3 + src/xrpld/app/tx/detail/DeleteOracle.cpp | 14 +- src/xrpld/app/tx/detail/DepositPreauth.cpp | 33 ++-- src/xrpld/app/tx/detail/DepositPreauth.h | 3 + src/xrpld/app/tx/detail/Escrow.cpp | 69 +++---- src/xrpld/app/tx/detail/Escrow.h | 15 ++ src/xrpld/app/tx/detail/LedgerStateFix.cpp | 13 +- src/xrpld/app/tx/detail/MPTokenAuthorize.cpp | 17 +- src/xrpld/app/tx/detail/MPTokenAuthorize.h | 3 + .../app/tx/detail/MPTokenIssuanceCreate.cpp | 32 ++-- .../app/tx/detail/MPTokenIssuanceCreate.h | 6 + .../app/tx/detail/MPTokenIssuanceDestroy.cpp | 18 +- .../app/tx/detail/MPTokenIssuanceDestroy.h | 3 + .../app/tx/detail/MPTokenIssuanceSet.cpp | 32 ++-- src/xrpld/app/tx/detail/MPTokenIssuanceSet.h | 6 + .../app/tx/detail/NFTokenAcceptOffer.cpp | 17 +- src/xrpld/app/tx/detail/NFTokenAcceptOffer.h | 3 + src/xrpld/app/tx/detail/NFTokenBurn.cpp | 11 +- .../app/tx/detail/NFTokenCancelOffer.cpp | 17 +- src/xrpld/app/tx/detail/NFTokenCancelOffer.h | 3 + .../app/tx/detail/NFTokenCreateOffer.cpp | 17 +- src/xrpld/app/tx/detail/NFTokenCreateOffer.h | 3 + src/xrpld/app/tx/detail/NFTokenMint.cpp | 36 ++-- 
src/xrpld/app/tx/detail/NFTokenMint.h | 6 + src/xrpld/app/tx/detail/NFTokenModify.cpp | 18 +- src/xrpld/app/tx/detail/NFTokenModify.h | 3 + src/xrpld/app/tx/detail/PayChan.cpp | 56 +++--- src/xrpld/app/tx/detail/PayChan.h | 12 ++ src/xrpld/app/tx/detail/Payment.cpp | 37 ++-- src/xrpld/app/tx/detail/Payment.h | 6 + .../tx/detail/PermissionedDomainDelete.cpp | 14 +- .../app/tx/detail/PermissionedDomainSet.cpp | 21 +-- .../app/tx/detail/PermissionedDomainSet.h | 3 + src/xrpld/app/tx/detail/SetAccount.cpp | 17 +- src/xrpld/app/tx/detail/SetAccount.h | 3 + src/xrpld/app/tx/detail/SetOracle.cpp | 11 +- src/xrpld/app/tx/detail/SetRegularKey.cpp | 14 +- src/xrpld/app/tx/detail/SetSignerList.cpp | 19 +- src/xrpld/app/tx/detail/SetSignerList.h | 3 + src/xrpld/app/tx/detail/SetTrust.cpp | 17 +- src/xrpld/app/tx/detail/SetTrust.h | 3 + src/xrpld/app/tx/detail/Transactor.cpp | 171 +++++++++++++----- src/xrpld/app/tx/detail/Transactor.h | 162 ++++++++++++++++- src/xrpld/app/tx/detail/VaultClawback.cpp | 11 +- src/xrpld/app/tx/detail/VaultCreate.cpp | 69 +++---- src/xrpld/app/tx/detail/VaultCreate.h | 6 + src/xrpld/app/tx/detail/VaultDelete.cpp | 11 +- src/xrpld/app/tx/detail/VaultDeposit.cpp | 11 +- src/xrpld/app/tx/detail/VaultSet.cpp | 22 +-- src/xrpld/app/tx/detail/VaultSet.h | 3 + src/xrpld/app/tx/detail/VaultWithdraw.cpp | 11 +- src/xrpld/app/tx/detail/XChainBridge.cpp | 89 ++------- src/xrpld/app/tx/detail/XChainBridge.h | 3 + src/xrpld/app/tx/detail/applySteps.cpp | 2 +- 91 files changed, 936 insertions(+), 844 deletions(-) diff --git a/include/xrpl/protocol/Permissions.h b/include/xrpl/protocol/Permissions.h index 2eca441124..d3f5253cd0 100644 --- a/include/xrpl/protocol/Permissions.h +++ b/include/xrpl/protocol/Permissions.h @@ -86,6 +86,9 @@ public: std::optional getGranularTxType(GranularPermissionType const& gpType) const; + std::optional> const + getTxFeature(TxType txType) const; + bool isDelegatable(std::uint32_t const& permissionValue, Rules const& rules) const; diff --git a/include/xrpl/protocol/TER.h b/include/xrpl/protocol/TER.h index 9ace6b80f8..0a3a3b999e 100644 --- a/include/xrpl/protocol/TER.h +++ b/include/xrpl/protocol/TER.h @@ -673,7 +673,8 @@ isTerRetry(TER x) noexcept inline bool isTesSuccess(TER x) noexcept { - return (x == tesSUCCESS); + // Makes use of TERSubset::operator bool() + return !(x); } inline bool diff --git a/src/libxrpl/protocol/Permissions.cpp b/src/libxrpl/protocol/Permissions.cpp index 6a4b0678e0..c9e32c5056 100644 --- a/src/libxrpl/protocol/Permissions.cpp +++ b/src/libxrpl/protocol/Permissions.cpp @@ -147,6 +147,19 @@ Permission::getGranularTxType(GranularPermissionType const& gpType) const return std::nullopt; } +std::optional> const +Permission::getTxFeature(TxType txType) const +{ + auto const txFeaturesIt = txFeatureMap_.find(txType); + XRPL_ASSERT( + txFeaturesIt != txFeatureMap_.end(), + "ripple::Permissions::getTxFeature : tx exists in txFeatureMap_"); + + if (txFeaturesIt->second == uint256{}) + return std::nullopt; + return txFeaturesIt->second; +} + bool Permission::isDelegatable( std::uint32_t const& permissionValue, @@ -166,16 +179,12 @@ Permission::isDelegatable( if (it == delegatableTx_.end()) return false; - auto const txFeaturesIt = txFeatureMap_.find(txType); - XRPL_ASSERT( - txFeaturesIt != txFeatureMap_.end(), - "ripple::Permissions::isDelegatable : tx exists in txFeatureMap_"); + auto const feature = getTxFeature(txType); // fixDelegateV1_1: Delegation is only allowed if the required amendment // for the transaction is enabled. 
For transactions that do not require // an amendment, delegation is always allowed. - if (txFeaturesIt->second != uint256{} && - !rules.enabled(txFeaturesIt->second)) + if (feature && !rules.enabled(*feature)) return false; } diff --git a/src/test/app/AMM_test.cpp b/src/test/app/AMM_test.cpp index cfe1ffab16..1fe37bb7c1 100644 --- a/src/test/app/AMM_test.cpp +++ b/src/test/app/AMM_test.cpp @@ -3572,7 +3572,7 @@ private: env.current()->rules(), tapNONE, env.journal); - auto pf = AMMBid::preflight(pfctx); + auto pf = Transactor::invokePreflight(pfctx); BEAST_EXPECT(pf == temDISABLED); env.app().config().features.insert(featureAMM); } @@ -3587,7 +3587,7 @@ private: env.current()->rules(), tapNONE, env.journal); - auto pf = AMMBid::preflight(pfctx); + auto pf = Transactor::invokePreflight(pfctx); BEAST_EXPECT(pf != tesSUCCESS); } @@ -3602,7 +3602,7 @@ private: env.current()->rules(), tapNONE, env.journal); - auto pf = AMMBid::preflight(pfctx); + auto pf = Transactor::invokePreflight(pfctx); BEAST_EXPECT(pf == temBAD_AMM_TOKENS); } } diff --git a/src/test/rpc/AccountSet_test.cpp b/src/test/rpc/AccountSet_test.cpp index 3615a715cd..52dc331a2b 100644 --- a/src/test/rpc/AccountSet_test.cpp +++ b/src/test/rpc/AccountSet_test.cpp @@ -19,6 +19,8 @@ #include +#include + #include #include #include @@ -578,6 +580,32 @@ public: env.close(); } + void + testBadSigningKey() + { + using namespace test::jtx; + testcase("Bad signing key"); + Env env(*this); + Account const alice("alice"); + + env.fund(XRP(10000), alice); + env.close(); + + auto jtx = env.jt(noop("alice"), ter(temBAD_SIGNATURE)); + if (!BEAST_EXPECT(jtx.stx)) + return; + auto stx = std::make_shared(*jtx.stx); + stx->at(sfSigningPubKey) = makeSlice(std::string("badkey")); + + env.app().openLedger().modify([&](OpenView& view, beast::Journal j) { + auto const result = + ripple::apply(env.app(), view, *stx, tapNONE, j); + BEAST_EXPECT(result.ter == temBAD_SIGNATURE); + BEAST_EXPECT(!result.applied); + return result.applied; + }); + } + void run() override { @@ -594,6 +622,7 @@ public: testRequireAuthWithDir(); testTransferRate(); testTicket(); + testBadSigningKey(); } }; diff --git a/src/xrpld/app/tx/detail/AMMBid.cpp b/src/xrpld/app/tx/detail/AMMBid.cpp index 806c075c4f..769668b07b 100644 --- a/src/xrpld/app/tx/detail/AMMBid.cpp +++ b/src/xrpld/app/tx/detail/AMMBid.cpp @@ -30,21 +30,15 @@ namespace ripple { +bool +AMMBid::checkExtraFeatures(PreflightContext const& ctx) +{ + return ammEnabled(ctx.rules); +} + NotTEC AMMBid::preflight(PreflightContext const& ctx) { - if (!ammEnabled(ctx.rules)) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - - if (ctx.tx.getFlags() & tfUniversalMask) - { - JLOG(ctx.j.debug()) << "AMM Bid: invalid flags."; - return temINVALID_FLAG; - } - if (auto const res = invalidAMMAssetPair( ctx.tx[sfAsset].get(), ctx.tx[sfAsset2].get())) { @@ -95,7 +89,7 @@ AMMBid::preflight(PreflightContext const& ctx) } } - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/AMMBid.h b/src/xrpld/app/tx/detail/AMMBid.h index 4bb3a2adfd..4a527b6a93 100644 --- a/src/xrpld/app/tx/detail/AMMBid.h +++ b/src/xrpld/app/tx/detail/AMMBid.h @@ -71,6 +71,9 @@ public: { } + static bool + checkExtraFeatures(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/AMMClawback.cpp b/src/xrpld/app/tx/detail/AMMClawback.cpp index 634b948a64..9a79c94a58 100644 --- a/src/xrpld/app/tx/detail/AMMClawback.cpp +++ 
b/src/xrpld/app/tx/detail/AMMClawback.cpp @@ -33,19 +33,15 @@ namespace ripple { +std::uint32_t +AMMClawback::getFlagsMask(PreflightContext const& ctx) +{ + return tfAMMClawbackMask; +} + NotTEC AMMClawback::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureAMMClawback)) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; // LCOV_EXCL_LINE - - auto const flags = ctx.tx.getFlags(); - if (flags & tfAMMClawbackMask) - return temINVALID_FLAG; - AccountID const issuer = ctx.tx[sfAccount]; AccountID const holder = ctx.tx[sfHolder]; @@ -63,6 +59,8 @@ AMMClawback::preflight(PreflightContext const& ctx) if (isXRP(asset)) return temMALFORMED; + auto const flags = ctx.tx.getFlags(); + if (flags & tfClawTwoAssets && asset.account != asset2.account) { JLOG(ctx.j.trace()) @@ -88,7 +86,7 @@ AMMClawback::preflight(PreflightContext const& ctx) if (clawAmount && *clawAmount <= beast::zero) return temBAD_AMOUNT; - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/AMMClawback.h b/src/xrpld/app/tx/detail/AMMClawback.h index fdcfc53e2c..1984937971 100644 --- a/src/xrpld/app/tx/detail/AMMClawback.h +++ b/src/xrpld/app/tx/detail/AMMClawback.h @@ -33,6 +33,9 @@ public: { } + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/AMMCreate.cpp b/src/xrpld/app/tx/detail/AMMCreate.cpp index 03c972f1cd..63e20b42fb 100644 --- a/src/xrpld/app/tx/detail/AMMCreate.cpp +++ b/src/xrpld/app/tx/detail/AMMCreate.cpp @@ -31,21 +31,15 @@ namespace ripple { +bool +AMMCreate::checkExtraFeatures(PreflightContext const& ctx) +{ + return ammEnabled(ctx.rules); +} + NotTEC AMMCreate::preflight(PreflightContext const& ctx) { - if (!ammEnabled(ctx.rules)) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - - if (ctx.tx.getFlags() & tfUniversalMask) - { - JLOG(ctx.j.debug()) << "AMM Instance: invalid flags."; - return temINVALID_FLAG; - } - auto const amount = ctx.tx[sfAmount]; auto const amount2 = ctx.tx[sfAmount2]; @@ -74,14 +68,14 @@ AMMCreate::preflight(PreflightContext const& ctx) return temBAD_FEE; } - return preflight2(ctx); + return tesSUCCESS; } XRPAmount AMMCreate::calculateBaseFee(ReadView const& view, STTx const& tx) { // The fee required for AMMCreate is one owner reserve. 
- return view.fees().increment; + return calculateOwnerReserveFee(view, tx); } TER diff --git a/src/xrpld/app/tx/detail/AMMCreate.h b/src/xrpld/app/tx/detail/AMMCreate.h index 189d66a55a..98231e5554 100644 --- a/src/xrpld/app/tx/detail/AMMCreate.h +++ b/src/xrpld/app/tx/detail/AMMCreate.h @@ -63,6 +63,9 @@ public: { } + static bool + checkExtraFeatures(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/AMMDelete.cpp b/src/xrpld/app/tx/detail/AMMDelete.cpp index 004e0b2229..663a4c4b0a 100644 --- a/src/xrpld/app/tx/detail/AMMDelete.cpp +++ b/src/xrpld/app/tx/detail/AMMDelete.cpp @@ -27,22 +27,16 @@ namespace ripple { +bool +AMMDelete::checkExtraFeatures(PreflightContext const& ctx) +{ + return ammEnabled(ctx.rules); +} + NotTEC AMMDelete::preflight(PreflightContext const& ctx) { - if (!ammEnabled(ctx.rules)) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - - if (ctx.tx.getFlags() & tfUniversalMask) - { - JLOG(ctx.j.debug()) << "AMM Delete: invalid flags."; - return temINVALID_FLAG; - } - - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/AMMDelete.h b/src/xrpld/app/tx/detail/AMMDelete.h index 19885b1dad..36dace2e18 100644 --- a/src/xrpld/app/tx/detail/AMMDelete.h +++ b/src/xrpld/app/tx/detail/AMMDelete.h @@ -39,6 +39,9 @@ public: { } + static bool + checkExtraFeatures(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/AMMDeposit.cpp b/src/xrpld/app/tx/detail/AMMDeposit.cpp index 614d788c71..8a3e50ed63 100644 --- a/src/xrpld/app/tx/detail/AMMDeposit.cpp +++ b/src/xrpld/app/tx/detail/AMMDeposit.cpp @@ -29,21 +29,23 @@ namespace ripple { +bool +AMMDeposit::checkExtraFeatures(PreflightContext const& ctx) +{ + return ammEnabled(ctx.rules); +} + +std::uint32_t +AMMDeposit::getFlagsMask(PreflightContext const& ctx) + +{ + return tfDepositMask; +} + NotTEC AMMDeposit::preflight(PreflightContext const& ctx) { - if (!ammEnabled(ctx.rules)) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - auto const flags = ctx.tx.getFlags(); - if (flags & tfDepositMask) - { - JLOG(ctx.j.debug()) << "AMM Deposit: invalid flags."; - return temINVALID_FLAG; - } auto const amount = ctx.tx[~sfAmount]; auto const amount2 = ctx.tx[~sfAmount2]; @@ -159,7 +161,7 @@ AMMDeposit::preflight(PreflightContext const& ctx) return temBAD_FEE; } - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/AMMDeposit.h b/src/xrpld/app/tx/detail/AMMDeposit.h index 0acb1dd9ab..c1a37be452 100644 --- a/src/xrpld/app/tx/detail/AMMDeposit.h +++ b/src/xrpld/app/tx/detail/AMMDeposit.h @@ -68,6 +68,12 @@ public: { } + static bool + checkExtraFeatures(PreflightContext const& ctx); + + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/AMMVote.cpp b/src/xrpld/app/tx/detail/AMMVote.cpp index 6fbff86056..0ffbb38b37 100644 --- a/src/xrpld/app/tx/detail/AMMVote.cpp +++ b/src/xrpld/app/tx/detail/AMMVote.cpp @@ -27,15 +27,15 @@ namespace ripple { +bool +AMMVote::checkExtraFeatures(PreflightContext const& ctx) +{ + return ammEnabled(ctx.rules); +} + NotTEC AMMVote::preflight(PreflightContext const& ctx) { - if (!ammEnabled(ctx.rules)) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - if (auto 
const res = invalidAMMAssetPair( ctx.tx[sfAsset].get(), ctx.tx[sfAsset2].get())) { @@ -43,19 +43,13 @@ AMMVote::preflight(PreflightContext const& ctx) return res; } - if (ctx.tx.getFlags() & tfUniversalMask) - { - JLOG(ctx.j.debug()) << "AMM Vote: invalid flags."; - return temINVALID_FLAG; - } - if (ctx.tx[sfTradingFee] > TRADING_FEE_THRESHOLD) { JLOG(ctx.j.debug()) << "AMM Vote: invalid trading fee."; return temBAD_FEE; } - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/AMMVote.h b/src/xrpld/app/tx/detail/AMMVote.h index 2bee01aff5..dc99480111 100644 --- a/src/xrpld/app/tx/detail/AMMVote.h +++ b/src/xrpld/app/tx/detail/AMMVote.h @@ -56,6 +56,9 @@ public: { } + static bool + checkExtraFeatures(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/AMMWithdraw.cpp b/src/xrpld/app/tx/detail/AMMWithdraw.cpp index 9bc36efc81..f5af9dfb9c 100644 --- a/src/xrpld/app/tx/detail/AMMWithdraw.cpp +++ b/src/xrpld/app/tx/detail/AMMWithdraw.cpp @@ -28,21 +28,22 @@ namespace ripple { +bool +AMMWithdraw::checkExtraFeatures(PreflightContext const& ctx) +{ + return ammEnabled(ctx.rules); +} + +std::uint32_t +AMMWithdraw::getFlagsMask(PreflightContext const& ctx) +{ + return tfWithdrawMask; +} + NotTEC AMMWithdraw::preflight(PreflightContext const& ctx) { - if (!ammEnabled(ctx.rules)) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - auto const flags = ctx.tx.getFlags(); - if (flags & tfWithdrawMask) - { - JLOG(ctx.j.debug()) << "AMM Withdraw: invalid flags."; - return temINVALID_FLAG; - } auto const amount = ctx.tx[~sfAmount]; auto const amount2 = ctx.tx[~sfAmount2]; @@ -150,7 +151,7 @@ AMMWithdraw::preflight(PreflightContext const& ctx) } } - return preflight2(ctx); + return tesSUCCESS; } static std::optional diff --git a/src/xrpld/app/tx/detail/AMMWithdraw.h b/src/xrpld/app/tx/detail/AMMWithdraw.h index e9a597bdb7..31a7904626 100644 --- a/src/xrpld/app/tx/detail/AMMWithdraw.h +++ b/src/xrpld/app/tx/detail/AMMWithdraw.h @@ -76,6 +76,12 @@ public: { } + static bool + checkExtraFeatures(PreflightContext const& ctx); + + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/Batch.cpp b/src/xrpld/app/tx/detail/Batch.cpp index 86d6e8a8f4..cba89348d0 100644 --- a/src/xrpld/app/tx/detail/Batch.cpp +++ b/src/xrpld/app/tx/detail/Batch.cpp @@ -164,6 +164,12 @@ Batch::calculateBaseFee(ReadView const& view, STTx const& tx) return signerFees + txnFees + batchBase; } +std::uint32_t +Batch::getFlagsMask(PreflightContext const& ctx) +{ + return tfBatchMask; +} + /** * @brief Performs preflight validation checks for a Batch transaction. 
* @@ -200,23 +206,9 @@ Batch::calculateBaseFee(ReadView const& view, STTx const& tx) NotTEC Batch::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureBatch)) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - auto const parentBatchId = ctx.tx.getTransactionID(); - auto const outerAccount = ctx.tx.getAccountID(sfAccount); auto const flags = ctx.tx.getFlags(); - if (flags & tfBatchMask) - { - JLOG(ctx.j.debug()) << "BatchTrace[" << parentBatchId << "]:" - << "invalid flags."; - return temINVALID_FLAG; - } - if (std::popcount( flags & (tfAllOrNothing | tfOnlyOne | tfUntilFailure | tfIndependent)) != 1) @@ -242,7 +234,6 @@ Batch::preflight(PreflightContext const& ctx) } // Validation Inner Batch Txns - std::unordered_set requiredSigners; std::unordered_set uniqueHashes; std::unordered_map> accountSeqTicket; @@ -372,6 +363,23 @@ Batch::preflight(PreflightContext const& ctx) } } } + } + + return tesSUCCESS; +} + +NotTEC +Batch::preflightSigValidated(PreflightContext const& ctx) +{ + auto const parentBatchId = ctx.tx.getTransactionID(); + auto const outerAccount = ctx.tx.getAccountID(sfAccount); + auto const& rawTxns = ctx.tx.getFieldArray(sfRawTransactions); + + // Build the signers list + std::unordered_set requiredSigners; + for (STObject const& rb : rawTxns) + { + auto const innerAccount = rb.getAccountID(sfAccount); // If the inner account is the same as the outer account, do not add the // inner account to the required signers set. @@ -379,11 +387,6 @@ Batch::preflight(PreflightContext const& ctx) requiredSigners.insert(innerAccount); } - // LCOV_EXCL_START - if (auto const ret = preflight2(ctx); !isTesSuccess(ret)) - return ret; - // LCOV_EXCL_STOP - // Validation Batch Signers std::unordered_set batchSigners; if (ctx.tx.isFieldPresent(sfBatchSigners)) diff --git a/src/xrpld/app/tx/detail/Batch.h b/src/xrpld/app/tx/detail/Batch.h index 211bce0589..07863a5f33 100644 --- a/src/xrpld/app/tx/detail/Batch.h +++ b/src/xrpld/app/tx/detail/Batch.h @@ -40,9 +40,15 @@ public: static XRPAmount calculateBaseFee(ReadView const& view, STTx const& tx); + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); + static NotTEC + preflightSigValidated(PreflightContext const& ctx); + static NotTEC checkSign(PreclaimContext const& ctx); diff --git a/src/xrpld/app/tx/detail/CancelCheck.cpp b/src/xrpld/app/tx/detail/CancelCheck.cpp index 39d0d23096..f1a9b42a89 100644 --- a/src/xrpld/app/tx/detail/CancelCheck.cpp +++ b/src/xrpld/app/tx/detail/CancelCheck.cpp @@ -32,21 +32,7 @@ namespace ripple { NotTEC CancelCheck::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureChecks)) - return temDISABLED; - - NotTEC const ret{preflight1(ctx)}; - if (!isTesSuccess(ret)) - return ret; - - if (ctx.tx.getFlags() & tfUniversalMask) - { - // There are no flags (other than universal) for CreateCheck yet. 
- JLOG(ctx.j.warn()) << "Malformed transaction: Invalid flags set."; - return temINVALID_FLAG; - } - - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/CancelOffer.cpp b/src/xrpld/app/tx/detail/CancelOffer.cpp index e0a5c7baa7..e7ec28ce17 100644 --- a/src/xrpld/app/tx/detail/CancelOffer.cpp +++ b/src/xrpld/app/tx/detail/CancelOffer.cpp @@ -28,25 +28,13 @@ namespace ripple { NotTEC CancelOffer::preflight(PreflightContext const& ctx) { - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - - auto const uTxFlags = ctx.tx.getFlags(); - - if (uTxFlags & tfUniversalMask) - { - JLOG(ctx.j.trace()) << "Malformed transaction: " - << "Invalid flags set."; - return temINVALID_FLAG; - } - if (!ctx.tx[sfOfferSequence]) { JLOG(ctx.j.trace()) << "CancelOffer::preflight: missing sequence"; return temBAD_SEQUENCE; } - return preflight2(ctx); + return tesSUCCESS; } //------------------------------------------------------------------------------ diff --git a/src/xrpld/app/tx/detail/CashCheck.cpp b/src/xrpld/app/tx/detail/CashCheck.cpp index 0f1d08689c..f8ab6189a3 100644 --- a/src/xrpld/app/tx/detail/CashCheck.cpp +++ b/src/xrpld/app/tx/detail/CashCheck.cpp @@ -35,20 +35,6 @@ namespace ripple { NotTEC CashCheck::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureChecks)) - return temDISABLED; - - NotTEC const ret{preflight1(ctx)}; - if (!isTesSuccess(ret)) - return ret; - - if (ctx.tx.getFlags() & tfUniversalMask) - { - // There are no flags (other than universal) for CashCheck yet. - JLOG(ctx.j.warn()) << "Malformed transaction: Invalid flags set."; - return temINVALID_FLAG; - } - // Exactly one of Amount or DeliverMin must be present. auto const optAmount = ctx.tx[~sfAmount]; auto const optDeliverMin = ctx.tx[~sfDeliverMin]; @@ -76,7 +62,7 @@ CashCheck::preflight(PreflightContext const& ctx) return temBAD_CURRENCY; } - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/Change.cpp b/src/xrpld/app/tx/detail/Change.cpp index de30ed5f62..d6a31024f3 100644 --- a/src/xrpld/app/tx/detail/Change.cpp +++ b/src/xrpld/app/tx/detail/Change.cpp @@ -33,11 +33,12 @@ namespace ripple { +template <> NotTEC -Change::preflight(PreflightContext const& ctx) +Transactor::invokePreflight(PreflightContext const& ctx) { - auto const ret = preflight0(ctx); - if (!isTesSuccess(ret)) + // 0 means "Allow any flags" + if (auto const ret = preflight0(ctx, 0)) return ret; auto account = ctx.tx.getAccountID(sfAccount); diff --git a/src/xrpld/app/tx/detail/Change.h b/src/xrpld/app/tx/detail/Change.h index d710827dd6..7b8fbf3421 100644 --- a/src/xrpld/app/tx/detail/Change.h +++ b/src/xrpld/app/tx/detail/Change.h @@ -33,9 +33,6 @@ public: { } - static NotTEC - preflight(PreflightContext const& ctx); - TER doApply() override; void diff --git a/src/xrpld/app/tx/detail/Clawback.cpp b/src/xrpld/app/tx/detail/Clawback.cpp index 08cf4baef0..b346e4a1c1 100644 --- a/src/xrpld/app/tx/detail/Clawback.cpp +++ b/src/xrpld/app/tx/detail/Clawback.cpp @@ -75,25 +75,22 @@ preflightHelper(PreflightContext const& ctx) return tesSUCCESS; } +std::uint32_t +Clawback::getFlagsMask(PreflightContext const& ctx) +{ + return tfClawbackMask; +} + NotTEC Clawback::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureClawback)) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - - if (ctx.tx.getFlags() & tfClawbackMask) - return temINVALID_FLAG; - if (auto const ret = std::visit( [&](T 
const&) { return preflightHelper(ctx); }, ctx.tx[sfAmount].asset().value()); !isTesSuccess(ret)) return ret; - return preflight2(ctx); + return tesSUCCESS; } template diff --git a/src/xrpld/app/tx/detail/Clawback.h b/src/xrpld/app/tx/detail/Clawback.h index d908a2e4ef..b02233c2ed 100644 --- a/src/xrpld/app/tx/detail/Clawback.h +++ b/src/xrpld/app/tx/detail/Clawback.h @@ -33,6 +33,9 @@ public: { } + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/CreateCheck.cpp b/src/xrpld/app/tx/detail/CreateCheck.cpp index 4dbfd1f81d..57f3a92255 100644 --- a/src/xrpld/app/tx/detail/CreateCheck.cpp +++ b/src/xrpld/app/tx/detail/CreateCheck.cpp @@ -31,19 +31,6 @@ namespace ripple { NotTEC CreateCheck::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureChecks)) - return temDISABLED; - - NotTEC const ret{preflight1(ctx)}; - if (!isTesSuccess(ret)) - return ret; - - if (ctx.tx.getFlags() & tfUniversalMask) - { - // There are no flags (other than universal) for CreateCheck yet. - JLOG(ctx.j.warn()) << "Malformed transaction: Invalid flags set."; - return temINVALID_FLAG; - } if (ctx.tx[sfAccount] == ctx.tx[sfDestination]) { // They wrote a check to themselves. @@ -76,7 +63,7 @@ CreateCheck::preflight(PreflightContext const& ctx) } } - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/CreateOffer.cpp b/src/xrpld/app/tx/detail/CreateOffer.cpp index 6185e52183..86750eb51d 100644 --- a/src/xrpld/app/tx/detail/CreateOffer.cpp +++ b/src/xrpld/app/tx/detail/CreateOffer.cpp @@ -43,30 +43,36 @@ CreateOffer::makeTxConsequences(PreflightContext const& ctx) return TxConsequences{ctx.tx, calculateMaxXRPSpend(ctx.tx)}; } -NotTEC -CreateOffer::preflight(PreflightContext const& ctx) +bool +CreateOffer::checkExtraFeatures(PreflightContext const& ctx) { if (ctx.tx.isFieldPresent(sfDomainID) && !ctx.rules.enabled(featurePermissionedDEX)) - return temDISABLED; + return false; - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; + return true; +} +std::uint32_t +CreateOffer::getFlagsMask(PreflightContext const& ctx) +{ + // The tfOfferCreateMask is built assuming that PermissionedDEX is + // enabled + if (ctx.rules.enabled(featurePermissionedDEX)) + return tfOfferCreateMask; + // If PermissionedDEX is not enabled, add tfHybrid to the mask, + // indicating it is not allowed. 
+ return tfOfferCreateMask | tfHybrid; +} + +NotTEC +CreateOffer::preflight(PreflightContext const& ctx) +{ auto& tx = ctx.tx; auto& j = ctx.j; std::uint32_t const uTxFlags = tx.getFlags(); - if (uTxFlags & tfOfferCreateMask) - { - JLOG(j.debug()) << "Malformed transaction: Invalid flags set."; - return temINVALID_FLAG; - } - - if (!ctx.rules.enabled(featurePermissionedDEX) && tx.isFlag(tfHybrid)) - return temINVALID_FLAG; - if (tx.isFlag(tfHybrid) && !tx.isFieldPresent(sfDomainID)) return temINVALID_FLAG; @@ -136,7 +142,7 @@ CreateOffer::preflight(PreflightContext const& ctx) return temBAD_ISSUER; } - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/CreateOffer.h b/src/xrpld/app/tx/detail/CreateOffer.h index 6e3d6145b1..c38e244b34 100644 --- a/src/xrpld/app/tx/detail/CreateOffer.h +++ b/src/xrpld/app/tx/detail/CreateOffer.h @@ -43,6 +43,12 @@ public: static TxConsequences makeTxConsequences(PreflightContext const& ctx); + static bool + checkExtraFeatures(PreflightContext const& ctx); + + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + /** Enforce constraints beyond those of the Transactor base class. */ static NotTEC preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/CreateTicket.cpp b/src/xrpld/app/tx/detail/CreateTicket.cpp index 594335f489..d48da2d780 100644 --- a/src/xrpld/app/tx/detail/CreateTicket.cpp +++ b/src/xrpld/app/tx/detail/CreateTicket.cpp @@ -36,20 +36,11 @@ CreateTicket::makeTxConsequences(PreflightContext const& ctx) NotTEC CreateTicket::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureTicketBatch)) - return temDISABLED; - - if (ctx.tx.getFlags() & tfUniversalMask) - return temINVALID_FLAG; - if (std::uint32_t const count = ctx.tx[sfTicketCount]; count < minValidCount || count > maxValidCount) return temINVALID_COUNT; - if (NotTEC const ret{preflight1(ctx)}; !isTesSuccess(ret)) - return ret; - - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/Credentials.cpp b/src/xrpld/app/tx/detail/Credentials.cpp index b30ae200b7..4b77163c5d 100644 --- a/src/xrpld/app/tx/detail/Credentials.cpp +++ b/src/xrpld/app/tx/detail/Credentials.cpp @@ -48,28 +48,19 @@ using namespace credentials; // ------- CREATE -------------------------- +std::uint32_t +CredentialCreate::getFlagsMask(PreflightContext const& ctx) +{ + // 0 means "Allow any flags" + return ctx.rules.enabled(fixInvalidTxFlags) ? tfUniversalMask : 0; +} + NotTEC CredentialCreate::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureCredentials)) - { - JLOG(ctx.j.trace()) << "featureCredentials is disabled."; - return temDISABLED; - } - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - auto const& tx = ctx.tx; auto& j = ctx.j; - if (ctx.rules.enabled(fixInvalidTxFlags) && - (tx.getFlags() & tfUniversalMask)) - { - JLOG(ctx.j.debug()) << "CredentialCreate: invalid flags."; - return temINVALID_FLAG; - } - if (!tx[sfSubject]) { JLOG(j.trace()) << "Malformed transaction: Invalid Subject"; @@ -91,7 +82,7 @@ CredentialCreate::preflight(PreflightContext const& ctx) return temMALFORMED; } - return preflight2(ctx); + return tesSUCCESS; } TER @@ -202,25 +193,17 @@ CredentialCreate::doApply() } // ------- DELETE -------------------------- + +std::uint32_t +CredentialDelete::getFlagsMask(PreflightContext const& ctx) +{ + // 0 means "Allow any flags" + return ctx.rules.enabled(fixInvalidTxFlags) ? 
tfUniversalMask : 0; +} + NotTEC CredentialDelete::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureCredentials)) - { - JLOG(ctx.j.trace()) << "featureCredentials is disabled."; - return temDISABLED; - } - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - - if (ctx.rules.enabled(fixInvalidTxFlags) && - (ctx.tx.getFlags() & tfUniversalMask)) - { - JLOG(ctx.j.debug()) << "CredentialDelete: invalid flags."; - return temINVALID_FLAG; - } - auto const subject = ctx.tx[~sfSubject]; auto const issuer = ctx.tx[~sfIssuer]; @@ -248,7 +231,7 @@ CredentialDelete::preflight(PreflightContext const& ctx) return temMALFORMED; } - return preflight2(ctx); + return tesSUCCESS; } TER @@ -289,25 +272,16 @@ CredentialDelete::doApply() // ------- APPLY -------------------------- +std::uint32_t +CredentialAccept::getFlagsMask(PreflightContext const& ctx) +{ + // 0 means "Allow any flags" + return ctx.rules.enabled(fixInvalidTxFlags) ? tfUniversalMask : 0; +} + NotTEC CredentialAccept::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureCredentials)) - { - JLOG(ctx.j.trace()) << "featureCredentials is disabled."; - return temDISABLED; - } - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - - if (ctx.rules.enabled(fixInvalidTxFlags) && - (ctx.tx.getFlags() & tfUniversalMask)) - { - JLOG(ctx.j.debug()) << "CredentialAccept: invalid flags."; - return temINVALID_FLAG; - } - if (!ctx.tx[sfIssuer]) { JLOG(ctx.j.trace()) << "Malformed transaction: Issuer field zeroed."; @@ -322,7 +296,7 @@ CredentialAccept::preflight(PreflightContext const& ctx) return temMALFORMED; } - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/Credentials.h b/src/xrpld/app/tx/detail/Credentials.h index 5b4acb3998..a5885a2226 100644 --- a/src/xrpld/app/tx/detail/Credentials.h +++ b/src/xrpld/app/tx/detail/Credentials.h @@ -33,6 +33,9 @@ public: { } + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); @@ -54,6 +57,9 @@ public: { } + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); @@ -75,6 +81,9 @@ public: { } + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/DID.cpp b/src/xrpld/app/tx/detail/DID.cpp index 8c4a220844..b38b207d36 100644 --- a/src/xrpld/app/tx/detail/DID.cpp +++ b/src/xrpld/app/tx/detail/DID.cpp @@ -45,15 +45,6 @@ namespace ripple { NotTEC DIDSet::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureDID)) - return temDISABLED; - - if (ctx.tx.getFlags() & tfUniversalMask) - return temINVALID_FLAG; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - if (!ctx.tx.isFieldPresent(sfURI) && !ctx.tx.isFieldPresent(sfDIDDocument) && !ctx.tx.isFieldPresent(sfData)) return temEMPTY_DID; @@ -74,7 +65,7 @@ DIDSet::preflight(PreflightContext const& ctx) isTooLong(sfData, maxDIDAttestationLength)) return temMALFORMED; - return preflight2(ctx); + return tesSUCCESS; } TER @@ -174,16 +165,7 @@ DIDSet::doApply() NotTEC DIDDelete::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureDID)) - return temDISABLED; - - if (ctx.tx.getFlags() & tfUniversalMask) - return temINVALID_FLAG; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - - return preflight2(ctx); + return tesSUCCESS; } 
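// Editor's sketch (not part of this diff): with the feature gating, flag
// checks, preflight1, and preflight2 calls hoisted out of the per-transactor
// preflight bodies above, the generic driver is assumed to sequence the new
// hooks roughly as below. invokePreflight, checkExtraFeatures, getFlagsMask,
// and preflightSigValidated are names introduced by this patch; the exact
// driver body is an assumption for illustration, not the actual
// implementation in Transactor.h.
template <class T>
NotTEC
invokePreflightSketch(PreflightContext const& ctx)
{
    // Missing required amendments (or amendment-gated optional fields)
    // disable the transaction type entirely.
    if (!T::checkExtraFeatures(ctx))
        return temDISABLED;

    // preflight1 performs the common account/fee/signing-key checks and
    // rejects any flag bit in T::getFlagsMask(ctx); a mask of 0 means
    // "allow any flags".
    if (auto const ret = Transactor::preflight1(ctx, T::getFlagsMask(ctx));
        !isTesSuccess(ret))
        return ret;

    // Transaction-type-specific structural checks, which now end in
    // tesSUCCESS instead of calling preflight2 themselves.
    if (auto const ret = T::preflight(ctx); !isTesSuccess(ret))
        return ret;

    // Signature validation, followed by checks that must only run on a
    // transaction whose signature is known to be valid (e.g. EscrowFinish
    // fulfillment parsing, Batch signer validation).
    if (auto const ret = Transactor::preflight2(ctx); !isTesSuccess(ret))
        return ret;

    return T::preflightSigValidated(ctx);
}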
TER diff --git a/src/xrpld/app/tx/detail/DelegateSet.cpp b/src/xrpld/app/tx/detail/DelegateSet.cpp index 53052fd75b..e769f75d8a 100644 --- a/src/xrpld/app/tx/detail/DelegateSet.cpp +++ b/src/xrpld/app/tx/detail/DelegateSet.cpp @@ -30,12 +30,6 @@ namespace ripple { NotTEC DelegateSet::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featurePermissionDelegation)) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - auto const& permissions = ctx.tx.getFieldArray(sfPermissions); if (permissions.size() > permissionMaxSize) return temARRAY_TOO_LARGE; @@ -57,7 +51,7 @@ DelegateSet::preflight(PreflightContext const& ctx) return temMALFORMED; } - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/DeleteAccount.cpp b/src/xrpld/app/tx/detail/DeleteAccount.cpp index 02f84adcc3..565d938c83 100644 --- a/src/xrpld/app/tx/detail/DeleteAccount.cpp +++ b/src/xrpld/app/tx/detail/DeleteAccount.cpp @@ -38,22 +38,22 @@ namespace ripple { -NotTEC -DeleteAccount::preflight(PreflightContext const& ctx) +bool +DeleteAccount::checkExtraFeatures(PreflightContext const& ctx) { if (!ctx.rules.enabled(featureDeletableAccounts)) - return temDISABLED; + return false; if (ctx.tx.isFieldPresent(sfCredentialIDs) && !ctx.rules.enabled(featureCredentials)) - return temDISABLED; + return false; - if (ctx.tx.getFlags() & tfUniversalMask) - return temINVALID_FLAG; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; + return true; +} +NotTEC +DeleteAccount::preflight(PreflightContext const& ctx) +{ if (ctx.tx[sfAccount] == ctx.tx[sfDestination]) // An account cannot be deleted and give itself the resulting XRP. return temDST_IS_SRC; @@ -62,14 +62,14 @@ DeleteAccount::preflight(PreflightContext const& ctx) !isTesSuccess(err)) return err; - return preflight2(ctx); + return tesSUCCESS; } XRPAmount DeleteAccount::calculateBaseFee(ReadView const& view, STTx const& tx) { // The fee required for AccountDelete is one owner reserve. 
- return view.fees().increment; + return calculateOwnerReserveFee(view, tx); } namespace { diff --git a/src/xrpld/app/tx/detail/DeleteAccount.h b/src/xrpld/app/tx/detail/DeleteAccount.h index c9d3305562..ee9db97d50 100644 --- a/src/xrpld/app/tx/detail/DeleteAccount.h +++ b/src/xrpld/app/tx/detail/DeleteAccount.h @@ -33,6 +33,9 @@ public: { } + static bool + checkExtraFeatures(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/DeleteOracle.cpp b/src/xrpld/app/tx/detail/DeleteOracle.cpp index ac195d100c..7dba477aaa 100644 --- a/src/xrpld/app/tx/detail/DeleteOracle.cpp +++ b/src/xrpld/app/tx/detail/DeleteOracle.cpp @@ -29,19 +29,7 @@ namespace ripple { NotTEC DeleteOracle::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featurePriceOracle)) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - - if (ctx.tx.getFlags() & tfUniversalMask) - { - JLOG(ctx.j.debug()) << "Oracle Delete: invalid flags."; - return temINVALID_FLAG; - } - - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/DepositPreauth.cpp b/src/xrpld/app/tx/detail/DepositPreauth.cpp index 0e8c5c05d2..236b59a173 100644 --- a/src/xrpld/app/tx/detail/DepositPreauth.cpp +++ b/src/xrpld/app/tx/detail/DepositPreauth.cpp @@ -30,32 +30,29 @@ namespace ripple { +bool +DepositPreauth::checkExtraFeatures(PreflightContext const& ctx) +{ + bool const authArrPresent = ctx.tx.isFieldPresent(sfAuthorizeCredentials); + bool const unauthArrPresent = + ctx.tx.isFieldPresent(sfUnauthorizeCredentials); + bool const authCredPresent = authArrPresent || unauthArrPresent; + + if (authCredPresent && !ctx.rules.enabled(featureCredentials)) + return false; + + return true; +} + NotTEC DepositPreauth::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureDepositPreauth)) - return temDISABLED; - bool const authArrPresent = ctx.tx.isFieldPresent(sfAuthorizeCredentials); bool const unauthArrPresent = ctx.tx.isFieldPresent(sfUnauthorizeCredentials); int const authCredPresent = static_cast(authArrPresent) + static_cast(unauthArrPresent); - if (authCredPresent && !ctx.rules.enabled(featureCredentials)) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - - auto& tx = ctx.tx; - - if (tx.getFlags() & tfUniversalMask) - { - JLOG(ctx.j.trace()) << "Malformed transaction: Invalid flags set."; - return temINVALID_FLAG; - } - auto const optAuth = ctx.tx[~sfAuthorize]; auto const optUnauth = ctx.tx[~sfUnauthorize]; int const authPresent = static_cast(optAuth.has_value()) + @@ -102,7 +99,7 @@ DepositPreauth::preflight(PreflightContext const& ctx) return err; } - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/DepositPreauth.h b/src/xrpld/app/tx/detail/DepositPreauth.h index 76a7c08073..ead17742cd 100644 --- a/src/xrpld/app/tx/detail/DepositPreauth.h +++ b/src/xrpld/app/tx/detail/DepositPreauth.h @@ -33,6 +33,9 @@ public: { } + static bool + checkExtraFeatures(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/Escrow.cpp b/src/xrpld/app/tx/detail/Escrow.cpp index f1d1db79a0..969fd4dd4c 100644 --- a/src/xrpld/app/tx/detail/Escrow.cpp +++ b/src/xrpld/app/tx/detail/Escrow.cpp @@ -118,15 +118,16 @@ escrowCreatePreflightHelper(PreflightContext const& ctx) return tesSUCCESS; } +std::uint32_t +EscrowCreate::getFlagsMask(PreflightContext 
const& ctx) +{ + // 0 means "Allow any flags" + return ctx.rules.enabled(fix1543) ? tfUniversalMask : 0; +} + NotTEC EscrowCreate::preflight(PreflightContext const& ctx) { - if (ctx.rules.enabled(fix1543) && ctx.tx.getFlags() & tfUniversalMask) - return temINVALID_FLAG; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - STAmount const amount{ctx.tx[sfAmount]}; if (!isXRP(amount)) { @@ -189,7 +190,7 @@ EscrowCreate::preflight(PreflightContext const& ctx) return temDISABLED; } - return preflight2(ctx); + return tesSUCCESS; } template @@ -629,19 +630,23 @@ checkCondition(Slice f, Slice c) return validate(*fulfillment, *condition); } +bool +EscrowFinish::checkExtraFeatures(PreflightContext const& ctx) +{ + return !ctx.tx.isFieldPresent(sfCredentialIDs) || + ctx.rules.enabled(featureCredentials); +} + +std::uint32_t +EscrowFinish::getFlagsMask(PreflightContext const& ctx) +{ + // 0 means "Allow any flags" + return ctx.rules.enabled(fix1543) ? tfUniversalMask : 0; +} + NotTEC EscrowFinish::preflight(PreflightContext const& ctx) { - if (ctx.rules.enabled(fix1543) && ctx.tx.getFlags() & tfUniversalMask) - return temINVALID_FLAG; - - if (ctx.tx.isFieldPresent(sfCredentialIDs) && - !ctx.rules.enabled(featureCredentials)) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - auto const cb = ctx.tx[~sfCondition]; auto const fb = ctx.tx[~sfFulfillment]; @@ -650,13 +655,14 @@ EscrowFinish::preflight(PreflightContext const& ctx) if (static_cast(cb) != static_cast(fb)) return temMALFORMED; - // Verify the transaction signature. If it doesn't work - // then don't do any more work. - { - auto const ret = preflight2(ctx); - if (!isTesSuccess(ret)) - return ret; - } + return tesSUCCESS; +} + +NotTEC +EscrowFinish::preflightSigValidated(PreflightContext const& ctx) +{ + auto const cb = ctx.tx[~sfCondition]; + auto const fb = ctx.tx[~sfFulfillment]; if (cb && fb) { @@ -1207,16 +1213,17 @@ EscrowFinish::doApply() //------------------------------------------------------------------------------ +std::uint32_t +EscrowCancel::getFlagsMask(PreflightContext const& ctx) +{ + // 0 means "Allow any flags" + return ctx.rules.enabled(fix1543) ? 
tfUniversalMask : 0; +} + NotTEC EscrowCancel::preflight(PreflightContext const& ctx) { - if (ctx.rules.enabled(fix1543) && ctx.tx.getFlags() & tfUniversalMask) - return temINVALID_FLAG; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - - return preflight2(ctx); + return tesSUCCESS; } template diff --git a/src/xrpld/app/tx/detail/Escrow.h b/src/xrpld/app/tx/detail/Escrow.h index 2225c94f16..8956be2939 100644 --- a/src/xrpld/app/tx/detail/Escrow.h +++ b/src/xrpld/app/tx/detail/Escrow.h @@ -36,6 +36,9 @@ public: static TxConsequences makeTxConsequences(PreflightContext const& ctx); + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); @@ -57,9 +60,18 @@ public: { } + static bool + checkExtraFeatures(PreflightContext const& ctx); + + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); + static NotTEC + preflightSigValidated(PreflightContext const& ctx); + static XRPAmount calculateBaseFee(ReadView const& view, STTx const& tx); @@ -81,6 +93,9 @@ public: { } + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/LedgerStateFix.cpp b/src/xrpld/app/tx/detail/LedgerStateFix.cpp index b861f1d0ef..6059e15313 100644 --- a/src/xrpld/app/tx/detail/LedgerStateFix.cpp +++ b/src/xrpld/app/tx/detail/LedgerStateFix.cpp @@ -30,15 +30,6 @@ namespace ripple { NotTEC LedgerStateFix::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(fixNFTokenPageLinks)) - return temDISABLED; - - if (ctx.tx.getFlags() & tfUniversalMask) - return temINVALID_FLAG; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - switch (ctx.tx[sfLedgerFixType]) { case FixType::nfTokenPageLink: @@ -50,7 +41,7 @@ LedgerStateFix::preflight(PreflightContext const& ctx) return tefINVALID_LEDGER_FIX_TYPE; } - return preflight2(ctx); + return tesSUCCESS; } XRPAmount @@ -58,7 +49,7 @@ LedgerStateFix::calculateBaseFee(ReadView const& view, STTx const& tx) { // The fee required for LedgerStateFix is one owner reserve, just like // the fee for AccountDelete. 
- return view.fees().increment; + return calculateOwnerReserveFee(view, tx); } TER diff --git a/src/xrpld/app/tx/detail/MPTokenAuthorize.cpp b/src/xrpld/app/tx/detail/MPTokenAuthorize.cpp index 1c6d153ec5..edeb12e5c0 100644 --- a/src/xrpld/app/tx/detail/MPTokenAuthorize.cpp +++ b/src/xrpld/app/tx/detail/MPTokenAuthorize.cpp @@ -26,22 +26,19 @@ namespace ripple { +std::uint32_t +MPTokenAuthorize::getFlagsMask(PreflightContext const& ctx) +{ + return tfMPTokenAuthorizeMask; +} + NotTEC MPTokenAuthorize::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureMPTokensV1)) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - - if (ctx.tx.getFlags() & tfMPTokenAuthorizeMask) - return temINVALID_FLAG; - if (ctx.tx[sfAccount] == ctx.tx[~sfHolder]) return temMALFORMED; - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/MPTokenAuthorize.h b/src/xrpld/app/tx/detail/MPTokenAuthorize.h index 85e8edcf9f..43a962e24e 100644 --- a/src/xrpld/app/tx/detail/MPTokenAuthorize.h +++ b/src/xrpld/app/tx/detail/MPTokenAuthorize.h @@ -42,6 +42,9 @@ public: { } + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.cpp b/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.cpp index 478ef17bb0..eec4187573 100644 --- a/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.cpp +++ b/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.cpp @@ -25,31 +25,37 @@ namespace ripple { -NotTEC -MPTokenIssuanceCreate::preflight(PreflightContext const& ctx) +bool +MPTokenIssuanceCreate::checkExtraFeatures(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureMPTokensV1)) - return temDISABLED; - if (ctx.tx.isFieldPresent(sfDomainID) && !(ctx.rules.enabled(featurePermissionedDomains) && ctx.rules.enabled(featureSingleAssetVault))) - return temDISABLED; + return false; if (ctx.tx.isFieldPresent(sfMutableFlags) && !ctx.rules.enabled(featureDynamicMPT)) - return temDISABLED; + return false; - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; + return true; +} +std::uint32_t +MPTokenIssuanceCreate::getFlagsMask(PreflightContext const& ctx) +{ + // This mask is only compared against sfFlags + return tfMPTokenIssuanceCreateMask; +} + +NotTEC +MPTokenIssuanceCreate::preflight(PreflightContext const& ctx) +{ + // If the mutable flags field is included, at least one flag must be + // specified. 
if (auto const mutableFlags = ctx.tx[~sfMutableFlags]; mutableFlags && (!*mutableFlags || *mutableFlags & tmfMPTokenIssuanceCreateMutableMask)) return temINVALID_FLAG; - if (ctx.tx.getFlags() & tfMPTokenIssuanceCreateMask) - return temINVALID_FLAG; - if (auto const fee = ctx.tx[~sfTransferFee]) { if (fee > maxTransferFee) @@ -87,7 +93,7 @@ MPTokenIssuanceCreate::preflight(PreflightContext const& ctx) if (maxAmt > maxMPTokenAmount) return temMALFORMED; } - return preflight2(ctx); + return tesSUCCESS; } Expected diff --git a/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.h b/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.h index 0527b9602f..842ed88641 100644 --- a/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.h +++ b/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.h @@ -50,6 +50,12 @@ public: { } + static bool + checkExtraFeatures(PreflightContext const& ctx); + + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/MPTokenIssuanceDestroy.cpp b/src/xrpld/app/tx/detail/MPTokenIssuanceDestroy.cpp index 2c330ba8f7..4c502f1106 100644 --- a/src/xrpld/app/tx/detail/MPTokenIssuanceDestroy.cpp +++ b/src/xrpld/app/tx/detail/MPTokenIssuanceDestroy.cpp @@ -25,20 +25,16 @@ namespace ripple { +std::uint32_t +MPTokenIssuanceDestroy::getFlagsMask(PreflightContext const& ctx) +{ + return tfMPTokenIssuanceDestroyMask; +} + NotTEC MPTokenIssuanceDestroy::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureMPTokensV1)) - return temDISABLED; - - // check flags - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - - if (ctx.tx.getFlags() & tfMPTokenIssuanceDestroyMask) - return temINVALID_FLAG; - - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/MPTokenIssuanceDestroy.h b/src/xrpld/app/tx/detail/MPTokenIssuanceDestroy.h index 69abb99feb..2cebdb7352 100644 --- a/src/xrpld/app/tx/detail/MPTokenIssuanceDestroy.h +++ b/src/xrpld/app/tx/detail/MPTokenIssuanceDestroy.h @@ -33,6 +33,9 @@ public: { } + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp b/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp index 37c563460a..c406a8ec5f 100644 --- a/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp +++ b/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp @@ -26,6 +26,20 @@ namespace ripple { +bool +MPTokenIssuanceSet::checkExtraFeatures(PreflightContext const& ctx) +{ + return !ctx.tx.isFieldPresent(sfDomainID) || + (ctx.rules.enabled(featurePermissionedDomains) && + ctx.rules.enabled(featureSingleAssetVault)); +} + +std::uint32_t +MPTokenIssuanceSet::getFlagsMask(PreflightContext const& ctx) +{ + return tfMPTokenIssuanceSetMask; +} + // Maps set/clear mutable flags in an MPTokenIssuanceSet transaction to the // corresponding ledger mutable flags that control whether the change is // allowed. 
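// Editor's note (sketch, not part of this diff): several transactors in this
// patch derive their flag mask from the active rules instead of returning a
// fixed constant -- e.g. CreateOffer adds tfHybrid to the mask while
// featurePermissionedDEX is disabled, and the fix1543 / fixInvalidTxFlags
// transactors return 0 ("allow any flags") until the fix amendment is
// active. A hypothetical transactor gating a new flag bit behind a new
// amendment would follow the same idiom; tfSomeMask, tfSomeNewFlag, and
// featureSomeAmendment below are placeholder names, not real identifiers.
std::uint32_t
SomeTransactor::getFlagsMask(PreflightContext const& ctx)
{
    std::uint32_t mask = tfSomeMask;
    if (!ctx.rules.enabled(featureSomeAmendment))
        // While the amendment is off, the new flag bit is treated as
        // invalid, so it is added to the mask checked in preflight0.
        mask |= tfSomeNewFlag;
    return mask;
}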
@@ -49,14 +63,6 @@ static constexpr std::array mptMutabilityFlags = { NotTEC MPTokenIssuanceSet::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureMPTokensV1)) - return temDISABLED; - - if (ctx.tx.isFieldPresent(sfDomainID) && - !(ctx.rules.enabled(featurePermissionedDomains) && - ctx.rules.enabled(featureSingleAssetVault))) - return temDISABLED; - auto const mutableFlags = ctx.tx[~sfMutableFlags]; auto const metadata = ctx.tx[~sfMPTokenMetadata]; auto const transferFee = ctx.tx[~sfTransferFee]; @@ -68,16 +74,10 @@ MPTokenIssuanceSet::preflight(PreflightContext const& ctx) if (ctx.tx.isFieldPresent(sfDomainID) && ctx.tx.isFieldPresent(sfHolder)) return temMALFORMED; - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - auto const txFlags = ctx.tx.getFlags(); - // check flags - if (txFlags & tfMPTokenIssuanceSetMask) - return temINVALID_FLAG; // fails if both flags are set - else if ((txFlags & tfMPTLock) && (txFlags & tfMPTUnlock)) + if ((txFlags & tfMPTLock) && (txFlags & tfMPTUnlock)) return temINVALID_FLAG; auto const accountID = ctx.tx[sfAccount]; @@ -133,7 +133,7 @@ MPTokenIssuanceSet::preflight(PreflightContext const& ctx) } } - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/MPTokenIssuanceSet.h b/src/xrpld/app/tx/detail/MPTokenIssuanceSet.h index 5b3db0e75b..f63812097e 100644 --- a/src/xrpld/app/tx/detail/MPTokenIssuanceSet.h +++ b/src/xrpld/app/tx/detail/MPTokenIssuanceSet.h @@ -33,6 +33,12 @@ public: { } + static bool + checkExtraFeatures(PreflightContext const& ctx); + + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/NFTokenAcceptOffer.cpp b/src/xrpld/app/tx/detail/NFTokenAcceptOffer.cpp index 0cf6a86a37..3b4a27ffd7 100644 --- a/src/xrpld/app/tx/detail/NFTokenAcceptOffer.cpp +++ b/src/xrpld/app/tx/detail/NFTokenAcceptOffer.cpp @@ -27,18 +27,15 @@ namespace ripple { +std::uint32_t +NFTokenAcceptOffer::getFlagsMask(PreflightContext const& ctx) +{ + return tfNFTokenAcceptOfferMask; +} + NotTEC NFTokenAcceptOffer::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureNonFungibleTokensV1)) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - - if (ctx.tx.getFlags() & tfNFTokenAcceptOfferMask) - return temINVALID_FLAG; - auto const bo = ctx.tx[~sfNFTokenBuyOffer]; auto const so = ctx.tx[~sfNFTokenSellOffer]; @@ -57,7 +54,7 @@ NFTokenAcceptOffer::preflight(PreflightContext const& ctx) return temMALFORMED; } - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/NFTokenAcceptOffer.h b/src/xrpld/app/tx/detail/NFTokenAcceptOffer.h index dff3febbb2..995581d1ff 100644 --- a/src/xrpld/app/tx/detail/NFTokenAcceptOffer.h +++ b/src/xrpld/app/tx/detail/NFTokenAcceptOffer.h @@ -51,6 +51,9 @@ public: { } + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/NFTokenBurn.cpp b/src/xrpld/app/tx/detail/NFTokenBurn.cpp index 947a663f92..cb1b564402 100644 --- a/src/xrpld/app/tx/detail/NFTokenBurn.cpp +++ b/src/xrpld/app/tx/detail/NFTokenBurn.cpp @@ -29,16 +29,7 @@ namespace ripple { NotTEC NFTokenBurn::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureNonFungibleTokensV1)) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - - if 
(ctx.tx.getFlags() & tfUniversalMask) - return temINVALID_FLAG; - - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/NFTokenCancelOffer.cpp b/src/xrpld/app/tx/detail/NFTokenCancelOffer.cpp index 3d0bf04a1b..86e804b1a5 100644 --- a/src/xrpld/app/tx/detail/NFTokenCancelOffer.cpp +++ b/src/xrpld/app/tx/detail/NFTokenCancelOffer.cpp @@ -28,18 +28,15 @@ namespace ripple { +std::uint32_t +NFTokenCancelOffer::getFlagsMask(PreflightContext const& ctx) +{ + return tfNFTokenCancelOfferMask; +} + NotTEC NFTokenCancelOffer::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureNonFungibleTokensV1)) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - - if (ctx.tx.getFlags() & tfNFTokenCancelOfferMask) - return temINVALID_FLAG; - if (auto const& ids = ctx.tx[sfNFTokenOffers]; ids.empty() || (ids.size() > maxTokenOfferCancelCount)) return temMALFORMED; @@ -51,7 +48,7 @@ NFTokenCancelOffer::preflight(PreflightContext const& ctx) if (std::adjacent_find(ids.begin(), ids.end()) != ids.end()) return temMALFORMED; - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/NFTokenCancelOffer.h b/src/xrpld/app/tx/detail/NFTokenCancelOffer.h index d460675711..b35be0e757 100644 --- a/src/xrpld/app/tx/detail/NFTokenCancelOffer.h +++ b/src/xrpld/app/tx/detail/NFTokenCancelOffer.h @@ -33,6 +33,9 @@ public: { } + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/NFTokenCreateOffer.cpp b/src/xrpld/app/tx/detail/NFTokenCreateOffer.cpp index f9cc8c1fc8..2a02fed797 100644 --- a/src/xrpld/app/tx/detail/NFTokenCreateOffer.cpp +++ b/src/xrpld/app/tx/detail/NFTokenCreateOffer.cpp @@ -26,20 +26,17 @@ namespace ripple { +std::uint32_t +NFTokenCreateOffer::getFlagsMask(PreflightContext const& ctx) +{ + return tfNFTokenCreateOfferMask; +} + NotTEC NFTokenCreateOffer::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureNonFungibleTokensV1)) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - auto const txFlags = ctx.tx.getFlags(); - if (txFlags & tfNFTokenCreateOfferMask) - return temINVALID_FLAG; - auto const nftFlags = nft::getFlags(ctx.tx[sfNFTokenID]); // Use implementation shared with NFTokenMint @@ -55,7 +52,7 @@ NFTokenCreateOffer::preflight(PreflightContext const& ctx) !isTesSuccess(notTec)) return notTec; - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/NFTokenCreateOffer.h b/src/xrpld/app/tx/detail/NFTokenCreateOffer.h index 075a5a712f..0a1c631298 100644 --- a/src/xrpld/app/tx/detail/NFTokenCreateOffer.h +++ b/src/xrpld/app/tx/detail/NFTokenCreateOffer.h @@ -33,6 +33,9 @@ public: { } + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/NFTokenMint.cpp b/src/xrpld/app/tx/detail/NFTokenMint.cpp index 4c07a6e499..8149d3b59d 100644 --- a/src/xrpld/app/tx/detail/NFTokenMint.cpp +++ b/src/xrpld/app/tx/detail/NFTokenMint.cpp @@ -38,22 +38,23 @@ extractNFTokenFlagsFromTxFlags(std::uint32_t txFlags) return static_cast(txFlags & 0x0000FFFF); } -NotTEC -NFTokenMint::preflight(PreflightContext const& ctx) +static bool +hasOfferFields(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureNonFungibleTokensV1)) - return temDISABLED; - - bool const hasOfferFields = 
ctx.tx.isFieldPresent(sfAmount) || + return ctx.tx.isFieldPresent(sfAmount) || ctx.tx.isFieldPresent(sfDestination) || ctx.tx.isFieldPresent(sfExpiration); +} - if (!ctx.rules.enabled(featureNFTokenMintOffer) && hasOfferFields) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; +bool +NFTokenMint::checkExtraFeatures(PreflightContext const& ctx) +{ + return ctx.rules.enabled(featureNFTokenMintOffer) || !hasOfferFields(ctx); +} +std::uint32_t +NFTokenMint::getFlagsMask(PreflightContext const& ctx) +{ // Prior to fixRemoveNFTokenAutoTrustLine, transfer of an NFToken between // accounts allowed a TrustLine to be added to the issuer of that token // without explicit permission from that issuer. This was enabled by @@ -67,7 +68,7 @@ NFTokenMint::preflight(PreflightContext const& ctx) // The fixRemoveNFTokenAutoTrustLine amendment disables minting with the // tfTrustLine flag as a way to prevent the attack. But until the // amendment passes we still need to keep the old behavior available. - std::uint32_t const NFTokenMintMask = + std::uint32_t const nfTokenMintMask = ctx.rules.enabled(fixRemoveNFTokenAutoTrustLine) // if featureDynamicNFT enabled then new flag allowing mutable URI // available @@ -76,9 +77,12 @@ NFTokenMint::preflight(PreflightContext const& ctx) : ctx.rules.enabled(featureDynamicNFT) ? tfNFTokenMintOldMaskWithMutable : tfNFTokenMintOldMask; - if (ctx.tx.getFlags() & NFTokenMintMask) - return temINVALID_FLAG; + return nfTokenMintMask; +} +NotTEC +NFTokenMint::preflight(PreflightContext const& ctx) +{ if (auto const f = ctx.tx[~sfTransferFee]) { if (f > maxTransferFee) @@ -100,7 +104,7 @@ NFTokenMint::preflight(PreflightContext const& ctx) return temMALFORMED; } - if (hasOfferFields) + if (hasOfferFields(ctx)) { // The Amount field must be present if either the Destination or // Expiration fields are present. 
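// Editor's sketch (not part of this diff): NFTokenMint above, like
// EscrowFinish and PayChanClaim, uses checkExtraFeatures to express
// "optional field X requires amendment Y", replacing the ad-hoc temDISABLED
// returns that used to sit at the top of preflight. A hypothetical
// transactor with a single gated optional field reduces to the following;
// sfSomeOptionalField and featureSomeAmendment are placeholder names.
bool
SomeTransactor::checkExtraFeatures(PreflightContext const& ctx)
{
    // Returning false is assumed to surface as temDISABLED in the common
    // preflight driver, matching the behavior the removed code had.
    return !ctx.tx.isFieldPresent(sfSomeOptionalField) ||
        ctx.rules.enabled(featureSomeAmendment);
}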
@@ -123,7 +127,7 @@ NFTokenMint::preflight(PreflightContext const& ctx) } } - return preflight2(ctx); + return tesSUCCESS; } uint256 diff --git a/src/xrpld/app/tx/detail/NFTokenMint.h b/src/xrpld/app/tx/detail/NFTokenMint.h index f606120c54..1606514559 100644 --- a/src/xrpld/app/tx/detail/NFTokenMint.h +++ b/src/xrpld/app/tx/detail/NFTokenMint.h @@ -36,6 +36,12 @@ public: { } + static bool + checkExtraFeatures(PreflightContext const& ctx); + + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/NFTokenModify.cpp b/src/xrpld/app/tx/detail/NFTokenModify.cpp index a3803c423b..6ae095411b 100644 --- a/src/xrpld/app/tx/detail/NFTokenModify.cpp +++ b/src/xrpld/app/tx/detail/NFTokenModify.cpp @@ -25,19 +25,15 @@ namespace ripple { +bool +NFTokenModify::checkExtraFeatures(PreflightContext const& ctx) +{ + return ctx.rules.enabled(featureNonFungibleTokensV1_1); +} + NotTEC NFTokenModify::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureNonFungibleTokensV1_1) || - !ctx.rules.enabled(featureDynamicNFT)) - return temDISABLED; - - if (NotTEC const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - - if (ctx.tx.getFlags() & tfUniversalMask) - return temINVALID_FLAG; - if (auto owner = ctx.tx[~sfOwner]; owner == ctx.tx[sfAccount]) return temMALFORMED; @@ -47,7 +43,7 @@ NFTokenModify::preflight(PreflightContext const& ctx) return temMALFORMED; } - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/NFTokenModify.h b/src/xrpld/app/tx/detail/NFTokenModify.h index 0d1e72ade1..04784381fb 100644 --- a/src/xrpld/app/tx/detail/NFTokenModify.h +++ b/src/xrpld/app/tx/detail/NFTokenModify.h @@ -33,6 +33,9 @@ public: { } + static bool + checkExtraFeatures(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/PayChan.cpp b/src/xrpld/app/tx/detail/PayChan.cpp index bdfe0d5c95..32c0abeb93 100644 --- a/src/xrpld/app/tx/detail/PayChan.cpp +++ b/src/xrpld/app/tx/detail/PayChan.cpp @@ -171,15 +171,16 @@ PayChanCreate::makeTxConsequences(PreflightContext const& ctx) return TxConsequences{ctx.tx, ctx.tx[sfAmount].xrp()}; } +std::uint32_t +PayChanCreate::getFlagsMask(PreflightContext const& ctx) +{ + // 0 means "Allow any flags" + return ctx.rules.enabled(fix1543) ? tfUniversalMask : 0; +} + NotTEC PayChanCreate::preflight(PreflightContext const& ctx) { - if (ctx.rules.enabled(fix1543) && ctx.tx.getFlags() & tfUniversalMask) - return temINVALID_FLAG; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - if (!isXRP(ctx.tx[sfAmount]) || (ctx.tx[sfAmount] <= beast::zero)) return temBAD_AMOUNT; @@ -189,7 +190,7 @@ PayChanCreate::preflight(PreflightContext const& ctx) if (!publicKeyType(ctx.tx[sfPublicKey])) return temMALFORMED; - return preflight2(ctx); + return tesSUCCESS; } TER @@ -330,19 +331,20 @@ PayChanFund::makeTxConsequences(PreflightContext const& ctx) return TxConsequences{ctx.tx, ctx.tx[sfAmount].xrp()}; } +std::uint32_t +PayChanFund::getFlagsMask(PreflightContext const& ctx) +{ + // 0 means "Allow any flags" + return ctx.rules.enabled(fix1543) ? 
tfUniversalMask : 0; +} + NotTEC PayChanFund::preflight(PreflightContext const& ctx) { - if (ctx.rules.enabled(fix1543) && ctx.tx.getFlags() & tfUniversalMask) - return temINVALID_FLAG; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - if (!isXRP(ctx.tx[sfAmount]) || (ctx.tx[sfAmount] <= beast::zero)) return temBAD_AMOUNT; - return preflight2(ctx); + return tesSUCCESS; } TER @@ -420,16 +422,23 @@ PayChanFund::doApply() //------------------------------------------------------------------------------ +bool +PayChanClaim::checkExtraFeatures(PreflightContext const& ctx) +{ + return !ctx.tx.isFieldPresent(sfCredentialIDs) || + ctx.rules.enabled(featureCredentials); +} + +std::uint32_t +PayChanClaim::getFlagsMask(PreflightContext const& ctx) +{ + // 0 means "Allow any flags" + return ctx.rules.enabled(fix1543) ? tfPayChanClaimMask : 0; +} + NotTEC PayChanClaim::preflight(PreflightContext const& ctx) { - if (ctx.tx.isFieldPresent(sfCredentialIDs) && - !ctx.rules.enabled(featureCredentials)) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - auto const bal = ctx.tx[~sfBalance]; if (bal && (!isXRP(*bal) || *bal <= beast::zero)) return temBAD_AMOUNT; @@ -444,9 +453,6 @@ PayChanClaim::preflight(PreflightContext const& ctx) { auto const flags = ctx.tx.getFlags(); - if (ctx.rules.enabled(fix1543) && (flags & tfPayChanClaimMask)) - return temINVALID_FLAG; - if ((flags & tfClose) && (flags & tfRenew)) return temMALFORMED; } @@ -481,7 +487,7 @@ PayChanClaim::preflight(PreflightContext const& ctx) !isTesSuccess(err)) return err; - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/PayChan.h b/src/xrpld/app/tx/detail/PayChan.h index 2e09c473dc..b25a4529be 100644 --- a/src/xrpld/app/tx/detail/PayChan.h +++ b/src/xrpld/app/tx/detail/PayChan.h @@ -36,6 +36,9 @@ public: static TxConsequences makeTxConsequences(PreflightContext const& ctx); + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); @@ -62,6 +65,9 @@ public: static TxConsequences makeTxConsequences(PreflightContext const& ctx); + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); @@ -82,6 +88,12 @@ public: { } + static bool + checkExtraFeatures(PreflightContext const& ctx); + + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/Payment.cpp b/src/xrpld/app/tx/detail/Payment.cpp index e622d54498..8bc0e891d0 100644 --- a/src/xrpld/app/tx/detail/Payment.cpp +++ b/src/xrpld/app/tx/detail/Payment.cpp @@ -65,20 +65,33 @@ getMaxSourceAmount( dstAmount < beast::zero); } -NotTEC -Payment::preflight(PreflightContext const& ctx) +bool +Payment::checkExtraFeatures(PreflightContext const& ctx) { if (ctx.tx.isFieldPresent(sfCredentialIDs) && !ctx.rules.enabled(featureCredentials)) - return temDISABLED; - + return false; if (ctx.tx.isFieldPresent(sfDomainID) && !ctx.rules.enabled(featurePermissionedDEX)) - return temDISABLED; + return false; - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; + return true; +} +std::uint32_t +Payment::getFlagsMask(PreflightContext const& ctx) +{ + auto& tx = ctx.tx; + + STAmount const dstAmount(tx.getFieldAmount(sfAmount)); + bool const mptDirect = dstAmount.holds(); + + return mptDirect ? 
tfMPTPaymentMask : tfPaymentMask; +} + +NotTEC +Payment::preflight(PreflightContext const& ctx) +{ auto& tx = ctx.tx; auto& j = ctx.j; @@ -90,14 +103,6 @@ Payment::preflight(PreflightContext const& ctx) std::uint32_t const txFlags = tx.getFlags(); - std::uint32_t paymentMask = mptDirect ? tfMPTPaymentMask : tfPaymentMask; - - if (txFlags & paymentMask) - { - JLOG(j.trace()) << "Malformed transaction: Invalid flags set."; - return temINVALID_FLAG; - } - if (mptDirect && ctx.tx.isFieldPresent(sfPaths)) return temMALFORMED; @@ -242,7 +247,7 @@ Payment::preflight(PreflightContext const& ctx) !isTesSuccess(err)) return err; - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/Payment.h b/src/xrpld/app/tx/detail/Payment.h index 010a2453cf..04bba390e2 100644 --- a/src/xrpld/app/tx/detail/Payment.h +++ b/src/xrpld/app/tx/detail/Payment.h @@ -42,6 +42,12 @@ public: static TxConsequences makeTxConsequences(PreflightContext const& ctx); + static bool + checkExtraFeatures(PreflightContext const& ctx); + + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/PermissionedDomainDelete.cpp b/src/xrpld/app/tx/detail/PermissionedDomainDelete.cpp index 76224ba6b3..9fe48ba515 100644 --- a/src/xrpld/app/tx/detail/PermissionedDomainDelete.cpp +++ b/src/xrpld/app/tx/detail/PermissionedDomainDelete.cpp @@ -27,23 +27,11 @@ namespace ripple { NotTEC PermissionedDomainDelete::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featurePermissionedDomains)) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - - if (ctx.tx.getFlags() & tfUniversalMask) - { - JLOG(ctx.j.debug()) << "PermissionedDomainDelete: invalid flags."; - return temINVALID_FLAG; - } - auto const domain = ctx.tx.getFieldH256(sfDomainID); if (domain == beast::zero) return temMALFORMED; - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/PermissionedDomainSet.cpp b/src/xrpld/app/tx/detail/PermissionedDomainSet.cpp index cc25809aa1..d9fa481bb6 100644 --- a/src/xrpld/app/tx/detail/PermissionedDomainSet.cpp +++ b/src/xrpld/app/tx/detail/PermissionedDomainSet.cpp @@ -28,22 +28,15 @@ namespace ripple { +bool +PermissionedDomainSet::checkExtraFeatures(PreflightContext const& ctx) +{ + return ctx.rules.enabled(featureCredentials); +} + NotTEC PermissionedDomainSet::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featurePermissionedDomains) || - !ctx.rules.enabled(featureCredentials)) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - - if (ctx.tx.getFlags() & tfUniversalMask) - { - JLOG(ctx.j.debug()) << "PermissionedDomainSet: invalid flags."; - return temINVALID_FLAG; - } - if (auto err = credentials::checkArray( ctx.tx.getFieldArray(sfAcceptedCredentials), maxPermissionedDomainCredentialsArraySize, @@ -55,7 +48,7 @@ PermissionedDomainSet::preflight(PreflightContext const& ctx) if (domain && *domain == beast::zero) return temMALFORMED; - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/PermissionedDomainSet.h b/src/xrpld/app/tx/detail/PermissionedDomainSet.h index 502d576e32..ed27896a3b 100644 --- a/src/xrpld/app/tx/detail/PermissionedDomainSet.h +++ b/src/xrpld/app/tx/detail/PermissionedDomainSet.h @@ -33,6 +33,9 @@ public: { } + static bool + checkExtraFeatures(PreflightContext const& ctx); + static NotTEC 
preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/SetAccount.cpp b/src/xrpld/app/tx/detail/SetAccount.cpp index dc84c7cc7e..c2129ba1e1 100644 --- a/src/xrpld/app/tx/detail/SetAccount.cpp +++ b/src/xrpld/app/tx/detail/SetAccount.cpp @@ -57,23 +57,20 @@ SetAccount::makeTxConsequences(PreflightContext const& ctx) return TxConsequences{ctx.tx, getTxConsequencesCategory(ctx.tx)}; } +std::uint32_t +SetAccount::getFlagsMask(PreflightContext const& ctx) +{ + return tfAccountSetMask; +} + NotTEC SetAccount::preflight(PreflightContext const& ctx) { - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - auto& tx = ctx.tx; auto& j = ctx.j; std::uint32_t const uTxFlags = tx.getFlags(); - if (uTxFlags & tfAccountSetMask) - { - JLOG(j.trace()) << "Malformed transaction: Invalid flags set."; - return temINVALID_FLAG; - } - std::uint32_t const uSetFlag = tx.getFieldU32(sfSetFlag); std::uint32_t const uClearFlag = tx.getFieldU32(sfClearFlag); @@ -186,7 +183,7 @@ SetAccount::preflight(PreflightContext const& ctx) return temMALFORMED; } - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/SetAccount.h b/src/xrpld/app/tx/detail/SetAccount.h index ed4242c250..bcc0a61b1b 100644 --- a/src/xrpld/app/tx/detail/SetAccount.h +++ b/src/xrpld/app/tx/detail/SetAccount.h @@ -38,6 +38,9 @@ public: static TxConsequences makeTxConsequences(PreflightContext const& ctx); + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/SetOracle.cpp b/src/xrpld/app/tx/detail/SetOracle.cpp index 78ff8e2953..81f20b4342 100644 --- a/src/xrpld/app/tx/detail/SetOracle.cpp +++ b/src/xrpld/app/tx/detail/SetOracle.cpp @@ -39,15 +39,6 @@ tokenPairKey(STObject const& pair) NotTEC SetOracle::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featurePriceOracle)) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - - if (ctx.tx.getFlags() & tfUniversalMask) - return temINVALID_FLAG; - auto const& dataSeries = ctx.tx.getFieldArray(sfPriceDataSeries); if (dataSeries.empty()) return temARRAY_EMPTY; @@ -64,7 +55,7 @@ SetOracle::preflight(PreflightContext const& ctx) isInvalidLength(sfAssetClass, maxOracleSymbolClass)) return temMALFORMED; - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/SetRegularKey.cpp b/src/xrpld/app/tx/detail/SetRegularKey.cpp index 92d130a15a..4e063e7d1f 100644 --- a/src/xrpld/app/tx/detail/SetRegularKey.cpp +++ b/src/xrpld/app/tx/detail/SetRegularKey.cpp @@ -51,18 +51,6 @@ SetRegularKey::calculateBaseFee(ReadView const& view, STTx const& tx) NotTEC SetRegularKey::preflight(PreflightContext const& ctx) { - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - - std::uint32_t const uTxFlags = ctx.tx.getFlags(); - - if (uTxFlags & tfUniversalMask) - { - JLOG(ctx.j.trace()) << "Malformed transaction: Invalid flags set."; - - return temINVALID_FLAG; - } - if (ctx.rules.enabled(fixMasterKeyAsRegularKey) && ctx.tx.isFieldPresent(sfRegularKey) && (ctx.tx.getAccountID(sfRegularKey) == ctx.tx.getAccountID(sfAccount))) @@ -70,7 +58,7 @@ SetRegularKey::preflight(PreflightContext const& ctx) return temBAD_REGKEY; } - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/SetSignerList.cpp b/src/xrpld/app/tx/detail/SetSignerList.cpp index 60f92cf87b..b5d9d4d5b8 100644 --- 
a/src/xrpld/app/tx/detail/SetSignerList.cpp +++ b/src/xrpld/app/tx/detail/SetSignerList.cpp @@ -77,19 +77,16 @@ SetSignerList::determineOperation( return std::make_tuple(tesSUCCESS, quorum, sign, op); } +std::uint32_t +SetSignerList::getFlagsMask(PreflightContext const& ctx) +{ + // 0 means "Allow any flags" + return ctx.rules.enabled(fixInvalidTxFlags) ? tfUniversalMask : 0; +} + NotTEC SetSignerList::preflight(PreflightContext const& ctx) { - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - - if (ctx.rules.enabled(fixInvalidTxFlags) && - (ctx.tx.getFlags() & tfUniversalMask)) - { - JLOG(ctx.j.debug()) << "SetSignerList: invalid flags."; - return temINVALID_FLAG; - } - auto const result = determineOperation(ctx.tx, ctx.flags, ctx.j); if (std::get<0>(result) != tesSUCCESS) @@ -119,7 +116,7 @@ SetSignerList::preflight(PreflightContext const& ctx) } } - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/SetSignerList.h b/src/xrpld/app/tx/detail/SetSignerList.h index 1827aca975..be2df8152e 100644 --- a/src/xrpld/app/tx/detail/SetSignerList.h +++ b/src/xrpld/app/tx/detail/SetSignerList.h @@ -51,6 +51,9 @@ public: { } + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/SetTrust.cpp b/src/xrpld/app/tx/detail/SetTrust.cpp index 87f1721b29..21d4534f93 100644 --- a/src/xrpld/app/tx/detail/SetTrust.cpp +++ b/src/xrpld/app/tx/detail/SetTrust.cpp @@ -67,23 +67,20 @@ computeFreezeFlags( namespace ripple { +std::uint32_t +SetTrust::getFlagsMask(PreflightContext const& ctx) +{ + return tfTrustSetMask; +} + NotTEC SetTrust::preflight(PreflightContext const& ctx) { - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - auto& tx = ctx.tx; auto& j = ctx.j; std::uint32_t const uTxFlags = tx.getFlags(); - if (uTxFlags & tfTrustSetMask) - { - JLOG(j.trace()) << "Malformed transaction: Invalid flags set."; - return temINVALID_FLAG; - } - if (!ctx.rules.enabled(featureDeepFreeze)) { // Even though the deep freeze flags are included in the @@ -127,7 +124,7 @@ SetTrust::preflight(PreflightContext const& ctx) return temDST_NEEDED; } - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/SetTrust.h b/src/xrpld/app/tx/detail/SetTrust.h index a0476918ac..443080bf74 100644 --- a/src/xrpld/app/tx/detail/SetTrust.h +++ b/src/xrpld/app/tx/detail/SetTrust.h @@ -35,6 +35,9 @@ public: { } + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/Transactor.cpp b/src/xrpld/app/tx/detail/Transactor.cpp index fd396e4556..1c98233964 100644 --- a/src/xrpld/app/tx/detail/Transactor.cpp +++ b/src/xrpld/app/tx/detail/Transactor.cpp @@ -41,7 +41,7 @@ namespace ripple { /** Performs early sanity checks on the txid */ NotTEC -preflight0(PreflightContext const& ctx) +preflight0(PreflightContext const& ctx, std::uint32_t flagMask) { if (isPseudoTx(ctx.tx) && ctx.tx.isFlag(tfInnerBatchTxn)) { @@ -83,12 +83,84 @@ preflight0(PreflightContext const& ctx) return temINVALID; } + if (ctx.tx.getFlags() & flagMask) + { + JLOG(ctx.j.debug()) + << ctx.tx.peekAtField(sfTransactionType).getFullText() + << ": invalid flags."; + return temINVALID_FLAG; + } + return tesSUCCESS; } +namespace detail { + +/** Checks the validity of the transactor signing key. + * + * Normally called from preflight1. 
+ */ +NotTEC +preflightCheckSigningKey(STObject const& sigObject, beast::Journal j) +{ + if (auto const spk = sigObject.getFieldVL(sfSigningPubKey); + !spk.empty() && !publicKeyType(makeSlice(spk))) + { + JLOG(j.debug()) << "preflightCheckSigningKey: invalid signing key"; + return temBAD_SIGNATURE; + } + return tesSUCCESS; +} + +std::optional +preflightCheckSimulateKeys( + ApplyFlags flags, + STObject const& sigObject, + beast::Journal j) +{ + if (flags & tapDRY_RUN) // simulation + { + std::optional const signature = sigObject[~sfTxnSignature]; + if (signature && !signature->empty()) + { + // NOTE: This code should never be hit because it's checked in the + // `simulate` RPC + return temINVALID; // LCOV_EXCL_LINE + } + + if (!sigObject.isFieldPresent(sfSigners)) + { + // no signers, no signature - a valid simulation + return tesSUCCESS; + } + + for (auto const& signer : sigObject.getFieldArray(sfSigners)) + { + if (signer.isFieldPresent(sfTxnSignature) && + !signer[sfTxnSignature].empty()) + { + // NOTE: This code should never be hit because it's + // checked in the `simulate` RPC + return temINVALID; // LCOV_EXCL_LINE + } + } + + Slice const signingPubKey = sigObject[sfSigningPubKey]; + if (!signingPubKey.empty()) + { + // trying to single-sign _and_ multi-sign a transaction + return temINVALID; + } + return tesSUCCESS; + } + return {}; +} + +} // namespace detail + /** Performs early sanity checks on the account and fee fields */ NotTEC -preflight1(PreflightContext const& ctx) +Transactor::preflight1(PreflightContext const& ctx, std::uint32_t flagMask) { // This is inappropriate in preflight0, because only Change transactions // skip this function, and those do not allow an sfTicketSequence field. @@ -107,8 +179,7 @@ preflight1(PreflightContext const& ctx) return temBAD_SIGNER; } - auto const ret = preflight0(ctx); - if (!isTesSuccess(ret)) + if (auto const ret = preflight0(ctx, flagMask)) return ret; auto const id = ctx.tx.getAccountID(sfAccount); @@ -126,13 +197,8 @@ preflight1(PreflightContext const& ctx) return temBAD_FEE; } - auto const spk = ctx.tx.getSigningPubKey(); - - if (!spk.empty() && !publicKeyType(makeSlice(spk))) - { - JLOG(ctx.j.debug()) << "preflight1: invalid signing key"; - return temBAD_SIGNATURE; - } + if (auto const ret = detail::preflightCheckSigningKey(ctx.tx, ctx.j)) + return ret; // An AccountTxnID field constrains transaction ordering more than the // Sequence field. 
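The per-transactor flag checks that used to be repeated in every preflight now collapse into the single test in preflight0: any bit set in the transaction's Flags field that is also set in the mask returned by getFlagsMask fails with temINVALID_FLAG. Below is a self-contained sketch of that bitwise test; the tf-style constants and the bool return are placeholders, not the protocol's actual definitions or error codes.

#include <cstdint>
#include <iostream>

// Placeholder values, chosen only to exercise the arithmetic.
constexpr std::uint32_t exampleUniversalFlag = 0x80000000u;           // always allowed
constexpr std::uint32_t exampleCustomFlag = 0x00010000u;              // allowed by one transactor only
constexpr std::uint32_t exampleUniversalMask = ~exampleUniversalFlag; // base-class mask
constexpr std::uint32_t exampleCustomMask =
    ~(exampleUniversalFlag | exampleCustomFlag);                      // that transactor's mask

// The generic check now performed once in preflight0: a transaction is
// rejected when any of its flag bits fall inside the disallowed mask.
bool flagsAllowed(std::uint32_t txFlags, std::uint32_t flagMask)
{
    return (txFlags & flagMask) == 0;
}

int main()
{
    std::cout << flagsAllowed(exampleUniversalFlag, exampleCustomMask) << '\n'; // 1: allowed
    std::cout << flagsAllowed(exampleCustomFlag, exampleUniversalMask) << '\n'; // 0: rejected
}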
Tickets, on the other hand, reduce ordering @@ -157,41 +223,13 @@ preflight1(PreflightContext const& ctx) /** Checks whether the signature appears valid */ NotTEC -preflight2(PreflightContext const& ctx) +Transactor::preflight2(PreflightContext const& ctx) { - if (ctx.flags & tapDRY_RUN) // simulation - { - if (!ctx.tx.getSignature().empty()) - { - // NOTE: This code should never be hit because it's checked in the - // `simulate` RPC - return temINVALID; // LCOV_EXCL_LINE - } - - if (!ctx.tx.isFieldPresent(sfSigners)) - { - // no signers, no signature - a valid simulation - return tesSUCCESS; - } - - for (auto const& signer : ctx.tx.getFieldArray(sfSigners)) - { - if (signer.isFieldPresent(sfTxnSignature) && - !signer[sfTxnSignature].empty()) - { - // NOTE: This code should never be hit because it's - // checked in the `simulate` RPC - return temINVALID; // LCOV_EXCL_LINE - } - } - - if (!ctx.tx.getSigningPubKey().empty()) - { - // trying to single-sign _and_ multi-sign a transaction - return temINVALID; - } - return tesSUCCESS; - } + if (auto const ret = + detail::preflightCheckSimulateKeys(ctx.flags, ctx.tx, ctx.j)) + // Skips following checks if the transaction is being simulated, + // regardless of success or failure + return *ret; auto const sigValid = checkValidity( ctx.app.getHashRouter(), ctx.tx, ctx.rules, ctx.app.config()); @@ -213,6 +251,28 @@ Transactor::Transactor(ApplyContext& ctx) { } +bool +Transactor::validDataLength( + std::optional const& slice, + std::size_t maxLength) +{ + if (!slice) + return true; + return !slice->empty() && slice->length() <= maxLength; +} + +std::uint32_t +Transactor::getFlagsMask(PreflightContext const& ctx) +{ + return tfUniversalMask; +} + +NotTEC +Transactor::preflightSigValidated(PreflightContext const& ctx) +{ + return tesSUCCESS; +} + TER Transactor::checkPermission(ReadView const& view, STTx const& tx) { @@ -247,6 +307,27 @@ Transactor::calculateBaseFee(ReadView const& view, STTx const& tx) return baseFee + (signerCount * baseFee); } +// Returns the fee in fee units, not scaled for load. +XRPAmount +Transactor::calculateOwnerReserveFee(ReadView const& view, STTx const& tx) +{ + // Assumption: One reserve increment is typically much greater than one base + // fee. + // This check is in an assert so that it will come to the attention of + // developers if that assumption is not correct. If the owner reserve is not + // significantly larger than the base fee (or even worse, smaller), we will + // need to rethink charging an owner reserve as a transaction fee. + // TODO: This function is static, and I don't want to add more parameters. + // When it is finally refactored to be in a context that has access to the + // Application, include "app().overlay().networkID() > 2 ||" in the + // condition. 
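The reserve-as-fee comment above leans on a proportionality assumption that is easy to sanity-check numerically. Here is a stand-alone sketch of the same guard; the Fees struct and the drop values are made up for illustration and are not the network's current base fee or reserve increment.

#include <cassert>
#include <cstdint>
#include <iostream>

// Hypothetical fee schedule in drops; real values come from the ledger.
struct Fees
{
    std::uint64_t base;       // reference transaction cost
    std::uint64_t increment;  // owner reserve per ledger object
};

// Mirrors the sanity check: charging the owner reserve as a transaction
// fee only makes sense while the increment dwarfs the base fee.
std::uint64_t ownerReserveFee(Fees const& fees)
{
    assert(fees.increment > fees.base * 100);
    return fees.increment;
}

int main()
{
    Fees const example{10, 200'000};  // placeholder numbers only
    std::cout << ownerReserveFee(example) << " drops\n";
}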
+ XRPL_ASSERT( + view.fees().increment > view.fees().base * 100, + "ripple::Transactor::calculateOwnerReserveFee : Owner reserve is " + "reasonable"); + return view.fees().increment; +} + XRPAmount Transactor::minimumFee( Application& app, diff --git a/src/xrpld/app/tx/detail/Transactor.h b/src/xrpld/app/tx/detail/Transactor.h index e94b93523d..88b0664ea2 100644 --- a/src/xrpld/app/tx/detail/Transactor.h +++ b/src/xrpld/app/tx/detail/Transactor.h @@ -134,6 +134,8 @@ public: class TxConsequences; struct PreflightResult; +// Needed for preflight specialization +class Change; class Transactor { @@ -198,6 +200,35 @@ public: static XRPAmount calculateBaseFee(ReadView const& view, STTx const& tx); + /* Do NOT define an invokePreflight function in a derived class. + Instead, define: + + // Optional if the transaction is gated on an amendment that + // isn't specified in transactions.macro + static bool + checkExtraFeatures(PreflightContext const& ctx); + + // Optional if the transaction uses any flags other than tfUniversal + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + + // Required, even if it just returns tesSUCCESS. + static NotTEC + preflight(PreflightContext const& ctx); + + // Optional, rarely needed, if the transaction does any expensive + // checks after the signature is verified. + static NotTEC preflightSigValidated(PreflightContext const& ctx); + + * Do not try to call preflight1 or preflight2 directly. + * Do not check whether relevant amendments are enabled in preflight. + Instead, define checkExtraFeatures. + * Do not check flags in preflight. Instead, define getFlagsMask. + */ + template + static NotTEC + invokePreflight(PreflightContext const& ctx); + static TER preclaim(PreclaimContext const& ctx) { @@ -246,6 +277,36 @@ protected: Fees const& fees, ApplyFlags flags); + // Returns the fee in fee units, not scaled for load. + static XRPAmount + calculateOwnerReserveFee(ReadView const& view, STTx const& tx); + + // Base class always returns true + static bool + checkExtraFeatures(PreflightContext const& ctx); + + // Base class always returns tfUniversalMask + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + + // Base class always returns tesSUCCESS + static NotTEC + preflightSigValidated(PreflightContext const& ctx); + + static bool + validDataLength(std::optional const& slice, std::size_t maxLength); + + template + static bool + validNumericRange(std::optional value, T max, T min = {}); + + template + static bool + validNumericRange( + std::optional value, + unit::ValueUnit max, + unit::ValueUnit min = {}); + private: std::pair reset(XRPAmount fee); @@ -270,19 +331,106 @@ private: beast::Journal j); void trapTransaction(uint256) const; + + /** Performs early sanity checks on the account and fee fields. + + (And passes flagMask to preflight0) + + Do not try to call preflight1 from preflight() in derived classes. See + the description of invokePreflight for details. + */ + static NotTEC + preflight1(PreflightContext const& ctx, std::uint32_t flagMask); + + /** Checks whether the signature appears valid + + Do not try to call preflight2 from preflight() in derived classes. See + the description of invokePreflight for details. 
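To make the contract described in the invokePreflight comment block concrete, here is a compressed, self-contained sketch of the order in which the hooks are driven. Everything in it, the Result enum, Ctx, BaseSketch, ExampleTx, the stub signature check and the mask values, is invented for illustration; only the sequencing (extra feature gate, flag mask, transactor preflight, signature check, post-signature hook) reflects the code in this patch.

#include <cstdint>
#include <iostream>

enum class Result { tesSUCCESS, temDISABLED, temINVALID_FLAG };

struct Ctx
{
    std::uint32_t txFlags;
    bool amendmentEnabled;
};

struct BaseSketch
{
    // Defaults mirroring the base-class behaviour described above.
    static bool checkExtraFeatures(Ctx const&) { return true; }
    static std::uint32_t getFlagsMask(Ctx const&) { return 0x7FFFFFFFu; }
    static Result preflightSigValidated(Ctx const&) { return Result::tesSUCCESS; }
};

// A made-up transactor that customises two of the hooks.
struct ExampleTx : BaseSketch
{
    static bool checkExtraFeatures(Ctx const& ctx) { return ctx.amendmentEnabled; }
    static std::uint32_t getFlagsMask(Ctx const&) { return 0x7FFFFFF0u; }
    static Result preflight(Ctx const&) { return Result::tesSUCCESS; }
};

Result signatureCheckStub(Ctx const&) { return Result::tesSUCCESS; }  // stands in for preflight2

template <class T>
Result invokePreflightSketch(Ctx const& ctx)
{
    if (!T::checkExtraFeatures(ctx))
        return Result::temDISABLED;
    if (ctx.txFlags & T::getFlagsMask(ctx))  // the shared flag test
        return Result::temINVALID_FLAG;
    if (auto const r = T::preflight(ctx); r != Result::tesSUCCESS)
        return r;
    if (auto const r = signatureCheckStub(ctx); r != Result::tesSUCCESS)
        return r;
    return T::preflightSigValidated(ctx);
}

int main()
{
    std::cout << (invokePreflightSketch<ExampleTx>({0x00000001u, true}) == Result::tesSUCCESS) << '\n';
    std::cout << (invokePreflightSketch<ExampleTx>({0x00000010u, true}) == Result::temINVALID_FLAG) << '\n';
    std::cout << (invokePreflightSketch<ExampleTx>({0x00000000u, false}) == Result::temDISABLED) << '\n';
}

A derived transactor only supplies the hooks it actually needs; the base-class defaults shown in the patch cover the rest.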
+ */ + static NotTEC + preflight2(PreflightContext const& ctx); }; -/** Performs early sanity checks on the txid */ -NotTEC -preflight0(PreflightContext const& ctx); +inline bool +Transactor::checkExtraFeatures(PreflightContext const& ctx) +{ + return true; +} -/** Performs early sanity checks on the account and fee fields */ +/** Performs early sanity checks on the txid and flags */ NotTEC -preflight1(PreflightContext const& ctx); +preflight0(PreflightContext const& ctx, std::uint32_t flagMask); -/** Checks whether the signature appears valid */ +namespace detail { + +/** Checks the validity of the transactor signing key. + * + * Normally called from preflight1 with ctx.tx. + */ NotTEC -preflight2(PreflightContext const& ctx); +preflightCheckSigningKey(STObject const& sigObject, beast::Journal j); + +/** Checks the special signing key state needed for simulation + * + * Normally called from preflight2 with ctx.tx. + */ +std::optional +preflightCheckSimulateKeys( + ApplyFlags flags, + STObject const& sigObject, + beast::Journal j); +} // namespace detail + +// Defined in Change.cpp +template <> +NotTEC +Transactor::invokePreflight(PreflightContext const& ctx); + +template +NotTEC +Transactor::invokePreflight(PreflightContext const& ctx) +{ + // Using this lookup does NOT require checking the fixDelegateV1_1. The data + // exists regardless of whether it is enabled. + auto const feature = + Permission::getInstance().getTxFeature(ctx.tx.getTxnType()); + + if (feature && !ctx.rules.enabled(*feature)) + return temDISABLED; + + if (!T::checkExtraFeatures(ctx)) + return temDISABLED; + + if (auto const ret = preflight1(ctx, T::getFlagsMask(ctx))) + return ret; + + if (auto const ret = T::preflight(ctx)) + return ret; + + if (auto const ret = preflight2(ctx)) + return ret; + + return T::preflightSigValidated(ctx); +} + +template +bool +Transactor::validNumericRange(std::optional value, T max, T min) +{ + if (!value) + return true; + return value >= min && value <= max; +} + +template +bool +Transactor::validNumericRange( + std::optional value, + unit::ValueUnit max, + unit::ValueUnit min) +{ + return validNumericRange(value, max.value(), min.value()); +} } // namespace ripple diff --git a/src/xrpld/app/tx/detail/VaultClawback.cpp b/src/xrpld/app/tx/detail/VaultClawback.cpp index 061aacdbb8..45a56a6292 100644 --- a/src/xrpld/app/tx/detail/VaultClawback.cpp +++ b/src/xrpld/app/tx/detail/VaultClawback.cpp @@ -35,15 +35,6 @@ namespace ripple { NotTEC VaultClawback::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureSingleAssetVault)) - return temDISABLED; - - if (auto const ter = preflight1(ctx)) - return ter; - - if (ctx.tx.getFlags() & tfUniversalMask) - return temINVALID_FLAG; - if (ctx.tx[sfVaultID] == beast::zero) { JLOG(ctx.j.debug()) << "VaultClawback: zero/empty vault ID."; @@ -78,7 +69,7 @@ VaultClawback::preflight(PreflightContext const& ctx) } } - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/VaultCreate.cpp b/src/xrpld/app/tx/detail/VaultCreate.cpp index 855275bf4e..9447976a32 100644 --- a/src/xrpld/app/tx/detail/VaultCreate.cpp +++ b/src/xrpld/app/tx/detail/VaultCreate.cpp @@ -35,28 +35,27 @@ namespace ripple { +bool +VaultCreate::checkExtraFeatures(PreflightContext const& ctx) +{ + if (!ctx.rules.enabled(featureMPTokensV1)) + return false; + + return !ctx.tx.isFieldPresent(sfDomainID) || + ctx.rules.enabled(featurePermissionedDomains); +} + +std::uint32_t +VaultCreate::getFlagsMask(PreflightContext const& ctx) +{ + return 
tfVaultCreateMask; +} + NotTEC VaultCreate::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureSingleAssetVault) || - !ctx.rules.enabled(featureMPTokensV1)) - return temDISABLED; - - if (ctx.tx.isFieldPresent(sfDomainID) && - !ctx.rules.enabled(featurePermissionedDomains)) - return temDISABLED; - - if (auto const ter = preflight1(ctx)) - return ter; - - if (ctx.tx.getFlags() & tfVaultCreateMask) - return temINVALID_FLAG; - - if (auto const data = ctx.tx[~sfData]) - { - if (data->empty() || data->length() > maxDataPayloadLength) - return temMALFORMED; - } + if (!validDataLength(ctx.tx[~sfData], maxDataPayloadLength)) + return temMALFORMED; if (auto const withdrawalPolicy = ctx.tx[~sfWithdrawalPolicy]) { @@ -96,14 +95,14 @@ VaultCreate::preflight(PreflightContext const& ctx) return temMALFORMED; } - return preflight2(ctx); + return tesSUCCESS; } XRPAmount VaultCreate::calculateBaseFee(ReadView const& view, STTx const& tx) { // One reserve increment is typically much greater than one base fee. - return view.fees().increment; + return calculateOwnerReserveFee(view, tx); } TER @@ -112,32 +111,8 @@ VaultCreate::preclaim(PreclaimContext const& ctx) auto const vaultAsset = ctx.tx[sfAsset]; auto const account = ctx.tx[sfAccount]; - if (vaultAsset.native()) - ; // No special checks for XRP - else if (vaultAsset.holds()) - { - auto mptID = vaultAsset.get().getMptID(); - auto issuance = ctx.view.read(keylet::mptIssuance(mptID)); - if (!issuance) - return tecOBJECT_NOT_FOUND; - if (!issuance->isFlag(lsfMPTCanTransfer)) - { - // NOTE: flag lsfMPTCanTransfer is immutable, so this is debug in - // VaultCreate only; in other vault function it's an error. - JLOG(ctx.j.debug()) - << "VaultCreate: vault assets are non-transferable."; - return tecNO_AUTH; - } - } - else if (vaultAsset.holds()) - { - auto const issuer = - ctx.view.read(keylet::account(vaultAsset.getIssuer())); - if (!issuer) - return terNO_ACCOUNT; - else if (!issuer->isFlag(lsfDefaultRipple)) - return terNO_RIPPLE; - } + if (auto const ter = canAddHolding(ctx.view, vaultAsset)) + return ter; // Check for pseudo-account issuers - we do not want a vault to hold such // assets (e.g. 
MPT shares to other vaults or AMM LPTokens) as they would be diff --git a/src/xrpld/app/tx/detail/VaultCreate.h b/src/xrpld/app/tx/detail/VaultCreate.h index 5555644629..3f952d540a 100644 --- a/src/xrpld/app/tx/detail/VaultCreate.h +++ b/src/xrpld/app/tx/detail/VaultCreate.h @@ -33,6 +33,12 @@ public: { } + static bool + checkExtraFeatures(PreflightContext const& ctx); + + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/VaultDelete.cpp b/src/xrpld/app/tx/detail/VaultDelete.cpp index 5e4e16a99b..ab7db78956 100644 --- a/src/xrpld/app/tx/detail/VaultDelete.cpp +++ b/src/xrpld/app/tx/detail/VaultDelete.cpp @@ -31,22 +31,13 @@ namespace ripple { NotTEC VaultDelete::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureSingleAssetVault)) - return temDISABLED; - - if (auto const ter = preflight1(ctx)) - return ter; - - if (ctx.tx.getFlags() & tfUniversalMask) - return temINVALID_FLAG; - if (ctx.tx[sfVaultID] == beast::zero) { JLOG(ctx.j.debug()) << "VaultDelete: zero/empty vault ID."; return temMALFORMED; } - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/VaultDeposit.cpp b/src/xrpld/app/tx/detail/VaultDeposit.cpp index 3d346d63a2..75cf81b0b0 100644 --- a/src/xrpld/app/tx/detail/VaultDeposit.cpp +++ b/src/xrpld/app/tx/detail/VaultDeposit.cpp @@ -36,15 +36,6 @@ namespace ripple { NotTEC VaultDeposit::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureSingleAssetVault)) - return temDISABLED; - - if (auto const ter = preflight1(ctx)) - return ter; - - if (ctx.tx.getFlags() & tfUniversalMask) - return temINVALID_FLAG; - if (ctx.tx[sfVaultID] == beast::zero) { JLOG(ctx.j.debug()) << "VaultDeposit: zero/empty vault ID."; @@ -54,7 +45,7 @@ VaultDeposit::preflight(PreflightContext const& ctx) if (ctx.tx[sfAmount] <= beast::zero) return temBAD_AMOUNT; - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/VaultSet.cpp b/src/xrpld/app/tx/detail/VaultSet.cpp index 5a519f81cf..6057e40cfa 100644 --- a/src/xrpld/app/tx/detail/VaultSet.cpp +++ b/src/xrpld/app/tx/detail/VaultSet.cpp @@ -30,28 +30,22 @@ namespace ripple { +bool +VaultSet::checkExtraFeatures(PreflightContext const& ctx) +{ + return !ctx.tx.isFieldPresent(sfDomainID) || + ctx.rules.enabled(featurePermissionedDomains); +} + NotTEC VaultSet::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureSingleAssetVault)) - return temDISABLED; - - if (ctx.tx.isFieldPresent(sfDomainID) && - !ctx.rules.enabled(featurePermissionedDomains)) - return temDISABLED; - - if (auto const ter = preflight1(ctx)) - return ter; - if (ctx.tx[sfVaultID] == beast::zero) { JLOG(ctx.j.debug()) << "VaultSet: zero/empty vault ID."; return temMALFORMED; } - if (ctx.tx.getFlags() & tfUniversalMask) - return temINVALID_FLAG; - if (auto const data = ctx.tx[~sfData]) { if (data->empty() || data->length() > maxDataPayloadLength) @@ -78,7 +72,7 @@ VaultSet::preflight(PreflightContext const& ctx) return temMALFORMED; } - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/VaultSet.h b/src/xrpld/app/tx/detail/VaultSet.h index f16aa6c284..b3eecbbab5 100644 --- a/src/xrpld/app/tx/detail/VaultSet.h +++ b/src/xrpld/app/tx/detail/VaultSet.h @@ -33,6 +33,9 @@ public: { } + static bool + checkExtraFeatures(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); diff --git 
a/src/xrpld/app/tx/detail/VaultWithdraw.cpp b/src/xrpld/app/tx/detail/VaultWithdraw.cpp index 63cc22fe48..509b795058 100644 --- a/src/xrpld/app/tx/detail/VaultWithdraw.cpp +++ b/src/xrpld/app/tx/detail/VaultWithdraw.cpp @@ -33,15 +33,6 @@ namespace ripple { NotTEC VaultWithdraw::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureSingleAssetVault)) - return temDISABLED; - - if (auto const ter = preflight1(ctx)) - return ter; - - if (ctx.tx.getFlags() & tfUniversalMask) - return temINVALID_FLAG; - if (ctx.tx[sfVaultID] == beast::zero) { JLOG(ctx.j.debug()) << "VaultWithdraw: zero/empty vault ID."; @@ -68,7 +59,7 @@ VaultWithdraw::preflight(PreflightContext const& ctx) return temMALFORMED; } - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/XChainBridge.cpp b/src/xrpld/app/tx/detail/XChainBridge.cpp index 92e3c7f625..2587845df5 100644 --- a/src/xrpld/app/tx/detail/XChainBridge.cpp +++ b/src/xrpld/app/tx/detail/XChainBridge.cpp @@ -1210,17 +1210,8 @@ toClaim(STTx const& tx) template NotTEC -attestationPreflight(PreflightContext const& ctx) +attestationpreflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureXChainBridge)) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - - if (ctx.tx.getFlags() & tfUniversalMask) - return temINVALID_FLAG; - if (!publicKeyType(ctx.tx[sfPublicKey])) return temMALFORMED; @@ -1241,7 +1232,7 @@ attestationPreflight(PreflightContext const& ctx) if (att->sendingAmount.issue() != expectedIssue) return temXCHAIN_BAD_PROOF; - return preflight2(ctx); + return tesSUCCESS; } template @@ -1379,15 +1370,6 @@ attestationDoApply(ApplyContext& ctx) NotTEC XChainCreateBridge::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureXChainBridge)) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - - if (ctx.tx.getFlags() & tfUniversalMask) - return temINVALID_FLAG; - auto const account = ctx.tx[sfAccount]; auto const reward = ctx.tx[sfSignatureReward]; auto const minAccountCreate = ctx.tx[~sfMinAccountCreateAmount]; @@ -1457,7 +1439,7 @@ XChainCreateBridge::preflight(PreflightContext const& ctx) return temXCHAIN_BRIDGE_BAD_ISSUES; } - return preflight2(ctx); + return tesSUCCESS; } TER @@ -1557,18 +1539,15 @@ XChainCreateBridge::doApply() //------------------------------------------------------------------------------ +std::uint32_t +BridgeModify::getFlagsMask(PreflightContext const& ctx) +{ + return tfBridgeModifyMask; +} + NotTEC BridgeModify::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureXChainBridge)) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - - if (ctx.tx.getFlags() & tfBridgeModifyMask) - return temINVALID_FLAG; - auto const account = ctx.tx[sfAccount]; auto const reward = ctx.tx[~sfSignatureReward]; auto const minAccountCreate = ctx.tx[~sfMinAccountCreateAmount]; @@ -1607,7 +1586,7 @@ BridgeModify::preflight(PreflightContext const& ctx) return temXCHAIN_BRIDGE_BAD_MIN_ACCOUNT_CREATE_AMOUNT; } - return preflight2(ctx); + return tesSUCCESS; } TER @@ -1670,15 +1649,6 @@ BridgeModify::doApply() NotTEC XChainClaim::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureXChainBridge)) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - - if (ctx.tx.getFlags() & tfUniversalMask) - return temINVALID_FLAG; - STXChainBridge const bridgeSpec = 
ctx.tx[sfXChainBridge]; auto const amount = ctx.tx[sfAmount]; @@ -1689,7 +1659,7 @@ XChainClaim::preflight(PreflightContext const& ctx) return temBAD_AMOUNT; } - return preflight2(ctx); + return tesSUCCESS; } TER @@ -1908,15 +1878,6 @@ XChainCommit::makeTxConsequences(PreflightContext const& ctx) NotTEC XChainCommit::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureXChainBridge)) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - - if (ctx.tx.getFlags() & tfUniversalMask) - return temINVALID_FLAG; - auto const amount = ctx.tx[sfAmount]; auto const bridgeSpec = ctx.tx[sfXChainBridge]; @@ -1927,7 +1888,7 @@ XChainCommit::preflight(PreflightContext const& ctx) amount.issue() != bridgeSpec.issuingChainIssue()) return temBAD_ISSUER; - return preflight2(ctx); + return tesSUCCESS; } TER @@ -2022,21 +1983,12 @@ XChainCommit::doApply() NotTEC XChainCreateClaimID::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureXChainBridge)) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - - if (ctx.tx.getFlags() & tfUniversalMask) - return temINVALID_FLAG; - auto const reward = ctx.tx[sfSignatureReward]; if (!isXRP(reward) || reward.signum() < 0 || !isLegalNet(reward)) return temXCHAIN_BRIDGE_BAD_REWARD_AMOUNT; - return preflight2(ctx); + return tesSUCCESS; } TER @@ -2137,7 +2089,7 @@ XChainCreateClaimID::doApply() NotTEC XChainAddClaimAttestation::preflight(PreflightContext const& ctx) { - return attestationPreflight(ctx); + return attestationpreflight(ctx); } TER @@ -2157,7 +2109,7 @@ XChainAddClaimAttestation::doApply() NotTEC XChainAddAccountCreateAttestation::preflight(PreflightContext const& ctx) { - return attestationPreflight(ctx); + return attestationpreflight(ctx); } TER @@ -2177,15 +2129,6 @@ XChainAddAccountCreateAttestation::doApply() NotTEC XChainCreateAccountCommit::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featureXChainBridge)) - return temDISABLED; - - if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) - return ret; - - if (ctx.tx.getFlags() & tfUniversalMask) - return temINVALID_FLAG; - auto const amount = ctx.tx[sfAmount]; if (amount.signum() <= 0 || !amount.native()) @@ -2198,7 +2141,7 @@ XChainCreateAccountCommit::preflight(PreflightContext const& ctx) if (reward.issue() != amount.issue()) return temBAD_AMOUNT; - return preflight2(ctx); + return tesSUCCESS; } TER diff --git a/src/xrpld/app/tx/detail/XChainBridge.h b/src/xrpld/app/tx/detail/XChainBridge.h index 82b64cc0e3..0e9c0358d2 100644 --- a/src/xrpld/app/tx/detail/XChainBridge.h +++ b/src/xrpld/app/tx/detail/XChainBridge.h @@ -58,6 +58,9 @@ public: { } + static std::uint32_t + getFlagsMask(PreflightContext const& ctx); + static NotTEC preflight(PreflightContext const& ctx); diff --git a/src/xrpld/app/tx/detail/applySteps.cpp b/src/xrpld/app/tx/detail/applySteps.cpp index 543bedcd47..c2e4e13f08 100644 --- a/src/xrpld/app/tx/detail/applySteps.cpp +++ b/src/xrpld/app/tx/detail/applySteps.cpp @@ -119,7 +119,7 @@ invoke_preflight(PreflightContext const& ctx) try { return with_txn_type(ctx.tx.getTxnType(), [&]() { - auto const tec = T::preflight(ctx); + auto const tec = Transactor::invokePreflight(ctx); return std::make_pair( tec, isTesSuccess(tec) ? 
consequences_helper(ctx) From 550f90a75ec54a12ad340a4fec24ce5698cdfe3a Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Mon, 29 Sep 2025 18:11:53 -0400 Subject: [PATCH 211/244] refactor: Add support for extra transaction signatures (#5594) * Restructures Transactor signature checking code to be able to handle a `sigObject`, which may be the full transaction, or may be an object field containing a separate signature. Either way, the `sigObject` can be a single- or multi-sign signature. --- src/xrpld/app/tx/detail/Transactor.cpp | 99 ++++++++++++++------------ src/xrpld/app/tx/detail/Transactor.h | 19 ++--- 2 files changed, 64 insertions(+), 54 deletions(-) diff --git a/src/xrpld/app/tx/detail/Transactor.cpp b/src/xrpld/app/tx/detail/Transactor.cpp index 1c98233964..112017ebaf 100644 --- a/src/xrpld/app/tx/detail/Transactor.cpp +++ b/src/xrpld/app/tx/detail/Transactor.cpp @@ -659,16 +659,19 @@ Transactor::apply() } NotTEC -Transactor::checkSign(PreclaimContext const& ctx) +Transactor::checkSign( + PreclaimContext const& ctx, + AccountID const& idAccount, + STObject const& sigObject) { - auto const pkSigner = ctx.tx.getSigningPubKey(); + auto const pkSigner = sigObject.getFieldVL(sfSigningPubKey); // Ignore signature check on batch inner transactions - if (ctx.tx.isFlag(tfInnerBatchTxn) && + if (sigObject.isFlag(tfInnerBatchTxn) && ctx.view.rules().enabled(featureBatch)) { // Defensive Check: These values are also checked in Batch::preflight - if (ctx.tx.isFieldPresent(sfTxnSignature) || !pkSigner.empty() || - ctx.tx.isFieldPresent(sfSigners)) + if (sigObject.isFieldPresent(sfTxnSignature) || !pkSigner.empty() || + sigObject.isFieldPresent(sfSigners)) { return temINVALID_FLAG; // LCOV_EXCL_LINE } @@ -676,34 +679,31 @@ Transactor::checkSign(PreclaimContext const& ctx) } if ((ctx.flags & tapDRY_RUN) && pkSigner.empty() && - !ctx.tx.isFieldPresent(sfSigners)) + !sigObject.isFieldPresent(sfSigners)) { // simulate: skip signature validation when neither SigningPubKey nor // Signers are provided return tesSUCCESS; } - auto const idAccount = ctx.tx[~sfDelegate].value_or(ctx.tx[sfAccount]); - // If the pk is empty and not simulate or simulate and signers, // then we must be multi-signing. if (ctx.tx.isFieldPresent(sfSigners)) { - STArray const& txSigners(ctx.tx.getFieldArray(sfSigners)); - return checkMultiSign(ctx.view, idAccount, txSigners, ctx.flags, ctx.j); + return checkMultiSign(ctx, idAccount, sigObject); } // Check Single Sign XRPL_ASSERT( - !pkSigner.empty(), - "ripple::Transactor::checkSingleSign : non-empty signer or simulation"); + !pkSigner.empty(), "ripple::Transactor::checkSign : non-empty signer"); if (!publicKeyType(makeSlice(pkSigner))) { - JLOG(ctx.j.trace()) - << "checkSingleSign: signing public key type is unknown"; + JLOG(ctx.j.trace()) << "checkSign: signing public key type is unknown"; return tefBAD_AUTH; // FIXME: should be better error! } + + // Look up the account. auto const idSigner = pkSigner.empty() ? idAccount : calcAccountID(PublicKey(makeSlice(pkSigner))); @@ -711,8 +711,16 @@ Transactor::checkSign(PreclaimContext const& ctx) if (!sleAccount) return terNO_ACCOUNT; - return checkSingleSign( - idSigner, idAccount, sleAccount, ctx.view.rules(), ctx.j); + return checkSingleSign(ctx, idSigner, idAccount, sleAccount); +} + +NotTEC +Transactor::checkSign(PreclaimContext const& ctx) +{ + auto const idAccount = ctx.tx.isFieldPresent(sfDelegate) + ? 
ctx.tx.getAccountID(sfDelegate) + : ctx.tx.getAccountID(sfAccount); + return checkSign(ctx, idAccount, ctx.tx); } NotTEC @@ -727,9 +735,7 @@ Transactor::checkBatchSign(PreclaimContext const& ctx) Blob const& pkSigner = signer.getFieldVL(sfSigningPubKey); if (pkSigner.empty()) { - STArray const& txSigners(signer.getFieldArray(sfSigners)); - if (ret = checkMultiSign( - ctx.view, idAccount, txSigners, ctx.flags, ctx.j); + if (ret = checkMultiSign(ctx, idAccount, signer); !isTesSuccess(ret)) return ret; } @@ -753,8 +759,7 @@ Transactor::checkBatchSign(PreclaimContext const& ctx) return tesSUCCESS; } - if (ret = checkSingleSign( - idSigner, idAccount, sleAccount, ctx.view.rules(), ctx.j); + if (ret = checkSingleSign(ctx, idSigner, idAccount, sleAccount); !isTesSuccess(ret)) return ret; } @@ -764,15 +769,14 @@ Transactor::checkBatchSign(PreclaimContext const& ctx) NotTEC Transactor::checkSingleSign( + PreclaimContext const& ctx, AccountID const& idSigner, AccountID const& idAccount, - std::shared_ptr sleAccount, - Rules const& rules, - beast::Journal j) + std::shared_ptr sleAccount) { bool const isMasterDisabled = sleAccount->isFlag(lsfDisableMaster); - if (rules.enabled(fixMasterKeyAsRegularKey)) + if (ctx.view.rules().enabled(fixMasterKeyAsRegularKey)) { // Signed with regular key. if ((*sleAccount)[~sfRegularKey] == idSigner) @@ -809,14 +813,16 @@ Transactor::checkSingleSign( else if (sleAccount->isFieldPresent(sfRegularKey)) { // Signing key does not match master or regular key. - JLOG(j.trace()) << "checkSingleSign: Not authorized to use account."; + JLOG(ctx.j.trace()) + << "checkSingleSign: Not authorized to use account."; return tefBAD_AUTH; } else { // No regular key on account and signing key does not match master key. // FIXME: Why differentiate this case from tefBAD_AUTH? - JLOG(j.trace()) << "checkSingleSign: Not authorized to use account."; + JLOG(ctx.j.trace()) + << "checkSingleSign: Not authorized to use account."; return tefBAD_AUTH_MASTER; } @@ -825,19 +831,17 @@ Transactor::checkSingleSign( NotTEC Transactor::checkMultiSign( - ReadView const& view, + PreclaimContext const& ctx, AccountID const& id, - STArray const& txSigners, - ApplyFlags const& flags, - beast::Journal j) + STObject const& sigObject) { - // Get mTxnAccountID's SignerList and Quorum. + // Get id's SignerList and Quorum. std::shared_ptr sleAccountSigners = - view.read(keylet::signers(id)); + ctx.view.read(keylet::signers(id)); // If the signer list doesn't exist the account is not multi-signing. if (!sleAccountSigners) { - JLOG(j.trace()) + JLOG(ctx.j.trace()) << "applyTransaction: Invalid: Not a multi-signing account."; return tefNOT_MULTI_SIGNING; } @@ -852,11 +856,12 @@ Transactor::checkMultiSign( "ripple::Transactor::checkMultiSign : signer list ID is 0"); auto accountSigners = - SignerEntries::deserialize(*sleAccountSigners, j, "ledger"); + SignerEntries::deserialize(*sleAccountSigners, ctx.j, "ledger"); if (!accountSigners) return accountSigners.error(); // Get the array of transaction signers. + STArray const& txSigners(sigObject.getFieldArray(sfSigners)); // Walk the accountSigners performing a variety of checks and see if // the quorum is met. @@ -875,7 +880,7 @@ Transactor::checkMultiSign( { if (++iter == accountSigners->end()) { - JLOG(j.trace()) + JLOG(ctx.j.trace()) << "applyTransaction: Invalid SigningAccount.Account."; return tefBAD_SIGNATURE; } @@ -883,7 +888,7 @@ Transactor::checkMultiSign( if (iter->account != txSignerAcctID) { // The SigningAccount is not in the SignerEntries. 
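The reworked checkSign above applies one rule to whichever sigObject it is handed, the outer transaction or a nested signature object: an empty SigningPubKey routes to the multi-signing path, a non-empty key to single-signing, and mixing the two is treated as invalid, matching the simulate-path check in this patch. A toy classifier follows; SigObject and Check are invented types with no relation to the real STObject interface.

#include <iostream>
#include <optional>
#include <string>
#include <vector>

// Minimal stand-in for the two fields the dispatch cares about.
struct SigObject
{
    std::string signingPubKey;                // empty => multi-signing
    std::optional<std::vector<int>> signers;  // weights of provided signers
};

enum class Check { singleSign, multiSign, invalid };

Check classify(SigObject const& sig)
{
    if (sig.signingPubKey.empty())
        return sig.signers ? Check::multiSign : Check::invalid;
    // Single signature: the key itself identifies the signing account.
    return sig.signers ? Check::invalid : Check::singleSign;
}

int main()
{
    std::cout << (classify({"ED1234...", std::nullopt}) == Check::singleSign) << '\n';
    std::cout << (classify({"", std::vector<int>{1, 2}}) == Check::multiSign) << '\n';
}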
- JLOG(j.trace()) + JLOG(ctx.j.trace()) << "applyTransaction: Invalid SigningAccount.Account."; return tefBAD_SIGNATURE; } @@ -897,13 +902,13 @@ Transactor::checkMultiSign( // STTx::checkMultiSign if (!spk.empty() && !publicKeyType(makeSlice(spk))) { - JLOG(j.trace()) + JLOG(ctx.j.trace()) << "checkMultiSign: signing public key type is unknown"; return tefBAD_SIGNATURE; } XRPL_ASSERT( - (flags & tapDRY_RUN) || !spk.empty(), + (ctx.flags & tapDRY_RUN) || !spk.empty(), "ripple::Transactor::checkMultiSign : non-empty signer or " "simulation"); AccountID const signingAcctIDFromPubKey = spk.empty() @@ -935,7 +940,8 @@ Transactor::checkMultiSign( // In any of these cases we need to know whether the account is in // the ledger. Determine that now. - auto const sleTxSignerRoot = view.read(keylet::account(txSignerAcctID)); + auto const sleTxSignerRoot = + ctx.view.read(keylet::account(txSignerAcctID)); if (signingAcctIDFromPubKey == txSignerAcctID) { @@ -948,7 +954,7 @@ Transactor::checkMultiSign( if (signerAccountFlags & lsfDisableMaster) { - JLOG(j.trace()) + JLOG(ctx.j.trace()) << "applyTransaction: Signer:Account lsfDisableMaster."; return tefMASTER_DISABLED; } @@ -960,21 +966,21 @@ Transactor::checkMultiSign( // Public key must hash to the account's regular key. if (!sleTxSignerRoot) { - JLOG(j.trace()) << "applyTransaction: Non-phantom signer " - "lacks account root."; + JLOG(ctx.j.trace()) << "applyTransaction: Non-phantom signer " + "lacks account root."; return tefBAD_SIGNATURE; } if (!sleTxSignerRoot->isFieldPresent(sfRegularKey)) { - JLOG(j.trace()) + JLOG(ctx.j.trace()) << "applyTransaction: Account lacks RegularKey."; return tefBAD_SIGNATURE; } if (signingAcctIDFromPubKey != sleTxSignerRoot->getAccountID(sfRegularKey)) { - JLOG(j.trace()) + JLOG(ctx.j.trace()) << "applyTransaction: Account doesn't match RegularKey."; return tefBAD_SIGNATURE; } @@ -986,7 +992,8 @@ Transactor::checkMultiSign( // Cannot perform transaction if quorum is not met. 
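The quorum logic above reduces to a merge-style walk over two sorted sequences: for every signature on the transaction, find the matching entry in the on-ledger signer list, accumulate its weight, and finally compare the sum to the quorum. A compact sketch follows, with plain structs standing in for SignerEntries and the Signers array, and the individual error codes collapsed to a single bool.

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

struct SignerEntry
{
    std::string account;
    std::uint16_t weight;
};

// Returns true when the provided signer accounts (sorted) meet the quorum
// defined by the on-ledger signer list (also sorted by account).
bool quorumMet(
    std::vector<SignerEntry> const& ledgerList,
    std::uint32_t quorum,
    std::vector<std::string> const& txSigners)
{
    std::uint64_t weightSum = 0;
    auto it = ledgerList.begin();
    for (auto const& signer : txSigners)
    {
        // Advance through the ledger list until we find (or pass) signer.
        while (it != ledgerList.end() && it->account < signer)
            ++it;
        if (it == ledgerList.end() || it->account != signer)
            return false;  // signature from an account not on the list
        weightSum += it->weight;
        ++it;
    }
    return weightSum >= quorum;
}

int main()
{
    std::vector<SignerEntry> const list{{"alice", 1}, {"bob", 1}, {"carol", 2}};
    std::cout << quorumMet(list, 3, {"alice", "carol"}) << '\n';  // 1: 1 + 2 >= 3
    std::cout << quorumMet(list, 3, {"alice", "bob"}) << '\n';    // 0: 1 + 1 < 3
}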
if (weightSum < sleAccountSigners->getFieldU32(sfSignerQuorum)) { - JLOG(j.trace()) << "applyTransaction: Signers failed to meet quorum."; + JLOG(ctx.j.trace()) + << "applyTransaction: Signers failed to meet quorum."; return tefBAD_QUORUM; } diff --git a/src/xrpld/app/tx/detail/Transactor.h b/src/xrpld/app/tx/detail/Transactor.h index 88b0664ea2..429dcec6fc 100644 --- a/src/xrpld/app/tx/detail/Transactor.h +++ b/src/xrpld/app/tx/detail/Transactor.h @@ -281,6 +281,12 @@ protected: static XRPAmount calculateOwnerReserveFee(ReadView const& view, STTx const& tx); + static NotTEC + checkSign( + PreclaimContext const& ctx, + AccountID const& id, + STObject const& sigObject); + // Base class always returns true static bool checkExtraFeatures(PreflightContext const& ctx); @@ -317,18 +323,15 @@ private: payFee(); static NotTEC checkSingleSign( + PreclaimContext const& ctx, AccountID const& idSigner, AccountID const& idAccount, - std::shared_ptr sleAccount, - Rules const& rules, - beast::Journal j); + std::shared_ptr sleAccount); static NotTEC checkMultiSign( - ReadView const& view, - AccountID const& idAccount, - STArray const& txSigners, - ApplyFlags const& flags, - beast::Journal j); + PreclaimContext const& ctx, + AccountID const& id, + STObject const& sigObject); void trapTransaction(uint256) const; From 294e03ecf53e00043c801da4cbe8a2133b91c8e8 Mon Sep 17 00:00:00 2001 From: Ayaz Salikhov Date: Tue, 30 Sep 2025 18:15:24 +0200 Subject: [PATCH 212/244] ci: Upload artifacts during build and test in a separate job (#5817) --- .github/actions/build-test/action.yml | 96 -------------- .github/actions/print-env/action.yml | 43 +++++++ .github/scripts/strategy-matrix/generate.py | 2 +- .github/workflows/on-pr.yml | 5 +- .github/workflows/on-trigger.yml | 21 +-- .../workflows/reusable-build-test-config.yml | 69 ++++++++++ .github/workflows/reusable-build-test.yml | 121 +++--------------- .github/workflows/reusable-build.yml | 115 +++++++++++++++++ .github/workflows/reusable-test.yml | 69 ++++++++++ 9 files changed, 322 insertions(+), 219 deletions(-) delete mode 100644 .github/actions/build-test/action.yml create mode 100644 .github/actions/print-env/action.yml create mode 100644 .github/workflows/reusable-build-test-config.yml create mode 100644 .github/workflows/reusable-build.yml create mode 100644 .github/workflows/reusable-test.yml diff --git a/.github/actions/build-test/action.yml b/.github/actions/build-test/action.yml deleted file mode 100644 index cf1bac16f7..0000000000 --- a/.github/actions/build-test/action.yml +++ /dev/null @@ -1,96 +0,0 @@ -# This action build and tests the binary. The Conan dependencies must have -# already been installed (see the build-deps action). -name: Build and Test -description: "Build and test the binary." - -# Note that actions do not support 'type' and all inputs are strings, see -# https://docs.github.com/en/actions/reference/workflows-and-actions/metadata-syntax#inputs. -inputs: - build_dir: - description: "The directory where to build." - required: true - build_only: - description: 'Whether to only build or to build and test the code ("true", "false").' - required: false - default: "false" - build_type: - description: 'The build type to use ("Debug", "Release").' - required: true - cmake_args: - description: "Additional arguments to pass to CMake." - required: false - default: "" - cmake_target: - description: "The CMake target to build." - required: true - codecov_token: - description: "The Codecov token to use for uploading coverage reports." 
- required: false - default: "" - os: - description: 'The operating system to use for the build ("linux", "macos", "windows").' - required: true - -runs: - using: composite - steps: - - name: Configure CMake - shell: bash - working-directory: ${{ inputs.build_dir }} - run: | - echo 'Configuring CMake.' - cmake \ - -G '${{ inputs.os == 'windows' && 'Visual Studio 17 2022' || 'Ninja' }}' \ - -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \ - -DCMAKE_BUILD_TYPE=${{ inputs.build_type }} \ - ${{ inputs.cmake_args }} \ - .. - - name: Build the binary - shell: bash - working-directory: ${{ inputs.build_dir }} - run: | - echo 'Building binary.' - cmake \ - --build . \ - --config ${{ inputs.build_type }} \ - --parallel $(nproc) \ - --target ${{ inputs.cmake_target }} - - name: Check linking - if: ${{ inputs.os == 'linux' }} - shell: bash - working-directory: ${{ inputs.build_dir }} - run: | - echo 'Checking linking.' - ldd ./rippled - if [ "$(ldd ./rippled | grep -E '(libstdc\+\+|libgcc)' | wc -l)" -eq 0 ]; then - echo 'The binary is statically linked.' - else - echo 'The binary is dynamically linked.' - exit 1 - fi - - name: Verify voidstar - if: ${{ contains(inputs.cmake_args, '-Dvoidstar=ON') }} - shell: bash - working-directory: ${{ inputs.build_dir }} - run: | - echo 'Verifying presence of instrumentation.' - ./rippled --version | grep libvoidstar - - name: Test the binary - if: ${{ inputs.build_only == 'false' }} - shell: bash - working-directory: ${{ inputs.build_dir }}/${{ inputs.os == 'windows' && inputs.build_type || '' }} - run: | - echo 'Testing binary.' - ./rippled --unittest --unittest-jobs $(nproc) - ctest -j $(nproc) --output-on-failure - - name: Upload coverage report - if: ${{ inputs.cmake_target == 'coverage' }} - uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3 - with: - disable_search: true - disable_telem: true - fail_ci_if_error: true - files: ${{ inputs.build_dir }}/coverage.xml - plugins: noop - token: ${{ inputs.codecov_token }} - verbose: true diff --git a/.github/actions/print-env/action.yml b/.github/actions/print-env/action.yml new file mode 100644 index 0000000000..6019a6de2f --- /dev/null +++ b/.github/actions/print-env/action.yml @@ -0,0 +1,43 @@ +name: Print build environment +description: "Print environment and some tooling versions" + +runs: + using: composite + steps: + - name: Check configuration (Windows) + if: ${{ runner.os == 'Windows' }} + shell: bash + run: | + echo 'Checking environment variables.' + set + + echo 'Checking CMake version.' + cmake --version + + echo 'Checking Conan version.' + conan --version + + - name: Check configuration (Linux and macOS) + if: ${{ runner.os == 'Linux' || runner.os == 'macOS' }} + shell: bash + run: | + echo 'Checking path.' + echo ${PATH} | tr ':' '\n' + + echo 'Checking environment variables.' + env | sort + + echo 'Checking CMake version.' + cmake --version + + echo 'Checking compiler version.' + ${{ runner.os == 'Linux' && '${CC}' || 'clang' }} --version + + echo 'Checking Conan version.' + conan --version + + echo 'Checking Ninja version.' + ninja --version + + echo 'Checking nproc version.' 
+ nproc --version diff --git a/.github/scripts/strategy-matrix/generate.py b/.github/scripts/strategy-matrix/generate.py index ac39803fff..fd05895b0e 100755 --- a/.github/scripts/strategy-matrix/generate.py +++ b/.github/scripts/strategy-matrix/generate.py @@ -162,7 +162,7 @@ def generate_strategy_matrix(all: bool, config: Config) -> list: 'config_name': config_name, 'cmake_args': cmake_args, 'cmake_target': cmake_target, - 'build_only': 'true' if build_only else 'false', + 'build_only': build_only, 'build_type': build_type, 'os': os, 'architecture': architecture, diff --git a/.github/workflows/on-pr.yml b/.github/workflows/on-pr.yml index a206bbf041..47323ee4a7 100644 --- a/.github/workflows/on-pr.yml +++ b/.github/workflows/on-pr.yml @@ -59,8 +59,11 @@ jobs: .github/actions/build-test/** .github/actions/setup-conan/** .github/scripts/strategy-matrix/** + .github/workflows/reusable-build.yml + .github/workflows/reusable-build-test-config.yml .github/workflows/reusable-build-test.yml .github/workflows/reusable-strategy-matrix.yml + .github/workflows/reusable-test.yml .codecov.yml cmake/** conan/** @@ -105,7 +108,7 @@ jobs: with: os: ${{ matrix.os }} secrets: - codecov_token: ${{ secrets.CODECOV_TOKEN }} + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} notify-clio: needs: diff --git a/.github/workflows/on-trigger.yml b/.github/workflows/on-trigger.yml index 7b5bda021f..b06d475a4d 100644 --- a/.github/workflows/on-trigger.yml +++ b/.github/workflows/on-trigger.yml @@ -23,8 +23,11 @@ on: - ".github/actions/build-test/**" - ".github/actions/setup-conan/**" - ".github/scripts/strategy-matrix/**" + - ".github/workflows/reusable-build.yml" + - ".github/workflows/reusable-build-test-config.yml" - ".github/workflows/reusable-build-test.yml" - ".github/workflows/reusable-strategy-matrix.yml" + - ".github/workflows/reusable-test.yml" - ".codecov.yml" - "cmake/**" - "conan/**" @@ -43,22 +46,8 @@ on: schedule: - cron: "32 6 * * 1-5" - # Run when manually triggered via the GitHub UI or API. If `force_upload` is - # true, then the dependencies that were missing (`force_rebuild` is false) or - # rebuilt (`force_rebuild` is true) will be uploaded, overwriting existing - # dependencies if needed. + # Run when manually triggered via the GitHub UI or API. workflow_dispatch: - inputs: - dependencies_force_build: - description: "Force building of all dependencies." - required: false - type: boolean - default: false - dependencies_force_upload: - description: "Force uploading of all dependencies." - required: false - type: boolean - default: false concurrency: group: ${{ github.workflow }}-${{ github.ref }} @@ -82,4 +71,4 @@ jobs: os: ${{ matrix.os }} strategy_matrix: ${{ github.event_name == 'schedule' && 'all' || 'minimal' }} secrets: - codecov_token: ${{ secrets.CODECOV_TOKEN }} + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/reusable-build-test-config.yml b/.github/workflows/reusable-build-test-config.yml new file mode 100644 index 0000000000..3160cef031 --- /dev/null +++ b/.github/workflows/reusable-build-test-config.yml @@ -0,0 +1,69 @@ +name: Build and test configuration + +on: + workflow_call: + inputs: + build_dir: + description: "The directory where to build." + required: true + type: string + build_only: + description: 'Whether to only build or to build and test the code ("true", "false").' + required: true + type: boolean + build_type: + description: 'The build type to use ("Debug", "Release").' 
+ type: string + required: true + cmake_args: + description: "Additional arguments to pass to CMake." + required: false + type: string + default: "" + cmake_target: + description: "The CMake target to build." + type: string + required: true + + runs_on: + description: Runner to run the job on as a JSON string + required: true + type: string + image: + description: "The image to run in (leave empty to run natively)" + required: true + type: string + + config_name: + description: "The configuration string (used for naming artifacts and such)." + required: true + type: string + + secrets: + CODECOV_TOKEN: + description: "The Codecov token to use for uploading coverage reports." + required: true + +jobs: + build: + uses: ./.github/workflows/reusable-build.yml + with: + build_dir: ${{ inputs.build_dir }} + build_type: ${{ inputs.build_type }} + cmake_args: ${{ inputs.cmake_args }} + cmake_target: ${{ inputs.cmake_target }} + runs_on: ${{ inputs.runs_on }} + image: ${{ inputs.image }} + config_name: ${{ inputs.config_name }} + secrets: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} + + test: + needs: build + uses: ./.github/workflows/reusable-test.yml + with: + run_tests: ${{ !inputs.build_only }} + verify_voidstar: ${{ contains(inputs.cmake_args, '-Dvoidstar=ON') }} + runs_on: ${{ inputs.runs_on }} + image: ${{ inputs.image }} + config_name: ${{ inputs.config_name }} diff --git a/.github/workflows/reusable-build-test.yml b/.github/workflows/reusable-build-test.yml index 2197e88a42..c274cf2b21 100644 --- a/.github/workflows/reusable-build-test.yml +++ b/.github/workflows/reusable-build-test.yml @@ -13,16 +13,6 @@ on: required: false type: string default: ".build" - dependencies_force_build: - description: "Force building of all dependencies." - required: false - type: boolean - default: false - dependencies_force_upload: - description: "Force uploading of all dependencies." - required: false - type: boolean - default: false os: description: 'The operating system to use for the build ("linux", "macos", "windows").' required: true @@ -34,17 +24,9 @@ on: type: string default: "minimal" secrets: - codecov_token: + CODECOV_TOKEN: description: "The Codecov token to use for uploading coverage reports." - required: false - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }}-${{ inputs.os }} - cancel-in-progress: true - -defaults: - run: - shell: bash + required: true jobs: # Generate the strategy matrix to be used by the following job. @@ -54,94 +36,23 @@ jobs: os: ${{ inputs.os }} strategy_matrix: ${{ inputs.strategy_matrix }} - # Build and test the binary. - build-test: + # Build and test the binary for each configuration. 
+ build-test-config: needs: - generate-matrix + uses: ./.github/workflows/reusable-build-test-config.yml strategy: fail-fast: false matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }} max-parallel: 10 - runs-on: ${{ matrix.architecture.runner }} - container: ${{ inputs.os == 'linux' && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-5dd7158', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version) || null }} - steps: - - name: Check strategy matrix - run: | - echo 'Operating system distro name: ${{ matrix.os.distro_name }}' - echo 'Operating system distro version: ${{ matrix.os.distro_version }}' - echo 'Operating system compiler name: ${{ matrix.os.compiler_name }}' - echo 'Operating system compiler version: ${{ matrix.os.compiler_version }}' - echo 'Architecture platform: ${{ matrix.architecture.platform }}' - echo 'Architecture runner: ${{ toJson(matrix.architecture.runner) }}' - echo 'Build type: ${{ matrix.build_type }}' - echo 'Build only: ${{ matrix.build_only }}' - echo 'CMake arguments: ${{ matrix.cmake_args }}' - echo 'CMake target: ${{ matrix.cmake_target }}' - echo 'Config name: ${{ matrix.config_name }}' - - - name: Cleanup workspace - if: ${{ runner.os == 'macOS' }} - uses: XRPLF/actions/.github/actions/cleanup-workspace@3f044c7478548e3c32ff68980eeb36ece02b364e - - - name: Checkout repository - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 - - name: Prepare runner - uses: XRPLF/actions/.github/actions/prepare-runner@638e0dc11ea230f91bd26622fb542116bb5254d5 - with: - disable_ccache: false - - - name: Check configuration (Windows) - if: ${{ inputs.os == 'windows' }} - run: | - echo 'Checking environment variables.' - set - - echo 'Checking CMake version.' - cmake --version - - echo 'Checking Conan version.' - conan --version - - name: Check configuration (Linux and MacOS) - if: ${{ inputs.os == 'linux' || inputs.os == 'macos' }} - run: | - echo 'Checking path.' - echo ${PATH} | tr ':' '\n' - - echo 'Checking environment variables.' - env | sort - - echo 'Checking CMake version.' - cmake --version - - echo 'Checking compiler version.' - ${{ inputs.os == 'linux' && '${CC}' || 'clang' }} --version - - echo 'Checking Conan version.' - conan --version - - echo 'Checking Ninja version.' - ninja --version - - echo 'Checking nproc version.' 
- nproc --version - - - name: Setup Conan - uses: ./.github/actions/setup-conan - - - name: Build dependencies - uses: ./.github/actions/build-deps - with: - build_dir: ${{ inputs.build_dir }} - build_type: ${{ matrix.build_type }} - force_build: ${{ inputs.dependencies_force_build }} - - - name: Build and test binary - uses: ./.github/actions/build-test - with: - build_dir: ${{ inputs.build_dir }} - build_only: ${{ matrix.build_only }} - build_type: ${{ matrix.build_type }} - cmake_args: ${{ matrix.cmake_args }} - cmake_target: ${{ matrix.cmake_target }} - codecov_token: ${{ secrets.codecov_token }} - os: ${{ inputs.os }} + with: + build_dir: ${{ inputs.build_dir }} + build_only: ${{ matrix.build_only }} + build_type: ${{ matrix.build_type }} + cmake_args: ${{ matrix.cmake_args }} + cmake_target: ${{ matrix.cmake_target }} + runs_on: ${{ toJSON(matrix.architecture.runner) }} + image: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-5dd7158', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version) || '' }} + config_name: ${{ matrix.config_name }} + secrets: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/reusable-build.yml b/.github/workflows/reusable-build.yml new file mode 100644 index 0000000000..e586917374 --- /dev/null +++ b/.github/workflows/reusable-build.yml @@ -0,0 +1,115 @@ +name: Build rippled + +on: + workflow_call: + inputs: + build_dir: + description: "The directory where to build." + required: true + type: string + build_type: + description: 'The build type to use ("Debug", "Release").' + required: true + type: string + cmake_args: + description: "Additional arguments to pass to CMake." + required: true + type: string + cmake_target: + description: "The CMake target to build." + required: true + type: string + + runs_on: + description: Runner to run the job on as a JSON string + required: true + type: string + image: + description: "The image to run in (leave empty to run natively)" + required: true + type: string + + config_name: + description: "The name of the configuration." + required: true + type: string + + secrets: + CODECOV_TOKEN: + description: "The Codecov token to use for uploading coverage reports." 
+ required: true + +defaults: + run: + shell: bash + +jobs: + build: + name: Build ${{ inputs.config_name }} + runs-on: ${{ fromJSON(inputs.runs_on) }} + container: ${{ inputs.image != '' && inputs.image || null }} + steps: + - name: Cleanup workspace + if: ${{ runner.os == 'macOS' }} + uses: XRPLF/actions/.github/actions/cleanup-workspace@3f044c7478548e3c32ff68980eeb36ece02b364e + + - name: Checkout repository + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 + + - name: Prepare runner + uses: XRPLF/actions/.github/actions/prepare-runner@638e0dc11ea230f91bd26622fb542116bb5254d5 + with: + disable_ccache: false + + - name: Print build environment + uses: ./.github/actions/print-env + + - name: Setup Conan + uses: ./.github/actions/setup-conan + + - name: Build dependencies + uses: ./.github/actions/build-deps + with: + build_dir: ${{ inputs.build_dir }} + build_type: ${{ inputs.build_type }} + + - name: Configure CMake + shell: bash + working-directory: ${{ inputs.build_dir }} + run: | + cmake \ + -G '${{ runner.os == 'Windows' && 'Visual Studio 17 2022' || 'Ninja' }}' \ + -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \ + -DCMAKE_BUILD_TYPE=${{ inputs.build_type }} \ + ${{ inputs.cmake_args }} \ + .. + + - name: Build the binary + shell: bash + working-directory: ${{ inputs.build_dir }} + run: | + cmake \ + --build . \ + --config ${{ inputs.build_type }} \ + --parallel $(nproc) \ + --target ${{ inputs.cmake_target }} + + - name: Upload rippled artifact + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: rippled-${{ inputs.config_name }} + path: ${{ inputs.build_dir }}/${{ runner.os == 'Windows' && inputs.build_type || '' }}/rippled${{ runner.os == 'Windows' && '.exe' || '' }} + retention-days: 3 + if-no-files-found: error + + - name: Upload coverage report + if: ${{ inputs.cmake_target == 'coverage' }} + uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3 + with: + disable_search: true + disable_telem: true + fail_ci_if_error: true + files: ${{ inputs.build_dir }}/coverage.xml + plugins: noop + token: ${{ secrets.CODECOV_TOKEN }} + verbose: true diff --git a/.github/workflows/reusable-test.yml b/.github/workflows/reusable-test.yml new file mode 100644 index 0000000000..1877a19a72 --- /dev/null +++ b/.github/workflows/reusable-test.yml @@ -0,0 +1,69 @@ +name: Test rippled + +on: + workflow_call: + inputs: + verify_voidstar: + description: "Whether to verify the presence of voidstar instrumentation." + required: true + type: boolean + run_tests: + description: "Whether to run unit tests" + required: true + type: boolean + + runs_on: + description: Runner to run the job on as a JSON string + required: true + type: string + image: + description: "The image to run in (leave empty to run natively)" + required: true + type: string + + config_name: + description: "The name of the configuration." 
+ required: true + type: string + +jobs: + test: + name: Test ${{ inputs.config_name }} + runs-on: ${{ fromJSON(inputs.runs_on) }} + container: ${{ inputs.image != '' && inputs.image || null }} + steps: + - name: Download rippled artifact + uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + with: + name: rippled-${{ inputs.config_name }} + + - name: Make binary executable (Linux and macOS) + shell: bash + if: ${{ runner.os == 'Linux' || runner.os == 'macOS' }} + run: | + chmod +x ./rippled + + - name: Check linking (Linux) + if: ${{ runner.os == 'Linux' }} + shell: bash + run: | + ldd ./rippled + if [ "$(ldd ./rippled | grep -E '(libstdc\+\+|libgcc)' | wc -l)" -eq 0 ]; then + echo 'The binary is statically linked.' + else + echo 'The binary is dynamically linked.' + exit 1 + fi + + - name: Verifying presence of instrumentation + if: ${{ inputs.verify_voidstar }} + shell: bash + run: | + ./rippled --version | grep libvoidstar + + - name: Test the binary + if: ${{ inputs.run_tests }} + shell: bash + run: | + ./rippled --unittest --unittest-jobs $(nproc) + ctest -j $(nproc) --output-on-failure From 072b1c442c11230f6c82a7a32017ff699134176a Mon Sep 17 00:00:00 2001 From: Bart Date: Tue, 30 Sep 2025 19:46:10 +0200 Subject: [PATCH 213/244] chore: Set free-form CI inputs as env vars (#5822) This change moves CI values that could be user-provided into environment variables. --- .github/actions/build-deps/action.yml | 16 ++++++++++------ .github/actions/setup-conan/action.yml | 7 +++++-- .github/workflows/reusable-build.yml | 14 ++++++++++---- .github/workflows/reusable-notify-clio.yml | 17 ++++++++++------- .github/workflows/reusable-strategy-matrix.yml | 5 ++++- .github/workflows/upload-conan-deps.yml | 7 +++---- 6 files changed, 42 insertions(+), 24 deletions(-) diff --git a/.github/actions/build-deps/action.yml b/.github/actions/build-deps/action.yml index 351d8a6361..a908c656e8 100644 --- a/.github/actions/build-deps/action.yml +++ b/.github/actions/build-deps/action.yml @@ -20,14 +20,18 @@ runs: steps: - name: Install Conan dependencies shell: bash + env: + BUILD_DIR: ${{ inputs.build_dir }} + BUILD_OPTION: ${{ inputs.force_build == 'true' && '*' || 'missing' }} + BUILD_TYPE: ${{ inputs.build_type }} run: | echo 'Installing dependencies.' - mkdir -p ${{ inputs.build_dir }} - cd ${{ inputs.build_dir }} + mkdir -p '${{ env.BUILD_DIR }}' + cd '${{ env.BUILD_DIR }}' conan install \ --output-folder . \ - --build ${{ inputs.force_build == 'true' && '"*"' || 'missing' }} \ - --options:host '&:tests=True' \ - --options:host '&:xrpld=True' \ - --settings:all build_type=${{ inputs.build_type }} \ + --build=${{ env.BUILD_OPTION }} \ + --options:host='&:tests=True' \ + --options:host='&:xrpld=True' \ + --settings:all build_type='${{ env.BUILD_TYPE }}' \ .. diff --git a/.github/actions/setup-conan/action.yml b/.github/actions/setup-conan/action.yml index d31809dc94..02061c7a64 100644 --- a/.github/actions/setup-conan/action.yml +++ b/.github/actions/setup-conan/action.yml @@ -35,9 +35,12 @@ runs: - name: Set up Conan remote shell: bash + env: + CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }} + CONAN_REMOTE_URL: ${{ inputs.conan_remote_url }} run: | - echo "Adding Conan remote '${{ inputs.conan_remote_name }}' at ${{ inputs.conan_remote_url }}." - conan remote add --index 0 --force ${{ inputs.conan_remote_name }} ${{ inputs.conan_remote_url }} + echo "Adding Conan remote '${{ env.CONAN_REMOTE_NAME }}' at '${{ env.CONAN_REMOTE_URL }}'." 
+ conan remote add --index 0 --force '${{ env.CONAN_REMOTE_NAME }}' '${{ env.CONAN_REMOTE_URL }}' echo 'Listing Conan remotes.' conan remote list diff --git a/.github/workflows/reusable-build.yml b/.github/workflows/reusable-build.yml index e586917374..9c994598c2 100644 --- a/.github/workflows/reusable-build.yml +++ b/.github/workflows/reusable-build.yml @@ -76,23 +76,29 @@ jobs: - name: Configure CMake shell: bash working-directory: ${{ inputs.build_dir }} + env: + BUILD_TYPE: ${{ inputs.build_type }} + CMAKE_ARGS: ${{ inputs.cmake_args }} run: | cmake \ -G '${{ runner.os == 'Windows' && 'Visual Studio 17 2022' || 'Ninja' }}' \ -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \ - -DCMAKE_BUILD_TYPE=${{ inputs.build_type }} \ - ${{ inputs.cmake_args }} \ + -DCMAKE_BUILD_TYPE=${{ env.BUILD_TYPE }} \ + ${{ env.CMAKE_ARGS }} \ .. - name: Build the binary shell: bash working-directory: ${{ inputs.build_dir }} + env: + BUILD_TYPE: ${{ inputs.build_type }} + CMAKE_TARGET: ${{ inputs.cmake_target }} run: | cmake \ --build . \ - --config ${{ inputs.build_type }} \ + --config ${{ env.BUILD_TYPE }} \ --parallel $(nproc) \ - --target ${{ inputs.cmake_target }} + --target ${{ env.CMAKE_TARGET }} - name: Upload rippled artifact uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 diff --git a/.github/workflows/reusable-notify-clio.yml b/.github/workflows/reusable-notify-clio.yml index 2d6fa63796..99009d953e 100644 --- a/.github/workflows/reusable-notify-clio.yml +++ b/.github/workflows/reusable-notify-clio.yml @@ -46,41 +46,44 @@ jobs: uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 - name: Generate outputs id: generate + env: + PR_NUMBER: ${{ github.event.pull_request.number }} run: | echo 'Generating user and channel.' echo "user=clio" >> "${GITHUB_OUTPUT}" - echo "channel=pr_${{ github.event.pull_request.number }}" >> "${GITHUB_OUTPUT}" + echo "channel=pr_${{ env.PR_NUMBER }}" >> "${GITHUB_OUTPUT}" echo 'Extracting version.' echo "version=$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" | awk -F '"' '{print $2}')" >> "${GITHUB_OUTPUT}" - name: Calculate conan reference id: conan_ref run: | echo "conan_ref=${{ steps.generate.outputs.version }}@${{ steps.generate.outputs.user }}/${{ steps.generate.outputs.channel }}" >> "${GITHUB_OUTPUT}" - - name: Set up Conan uses: ./.github/actions/setup-conan with: conan_remote_name: ${{ inputs.conan_remote_name }} conan_remote_url: ${{ inputs.conan_remote_url }} - - name: Log into Conan remote run: conan remote login ${{ inputs.conan_remote_name }} "${{ secrets.conan_remote_username }}" --password "${{ secrets.conan_remote_password }}" - name: Upload package + env: + CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }} run: | conan export --user=${{ steps.generate.outputs.user }} --channel=${{ steps.generate.outputs.channel }} . 
- conan upload --confirm --check --remote=${{ inputs.conan_remote_name }} xrpl/${{ steps.conan_ref.outputs.conan_ref }} + conan upload --confirm --check --remote=${{ env.CONAN_REMOTE_NAME }} xrpl/${{ steps.conan_ref.outputs.conan_ref }} outputs: conan_ref: ${{ steps.conan_ref.outputs.conan_ref }} notify: needs: upload runs-on: ubuntu-latest - env: - GH_TOKEN: ${{ secrets.clio_notify_token }} steps: - name: Notify Clio + env: + GH_TOKEN: ${{ secrets.clio_notify_token }} + PR_URL: ${{ github.event.pull_request.html_url }} run: | gh api --method POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \ /repos/xrplf/clio/dispatches -f "event_type=check_libxrpl" \ -F "client_payload[conan_ref]=${{ needs.upload.outputs.conan_ref }}" \ - -F "client_payload[pr_url]=${{ github.event.pull_request.html_url }}" + -F "client_payload[pr_url]=${{ env.PR_URL }}" diff --git a/.github/workflows/reusable-strategy-matrix.yml b/.github/workflows/reusable-strategy-matrix.yml index 20a90fc2e3..e8621527c9 100644 --- a/.github/workflows/reusable-strategy-matrix.yml +++ b/.github/workflows/reusable-strategy-matrix.yml @@ -35,4 +35,7 @@ jobs: - name: Generate strategy matrix working-directory: .github/scripts/strategy-matrix id: generate - run: ./generate.py ${{ inputs.strategy_matrix == 'all' && '--all' || '' }} ${{ inputs.os != '' && format('--config={0}.json', inputs.os) || '' }} >> "${GITHUB_OUTPUT}" + env: + GENERATE_CONFIG: ${{ inputs.os != '' && format('--config={0}.json', inputs.os) || '' }} + GENERATE_OPTION: ${{ inputs.strategy_matrix == 'all' && '--all' || '' }} + run: ./generate.py ${{ env.GENERATE_OPTION }} ${{ env.GENERATE_CONFIG }} >> "${GITHUB_OUTPUT}" diff --git a/.github/workflows/upload-conan-deps.yml b/.github/workflows/upload-conan-deps.yml index 98db52a436..cbae8a4c86 100644 --- a/.github/workflows/upload-conan-deps.yml +++ b/.github/workflows/upload-conan-deps.yml @@ -24,13 +24,10 @@ on: branches: [develop] paths: - .github/workflows/upload-conan-deps.yml - - .github/workflows/reusable-strategy-matrix.yml - - .github/actions/build-deps/action.yml - .github/actions/setup-conan/action.yml - ".github/scripts/strategy-matrix/**" - - conanfile.py - conan.lock @@ -88,4 +85,6 @@ jobs: - name: Upload Conan packages if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' && github.event_name != 'schedule' }} - run: conan upload "*" -r=${{ env.CONAN_REMOTE_NAME }} --confirm ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }} + env: + FORCE_OPTION: ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }} + run: conan upload "*" --remote='${{ env.CONAN_REMOTE_NAME }}' --confirm ${{ env.FORCE_OPTION }} From 8e4fda160d9fb66d736b16c909da7d19fdc7d19b Mon Sep 17 00:00:00 2001 From: yinyiqian1 Date: Tue, 30 Sep 2025 14:49:53 -0400 Subject: [PATCH 214/244] Rename flags for DynamicMPT (#5820) --- include/xrpl/protocol/LedgerFormats.h | 16 +++++++-------- include/xrpl/protocol/TxFlags.h | 16 +++++++-------- .../app/tx/detail/MPTokenIssuanceSet.cpp | 20 +++++++++++-------- 3 files changed, 28 insertions(+), 24 deletions(-) diff --git a/include/xrpl/protocol/LedgerFormats.h b/include/xrpl/protocol/LedgerFormats.h index 40c9fce1bb..ff4653e5c7 100644 --- a/include/xrpl/protocol/LedgerFormats.h +++ b/include/xrpl/protocol/LedgerFormats.h @@ -188,14 +188,14 @@ enum LedgerSpecificFlags { lsfMPTCanTransfer = 0x00000020, lsfMPTCanClawback = 0x00000040, - lmfMPTCanMutateCanLock = 0x00000002, - lmfMPTCanMutateRequireAuth = 0x00000004, - 
lmfMPTCanMutateCanEscrow = 0x00000008, - lmfMPTCanMutateCanTrade = 0x00000010, - lmfMPTCanMutateCanTransfer = 0x00000020, - lmfMPTCanMutateCanClawback = 0x00000040, - lmfMPTCanMutateMetadata = 0x00010000, - lmfMPTCanMutateTransferFee = 0x00020000, + lsmfMPTCanMutateCanLock = 0x00000002, + lsmfMPTCanMutateRequireAuth = 0x00000004, + lsmfMPTCanMutateCanEscrow = 0x00000008, + lsmfMPTCanMutateCanTrade = 0x00000010, + lsmfMPTCanMutateCanTransfer = 0x00000020, + lsmfMPTCanMutateCanClawback = 0x00000040, + lsmfMPTCanMutateMetadata = 0x00010000, + lsmfMPTCanMutateTransferFee = 0x00020000, // ltMPTOKEN lsfMPTAuthorized = 0x00000002, diff --git a/include/xrpl/protocol/TxFlags.h b/include/xrpl/protocol/TxFlags.h index 30d991e680..dcbc10b927 100644 --- a/include/xrpl/protocol/TxFlags.h +++ b/include/xrpl/protocol/TxFlags.h @@ -156,14 +156,14 @@ constexpr std::uint32_t const tfMPTokenIssuanceCreateMask = // MPTokenIssuanceCreate MutableFlags: // Indicating specific fields or flags may be changed after issuance. -constexpr std::uint32_t const tmfMPTCanMutateCanLock = lmfMPTCanMutateCanLock; -constexpr std::uint32_t const tmfMPTCanMutateRequireAuth = lmfMPTCanMutateRequireAuth; -constexpr std::uint32_t const tmfMPTCanMutateCanEscrow = lmfMPTCanMutateCanEscrow; -constexpr std::uint32_t const tmfMPTCanMutateCanTrade = lmfMPTCanMutateCanTrade; -constexpr std::uint32_t const tmfMPTCanMutateCanTransfer = lmfMPTCanMutateCanTransfer; -constexpr std::uint32_t const tmfMPTCanMutateCanClawback = lmfMPTCanMutateCanClawback; -constexpr std::uint32_t const tmfMPTCanMutateMetadata = lmfMPTCanMutateMetadata; -constexpr std::uint32_t const tmfMPTCanMutateTransferFee = lmfMPTCanMutateTransferFee; +constexpr std::uint32_t const tmfMPTCanMutateCanLock = lsmfMPTCanMutateCanLock; +constexpr std::uint32_t const tmfMPTCanMutateRequireAuth = lsmfMPTCanMutateRequireAuth; +constexpr std::uint32_t const tmfMPTCanMutateCanEscrow = lsmfMPTCanMutateCanEscrow; +constexpr std::uint32_t const tmfMPTCanMutateCanTrade = lsmfMPTCanMutateCanTrade; +constexpr std::uint32_t const tmfMPTCanMutateCanTransfer = lsmfMPTCanMutateCanTransfer; +constexpr std::uint32_t const tmfMPTCanMutateCanClawback = lsmfMPTCanMutateCanClawback; +constexpr std::uint32_t const tmfMPTCanMutateMetadata = lsmfMPTCanMutateMetadata; +constexpr std::uint32_t const tmfMPTCanMutateTransferFee = lsmfMPTCanMutateTransferFee; constexpr std::uint32_t const tmfMPTokenIssuanceCreateMutableMask = ~(tmfMPTCanMutateCanLock | tmfMPTCanMutateRequireAuth | tmfMPTCanMutateCanEscrow | tmfMPTCanMutateCanTrade | tmfMPTCanMutateCanTransfer | tmfMPTCanMutateCanClawback | tmfMPTCanMutateMetadata | tmfMPTCanMutateTransferFee); diff --git a/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp b/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp index c406a8ec5f..6fb87711c8 100644 --- a/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp +++ b/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp @@ -51,14 +51,18 @@ struct MPTMutabilityFlags }; static constexpr std::array mptMutabilityFlags = { - {{tmfMPTSetCanLock, tmfMPTClearCanLock, lmfMPTCanMutateCanLock}, - {tmfMPTSetRequireAuth, tmfMPTClearRequireAuth, lmfMPTCanMutateRequireAuth}, - {tmfMPTSetCanEscrow, tmfMPTClearCanEscrow, lmfMPTCanMutateCanEscrow}, - {tmfMPTSetCanTrade, tmfMPTClearCanTrade, lmfMPTCanMutateCanTrade}, - {tmfMPTSetCanTransfer, tmfMPTClearCanTransfer, lmfMPTCanMutateCanTransfer}, + {{tmfMPTSetCanLock, tmfMPTClearCanLock, lsmfMPTCanMutateCanLock}, + {tmfMPTSetRequireAuth, + tmfMPTClearRequireAuth, + lsmfMPTCanMutateRequireAuth}, + 
{tmfMPTSetCanEscrow, tmfMPTClearCanEscrow, lsmfMPTCanMutateCanEscrow}, + {tmfMPTSetCanTrade, tmfMPTClearCanTrade, lsmfMPTCanMutateCanTrade}, + {tmfMPTSetCanTransfer, + tmfMPTClearCanTransfer, + lsmfMPTCanMutateCanTransfer}, {tmfMPTSetCanClawback, tmfMPTClearCanClawback, - lmfMPTCanMutateCanClawback}}}; + lsmfMPTCanMutateCanClawback}}}; NotTEC MPTokenIssuanceSet::preflight(PreflightContext const& ctx) @@ -243,7 +247,7 @@ MPTokenIssuanceSet::preclaim(PreclaimContext const& ctx) return tecNO_PERMISSION; } - if (!isMutableFlag(lmfMPTCanMutateMetadata) && + if (!isMutableFlag(lsmfMPTCanMutateMetadata) && ctx.tx.isFieldPresent(sfMPTokenMetadata)) return tecNO_PERMISSION; @@ -256,7 +260,7 @@ MPTokenIssuanceSet::preclaim(PreclaimContext const& ctx) if (fee > 0u && !sleMptIssuance->isFlag(lsfMPTCanTransfer)) return tecNO_PERMISSION; - if (!isMutableFlag(lmfMPTCanMutateTransferFee)) + if (!isMutableFlag(lsmfMPTCanMutateTransferFee)) return tecNO_PERMISSION; } From 70d5c624e8cf732a362335642b2f5125ce4b43c1 Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Tue, 30 Sep 2025 16:09:11 -0400 Subject: [PATCH 215/244] Set version to 2.6.1 --- src/libxrpl/protocol/BuildInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libxrpl/protocol/BuildInfo.cpp b/src/libxrpl/protocol/BuildInfo.cpp index ed9dd68da1..b54c74c80d 100644 --- a/src/libxrpl/protocol/BuildInfo.cpp +++ b/src/libxrpl/protocol/BuildInfo.cpp @@ -36,7 +36,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "2.6.1-rc2" +char const* const versionString = "2.6.1" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From 330a3215bcc26b579a5b2d1587f9273479d558bb Mon Sep 17 00:00:00 2001 From: Valentin Balaschenko <13349202+vlntb@users.noreply.github.com> Date: Wed, 1 Oct 2025 12:57:33 +0100 Subject: [PATCH 216/244] fix: FD/handle guarding + exponential backoff (#5823) --- include/xrpl/server/detail/Door.h | 119 +++++++++++++++++++++++++++++- 1 file changed, 115 insertions(+), 4 deletions(-) diff --git a/include/xrpl/server/detail/Door.h b/include/xrpl/server/detail/Door.h index 7906af2a52..65bc310d4c 100644 --- a/include/xrpl/server/detail/Door.h +++ b/include/xrpl/server/detail/Door.h @@ -30,15 +30,29 @@ #include #include #include +#include #include +#include #include #include #include #include +#include +#if !BOOST_OS_WINDOWS +#include + +#include +#include +#endif + +#include #include +#include #include #include +#include +#include namespace ripple { @@ -98,10 +112,27 @@ private: boost::asio::strand strand_; bool ssl_; bool plain_; + static constexpr std::chrono::milliseconds INITIAL_ACCEPT_DELAY{50}; + static constexpr std::chrono::milliseconds MAX_ACCEPT_DELAY{2000}; + std::chrono::milliseconds accept_delay_{INITIAL_ACCEPT_DELAY}; + boost::asio::steady_timer backoff_timer_; + static constexpr double FREE_FD_THRESHOLD = 0.70; + + struct FDStats + { + std::uint64_t used{0}; + std::uint64_t limit{0}; + }; void reOpen(); + std::optional + query_fd_stats() const; + + bool + should_throttle_for_fds(); + public: Door( Handler& handler, @@ -299,6 +330,7 @@ Door::Door( , plain_( port_.protocol.count("http") > 0 || port_.protocol.count("ws") > 0 || port_.protocol.count("ws2")) + , backoff_timer_(io_context) { reOpen(); } @@ -323,6 +355,7 @@ Door::close() return boost::asio::post( strand_, std::bind(&Door::close, this->shared_from_this())); + 
backoff_timer_.cancel(); error_code ec; acceptor_.close(ec); } @@ -368,6 +401,17 @@ Door::do_accept(boost::asio::yield_context do_yield) { while (acceptor_.is_open()) { + if (should_throttle_for_fds()) + { + backoff_timer_.expires_after(accept_delay_); + boost::system::error_code tec; + backoff_timer_.async_wait(do_yield[tec]); + accept_delay_ = std::min(accept_delay_ * 2, MAX_ACCEPT_DELAY); + JLOG(j_.warn()) << "Throttling do_accept for " + << accept_delay_.count() << "ms."; + continue; + } + error_code ec; endpoint_type remote_address; stream_type stream(ioc_); @@ -377,15 +421,28 @@ Door::do_accept(boost::asio::yield_context do_yield) { if (ec == boost::asio::error::operation_aborted) break; - JLOG(j_.error()) << "accept: " << ec.message(); - if (ec == boost::asio::error::no_descriptors) + + if (ec == boost::asio::error::no_descriptors || + ec == boost::asio::error::no_buffer_space) { - JLOG(j_.info()) << "re-opening acceptor"; - reOpen(); + JLOG(j_.warn()) << "accept: Too many open files. Pausing for " + << accept_delay_.count() << "ms."; + + backoff_timer_.expires_after(accept_delay_); + boost::system::error_code tec; + backoff_timer_.async_wait(do_yield[tec]); + + accept_delay_ = std::min(accept_delay_ * 2, MAX_ACCEPT_DELAY); + } + else + { + JLOG(j_.error()) << "accept error: " << ec.message(); } continue; } + accept_delay_ = INITIAL_ACCEPT_DELAY; + if (ssl_ && plain_) { if (auto sp = ios().template emplace( @@ -408,6 +465,60 @@ Door::do_accept(boost::asio::yield_context do_yield) } } +template +std::optional::FDStats> +Door::query_fd_stats() const +{ +#if BOOST_OS_WINDOWS + return std::nullopt; +#else + FDStats s; + struct rlimit rl; + if (getrlimit(RLIMIT_NOFILE, &rl) != 0 || rl.rlim_cur == RLIM_INFINITY) + return std::nullopt; + s.limit = static_cast(rl.rlim_cur); +#if BOOST_OS_LINUX + constexpr char const* kFdDir = "/proc/self/fd"; +#else + constexpr char const* kFdDir = "/dev/fd"; +#endif + if (DIR* d = ::opendir(kFdDir)) + { + std::uint64_t cnt = 0; + while (::readdir(d) != nullptr) + ++cnt; + ::closedir(d); + // readdir counts '.', '..', and the DIR* itself shows in the list + s.used = (cnt >= 3) ? (cnt - 3) : 0; + return s; + } + return std::nullopt; +#endif +} + +template +bool +Door::should_throttle_for_fds() +{ +#if BOOST_OS_WINDOWS + return false; +#else + auto const stats = query_fd_stats(); + if (!stats || stats->limit == 0) + return false; + + auto const& s = *stats; + auto const free = (s.limit > s.used) ? 
(s.limit - s.used) : 0ull; + double const free_ratio = + static_cast(free) / static_cast(s.limit); + if (free_ratio < FREE_FD_THRESHOLD) + { + return true; + } + return false; +#endif +} + } // namespace ripple #endif From 51ef35ab55598eb2be61d2bcc418f8651a3b3f63 Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Wed, 1 Oct 2025 16:58:43 -0400 Subject: [PATCH 217/244] fix: Transaction sig checking functions do not get a full context (#5829) Fixes a (currently harmless) bug introduced by PR #5594 --- src/xrpld/app/tx/detail/Transactor.cpp | 78 +++++++++++++------------- src/xrpld/app/tx/detail/Transactor.h | 19 ++++--- 2 files changed, 52 insertions(+), 45 deletions(-) diff --git a/src/xrpld/app/tx/detail/Transactor.cpp b/src/xrpld/app/tx/detail/Transactor.cpp index 112017ebaf..f6b8b3c9d2 100644 --- a/src/xrpld/app/tx/detail/Transactor.cpp +++ b/src/xrpld/app/tx/detail/Transactor.cpp @@ -660,14 +660,15 @@ Transactor::apply() NotTEC Transactor::checkSign( - PreclaimContext const& ctx, + ReadView const& view, + ApplyFlags flags, AccountID const& idAccount, - STObject const& sigObject) + STObject const& sigObject, + beast::Journal const j) { auto const pkSigner = sigObject.getFieldVL(sfSigningPubKey); // Ignore signature check on batch inner transactions - if (sigObject.isFlag(tfInnerBatchTxn) && - ctx.view.rules().enabled(featureBatch)) + if (sigObject.isFlag(tfInnerBatchTxn) && view.rules().enabled(featureBatch)) { // Defensive Check: These values are also checked in Batch::preflight if (sigObject.isFieldPresent(sfTxnSignature) || !pkSigner.empty() || @@ -678,7 +679,7 @@ Transactor::checkSign( return tesSUCCESS; } - if ((ctx.flags & tapDRY_RUN) && pkSigner.empty() && + if ((flags & tapDRY_RUN) && pkSigner.empty() && !sigObject.isFieldPresent(sfSigners)) { // simulate: skip signature validation when neither SigningPubKey nor @@ -688,9 +689,9 @@ Transactor::checkSign( // If the pk is empty and not simulate or simulate and signers, // then we must be multi-signing. - if (ctx.tx.isFieldPresent(sfSigners)) + if (sigObject.isFieldPresent(sfSigners)) { - return checkMultiSign(ctx, idAccount, sigObject); + return checkMultiSign(view, flags, idAccount, sigObject, j); } // Check Single Sign @@ -699,7 +700,7 @@ Transactor::checkSign( if (!publicKeyType(makeSlice(pkSigner))) { - JLOG(ctx.j.trace()) << "checkSign: signing public key type is unknown"; + JLOG(j.trace()) << "checkSign: signing public key type is unknown"; return tefBAD_AUTH; // FIXME: should be better error! } @@ -707,11 +708,11 @@ Transactor::checkSign( auto const idSigner = pkSigner.empty() ? idAccount : calcAccountID(PublicKey(makeSlice(pkSigner))); - auto const sleAccount = ctx.view.read(keylet::account(idAccount)); + auto const sleAccount = view.read(keylet::account(idAccount)); if (!sleAccount) return terNO_ACCOUNT; - return checkSingleSign(ctx, idSigner, idAccount, sleAccount); + return checkSingleSign(view, idSigner, idAccount, sleAccount, j); } NotTEC @@ -720,7 +721,7 @@ Transactor::checkSign(PreclaimContext const& ctx) auto const idAccount = ctx.tx.isFieldPresent(sfDelegate) ? 
ctx.tx.getAccountID(sfDelegate) : ctx.tx.getAccountID(sfAccount); - return checkSign(ctx, idAccount, ctx.tx); + return checkSign(ctx.view, ctx.flags, idAccount, ctx.tx, ctx.j); } NotTEC @@ -735,7 +736,8 @@ Transactor::checkBatchSign(PreclaimContext const& ctx) Blob const& pkSigner = signer.getFieldVL(sfSigningPubKey); if (pkSigner.empty()) { - if (ret = checkMultiSign(ctx, idAccount, signer); + if (ret = checkMultiSign( + ctx.view, ctx.flags, idAccount, signer, ctx.j); !isTesSuccess(ret)) return ret; } @@ -759,7 +761,8 @@ Transactor::checkBatchSign(PreclaimContext const& ctx) return tesSUCCESS; } - if (ret = checkSingleSign(ctx, idSigner, idAccount, sleAccount); + if (ret = checkSingleSign( + ctx.view, idSigner, idAccount, sleAccount, ctx.j); !isTesSuccess(ret)) return ret; } @@ -769,14 +772,15 @@ Transactor::checkBatchSign(PreclaimContext const& ctx) NotTEC Transactor::checkSingleSign( - PreclaimContext const& ctx, + ReadView const& view, AccountID const& idSigner, AccountID const& idAccount, - std::shared_ptr sleAccount) + std::shared_ptr sleAccount, + beast::Journal const j) { bool const isMasterDisabled = sleAccount->isFlag(lsfDisableMaster); - if (ctx.view.rules().enabled(fixMasterKeyAsRegularKey)) + if (view.rules().enabled(fixMasterKeyAsRegularKey)) { // Signed with regular key. if ((*sleAccount)[~sfRegularKey] == idSigner) @@ -813,16 +817,14 @@ Transactor::checkSingleSign( else if (sleAccount->isFieldPresent(sfRegularKey)) { // Signing key does not match master or regular key. - JLOG(ctx.j.trace()) - << "checkSingleSign: Not authorized to use account."; + JLOG(j.trace()) << "checkSingleSign: Not authorized to use account."; return tefBAD_AUTH; } else { // No regular key on account and signing key does not match master key. // FIXME: Why differentiate this case from tefBAD_AUTH? - JLOG(ctx.j.trace()) - << "checkSingleSign: Not authorized to use account."; + JLOG(j.trace()) << "checkSingleSign: Not authorized to use account."; return tefBAD_AUTH_MASTER; } @@ -831,17 +833,19 @@ Transactor::checkSingleSign( NotTEC Transactor::checkMultiSign( - PreclaimContext const& ctx, + ReadView const& view, + ApplyFlags flags, AccountID const& id, - STObject const& sigObject) + STObject const& sigObject, + beast::Journal const j) { // Get id's SignerList and Quorum. std::shared_ptr sleAccountSigners = - ctx.view.read(keylet::signers(id)); + view.read(keylet::signers(id)); // If the signer list doesn't exist the account is not multi-signing. if (!sleAccountSigners) { - JLOG(ctx.j.trace()) + JLOG(j.trace()) << "applyTransaction: Invalid: Not a multi-signing account."; return tefNOT_MULTI_SIGNING; } @@ -856,7 +860,7 @@ Transactor::checkMultiSign( "ripple::Transactor::checkMultiSign : signer list ID is 0"); auto accountSigners = - SignerEntries::deserialize(*sleAccountSigners, ctx.j, "ledger"); + SignerEntries::deserialize(*sleAccountSigners, j, "ledger"); if (!accountSigners) return accountSigners.error(); @@ -880,7 +884,7 @@ Transactor::checkMultiSign( { if (++iter == accountSigners->end()) { - JLOG(ctx.j.trace()) + JLOG(j.trace()) << "applyTransaction: Invalid SigningAccount.Account."; return tefBAD_SIGNATURE; } @@ -888,7 +892,7 @@ Transactor::checkMultiSign( if (iter->account != txSignerAcctID) { // The SigningAccount is not in the SignerEntries. 
- JLOG(ctx.j.trace()) + JLOG(j.trace()) << "applyTransaction: Invalid SigningAccount.Account."; return tefBAD_SIGNATURE; } @@ -902,13 +906,13 @@ Transactor::checkMultiSign( // STTx::checkMultiSign if (!spk.empty() && !publicKeyType(makeSlice(spk))) { - JLOG(ctx.j.trace()) + JLOG(j.trace()) << "checkMultiSign: signing public key type is unknown"; return tefBAD_SIGNATURE; } XRPL_ASSERT( - (ctx.flags & tapDRY_RUN) || !spk.empty(), + (flags & tapDRY_RUN) || !spk.empty(), "ripple::Transactor::checkMultiSign : non-empty signer or " "simulation"); AccountID const signingAcctIDFromPubKey = spk.empty() @@ -940,8 +944,7 @@ Transactor::checkMultiSign( // In any of these cases we need to know whether the account is in // the ledger. Determine that now. - auto const sleTxSignerRoot = - ctx.view.read(keylet::account(txSignerAcctID)); + auto const sleTxSignerRoot = view.read(keylet::account(txSignerAcctID)); if (signingAcctIDFromPubKey == txSignerAcctID) { @@ -954,7 +957,7 @@ Transactor::checkMultiSign( if (signerAccountFlags & lsfDisableMaster) { - JLOG(ctx.j.trace()) + JLOG(j.trace()) << "applyTransaction: Signer:Account lsfDisableMaster."; return tefMASTER_DISABLED; } @@ -966,21 +969,21 @@ Transactor::checkMultiSign( // Public key must hash to the account's regular key. if (!sleTxSignerRoot) { - JLOG(ctx.j.trace()) << "applyTransaction: Non-phantom signer " - "lacks account root."; + JLOG(j.trace()) << "applyTransaction: Non-phantom signer " + "lacks account root."; return tefBAD_SIGNATURE; } if (!sleTxSignerRoot->isFieldPresent(sfRegularKey)) { - JLOG(ctx.j.trace()) + JLOG(j.trace()) << "applyTransaction: Account lacks RegularKey."; return tefBAD_SIGNATURE; } if (signingAcctIDFromPubKey != sleTxSignerRoot->getAccountID(sfRegularKey)) { - JLOG(ctx.j.trace()) + JLOG(j.trace()) << "applyTransaction: Account doesn't match RegularKey."; return tefBAD_SIGNATURE; } @@ -992,8 +995,7 @@ Transactor::checkMultiSign( // Cannot perform transaction if quorum is not met. 
if (weightSum < sleAccountSigners->getFieldU32(sfSignerQuorum)) { - JLOG(ctx.j.trace()) - << "applyTransaction: Signers failed to meet quorum."; + JLOG(j.trace()) << "applyTransaction: Signers failed to meet quorum."; return tefBAD_QUORUM; } diff --git a/src/xrpld/app/tx/detail/Transactor.h b/src/xrpld/app/tx/detail/Transactor.h index 429dcec6fc..17ef62e607 100644 --- a/src/xrpld/app/tx/detail/Transactor.h +++ b/src/xrpld/app/tx/detail/Transactor.h @@ -283,9 +283,11 @@ protected: static NotTEC checkSign( - PreclaimContext const& ctx, - AccountID const& id, - STObject const& sigObject); + ReadView const& view, + ApplyFlags flags, + AccountID const& idAccount, + STObject const& sigObject, + beast::Journal const j); // Base class always returns true static bool @@ -323,15 +325,18 @@ private: payFee(); static NotTEC checkSingleSign( - PreclaimContext const& ctx, + ReadView const& view, AccountID const& idSigner, AccountID const& idAccount, - std::shared_ptr sleAccount); + std::shared_ptr sleAccount, + beast::Journal const j); static NotTEC checkMultiSign( - PreclaimContext const& ctx, + ReadView const& view, + ApplyFlags flags, AccountID const& id, - STObject const& sigObject); + STObject const& sigObject, + beast::Journal const j); void trapTransaction(uint256) const; From 5d79bfc5317093b00b787dfd5e50ce3bee1de0ca Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Thu, 2 Oct 2025 11:54:09 +0100 Subject: [PATCH 218/244] Remove bogus coverage warning (#5838) --- src/libxrpl/ledger/View.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libxrpl/ledger/View.cpp b/src/libxrpl/ledger/View.cpp index 3e27741c2f..10b7e81b4f 100644 --- a/src/libxrpl/ledger/View.cpp +++ b/src/libxrpl/ledger/View.cpp @@ -3126,7 +3126,7 @@ rippleUnlockEscrowMPT( { // LCOV_EXCL_START JLOG(j.error()) << "rippleUnlockEscrowMPT: MPToken not found for " << receiver; - return tecOBJECT_NOT_FOUND; // LCOV_EXCL_LINE + return tecOBJECT_NOT_FOUND; } // LCOV_EXCL_STOP auto current = sle->getFieldU64(sfMPTAmount); From 2df730438db77df3c9d40a0e50ce78f62f532568 Mon Sep 17 00:00:00 2001 From: Bart Date: Tue, 7 Oct 2025 16:28:19 -0400 Subject: [PATCH 219/244] Set version to 3.0.0-b1 (#5859) --- src/libxrpl/protocol/BuildInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libxrpl/protocol/BuildInfo.cpp b/src/libxrpl/protocol/BuildInfo.cpp index b54c74c80d..0cee2995db 100644 --- a/src/libxrpl/protocol/BuildInfo.cpp +++ b/src/libxrpl/protocol/BuildInfo.cpp @@ -36,7 +36,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "2.6.1" +char const* const versionString = "3.0.0-b1" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From 176fd2b6e4486198b463d9f9b87375d069b9b7cc Mon Sep 17 00:00:00 2001 From: Mayukha Vadari Date: Wed, 8 Oct 2025 04:25:51 -0400 Subject: [PATCH 220/244] chore: exclude all `UNREACHABLE` blocks from codecov (#5846) --- include/xrpl/basics/IntrusivePointer.ipp | 2 ++ include/xrpl/beast/net/IPAddress.h | 4 ++++ include/xrpl/ledger/ApplyView.h | 2 ++ include/xrpl/protocol/detail/b58_utils.h | 2 ++ include/xrpl/resource/detail/Logic.h | 2 ++ src/libxrpl/basics/Log.cpp | 8 +++++++ src/libxrpl/basics/contract.cpp | 2 ++ src/libxrpl/json/Object.cpp | 4 ++-- src/libxrpl/json/json_value.cpp | 24 +++++++++++++++++++ src/libxrpl/ledger/ApplyStateTable.cpp | 2 ++ src/libxrpl/ledger/ApplyView.cpp | 2 ++ 
src/libxrpl/ledger/BookDirs.cpp | 4 ++++ src/libxrpl/ledger/View.cpp | 12 ++++++---- src/libxrpl/protocol/STBase.cpp | 2 ++ src/libxrpl/protocol/TxMeta.cpp | 2 ++ src/xrpld/app/ledger/Ledger.cpp | 4 ++++ src/xrpld/app/ledger/detail/InboundLedger.cpp | 4 ++++ src/xrpld/app/ledger/detail/LedgerMaster.cpp | 2 ++ src/xrpld/app/main/Application.cpp | 8 +++++++ src/xrpld/app/misc/NetworkOPs.cpp | 10 ++++++++ src/xrpld/app/misc/detail/ValidatorList.cpp | 14 ++++++----- src/xrpld/app/paths/Pathfinder.cpp | 2 ++ src/xrpld/app/paths/detail/BookStep.cpp | 6 +++++ src/xrpld/app/paths/detail/DirectStep.cpp | 2 ++ src/xrpld/app/paths/detail/FlowDebugInfo.h | 2 ++ src/xrpld/app/paths/detail/PaySteps.cpp | 6 +++++ src/xrpld/app/paths/detail/StrandFlow.h | 8 +++++++ src/xrpld/app/rdb/RelationalDatabase.h | 4 +++- src/xrpld/app/rdb/backend/detail/Node.cpp | 6 +++++ src/xrpld/app/tx/detail/Change.cpp | 2 ++ src/xrpld/app/tx/detail/DeleteAccount.cpp | 2 ++ src/xrpld/app/tx/detail/Offer.h | 2 ++ src/xrpld/app/tx/detail/OfferStream.cpp | 2 ++ src/xrpld/app/tx/detail/SetSignerList.cpp | 2 ++ src/xrpld/app/tx/detail/Transactor.cpp | 2 ++ src/xrpld/app/tx/detail/XChainBridge.cpp | 2 ++ src/xrpld/app/tx/detail/applySteps.cpp | 8 +++++++ src/xrpld/nodestore/backend/NuDBFactory.cpp | 2 ++ .../nodestore/backend/RocksDBFactory.cpp | 2 ++ src/xrpld/overlay/Compression.h | 4 ++++ src/xrpld/overlay/detail/PeerImp.cpp | 6 +++++ src/xrpld/peerfinder/detail/Counts.h | 2 ++ src/xrpld/peerfinder/detail/Logic.h | 2 ++ src/xrpld/perflog/detail/PerfLogImp.cpp | 18 ++++++++++++++ src/xrpld/rpc/detail/Handler.cpp | 2 ++ src/xrpld/rpc/detail/Status.cpp | 2 ++ src/xrpld/rpc/handlers/AccountChannels.cpp | 2 ++ src/xrpld/rpc/handlers/AccountLines.cpp | 2 ++ src/xrpld/rpc/handlers/AccountOffers.cpp | 2 ++ src/xrpld/rpc/handlers/AccountTx.cpp | 4 ++++ src/xrpld/rpc/handlers/Fee1.cpp | 3 +++ src/xrpld/shamap/detail/SHAMap.cpp | 4 ++++ src/xrpld/shamap/detail/SHAMapDelta.cpp | 6 +++++ 53 files changed, 224 insertions(+), 13 deletions(-) diff --git a/include/xrpl/basics/IntrusivePointer.ipp b/include/xrpl/basics/IntrusivePointer.ipp index 1ac3f2bab4..4d037bc329 100644 --- a/include/xrpl/basics/IntrusivePointer.ipp +++ b/include/xrpl/basics/IntrusivePointer.ipp @@ -654,12 +654,14 @@ SharedWeakUnion::convertToWeak() break; case destroy: // We just added a weak ref. How could we destroy? + // LCOV_EXCL_START UNREACHABLE( "ripple::SharedWeakUnion::convertToWeak : destroying freshly " "added ref"); delete p; unsafeSetRawPtr(nullptr); return true; // Should never happen + // LCOV_EXCL_STOP case partialDestroy: // This is a weird case. We just converted the last strong // pointer to a weak pointer. 
diff --git a/include/xrpl/beast/net/IPAddress.h b/include/xrpl/beast/net/IPAddress.h index fb5dac90ec..f3c7387bb8 100644 --- a/include/xrpl/beast/net/IPAddress.h +++ b/include/xrpl/beast/net/IPAddress.h @@ -94,7 +94,11 @@ hash_append(Hasher& h, beast::IP::Address const& addr) noexcept else if (addr.is_v6()) hash_append(h, addr.to_v6().to_bytes()); else + { + // LCOV_EXCL_START UNREACHABLE("beast::hash_append : invalid address type"); + // LCOV_EXCL_STOP + } } } // namespace beast diff --git a/include/xrpl/ledger/ApplyView.h b/include/xrpl/ledger/ApplyView.h index d8b9028d7c..f90033966a 100644 --- a/include/xrpl/ledger/ApplyView.h +++ b/include/xrpl/ledger/ApplyView.h @@ -284,12 +284,14 @@ public: { if (key.type != ltOFFER) { + // LCOV_EXCL_START UNREACHABLE( "ripple::ApplyView::dirAppend : only Offers are appended to " "book directories"); // Only Offers are appended to book directories. Call dirInsert() // instead return std::nullopt; + // LCOV_EXCL_STOP } return dirAdd(true, directory, key.key, describe); } diff --git a/include/xrpl/protocol/detail/b58_utils.h b/include/xrpl/protocol/detail/b58_utils.h index ecd301524f..3908822661 100644 --- a/include/xrpl/protocol/detail/b58_utils.h +++ b/include/xrpl/protocol/detail/b58_utils.h @@ -129,10 +129,12 @@ inplace_bigint_div_rem(std::span numerator, std::uint64_t divisor) { // should never happen, but if it does then it seems natural to define // the a null set of numbers to be zero, so the remainder is also zero. + // LCOV_EXCL_START UNREACHABLE( "ripple::b58_fast::detail::inplace_bigint_div_rem : empty " "numerator"); return 0; + // LCOV_EXCL_STOP } auto to_u128 = [](std::uint64_t high, diff --git a/include/xrpl/resource/detail/Logic.h b/include/xrpl/resource/detail/Logic.h index b07ee00e73..0fc5a9035a 100644 --- a/include/xrpl/resource/detail/Logic.h +++ b/include/xrpl/resource/detail/Logic.h @@ -436,10 +436,12 @@ public: admin_.erase(admin_.iterator_to(entry)); break; default: + // LCOV_EXCL_START UNREACHABLE( "ripple::Resource::Logic::release : invalid entry " "kind"); break; + // LCOV_EXCL_STOP } inactive_.push_back(entry); entry.whenExpires = m_clock.now() + secondsUntilExpiration; diff --git a/src/libxrpl/basics/Log.cpp b/src/libxrpl/basics/Log.cpp index 14873a3fd7..95419dda20 100644 --- a/src/libxrpl/basics/Log.cpp +++ b/src/libxrpl/basics/Log.cpp @@ -239,9 +239,11 @@ Logs::fromSeverity(beast::severities::Severity level) case kError: return lsERROR; + // LCOV_EXCL_START default: UNREACHABLE("ripple::Logs::fromSeverity : invalid severity"); [[fallthrough]]; + // LCOV_EXCL_STOP case kFatal: break; } @@ -265,9 +267,11 @@ Logs::toSeverity(LogSeverity level) return kWarning; case lsERROR: return kError; + // LCOV_EXCL_START default: UNREACHABLE("ripple::Logs::toSeverity : invalid severity"); [[fallthrough]]; + // LCOV_EXCL_STOP case lsFATAL: break; } @@ -292,9 +296,11 @@ Logs::toString(LogSeverity s) return "Error"; case lsFATAL: return "Fatal"; + // LCOV_EXCL_START default: UNREACHABLE("ripple::Logs::toString : invalid severity"); return "Unknown"; + // LCOV_EXCL_STOP } } @@ -356,9 +362,11 @@ Logs::format( case kError: output += "ERR "; break; + // LCOV_EXCL_START default: UNREACHABLE("ripple::Logs::format : invalid severity"); [[fallthrough]]; + // LCOV_EXCL_STOP case kFatal: output += "FTL "; break; diff --git a/src/libxrpl/basics/contract.cpp b/src/libxrpl/basics/contract.cpp index b5a7b3f368..ea75929be0 100644 --- a/src/libxrpl/basics/contract.cpp +++ b/src/libxrpl/basics/contract.cpp @@ -36,6 +36,7 @@ LogThrow(std::string const& 
title) [[noreturn]] void LogicError(std::string const& s) noexcept { + // LCOV_EXCL_START JLOG(debugLog().fatal()) << s; std::cerr << "Logic error: " << s << std::endl; // Use a non-standard contract naming here (without namespace) because @@ -45,6 +46,7 @@ LogicError(std::string const& s) noexcept // For the above reasons, we want this contract to stand out. UNREACHABLE("LogicError", {{"message", s}}); std::abort(); + // LCOV_EXCL_STOP } } // namespace ripple diff --git a/src/libxrpl/json/Object.cpp b/src/libxrpl/json/Object.cpp index 62f686e228..55e573c0db 100644 --- a/src/libxrpl/json/Object.cpp +++ b/src/libxrpl/json/Object.cpp @@ -174,7 +174,7 @@ Array::append(Json::Value const& v) return; } } - UNREACHABLE("Json::Array::append : invalid type"); + UNREACHABLE("Json::Array::append : invalid type"); // LCOV_EXCL_LINE } void @@ -209,7 +209,7 @@ Object::set(std::string const& k, Json::Value const& v) return; } } - UNREACHABLE("Json::Object::set : invalid type"); + UNREACHABLE("Json::Object::set : invalid type"); // LCOV_EXCL_LINE } //------------------------------------------------------------------------------ diff --git a/src/libxrpl/json/json_value.cpp b/src/libxrpl/json/json_value.cpp index 1df8f6cf31..7e4d8b6d81 100644 --- a/src/libxrpl/json/json_value.cpp +++ b/src/libxrpl/json/json_value.cpp @@ -213,8 +213,10 @@ Value::Value(ValueType type) : type_(type), allocated_(0) value_.bool_ = false; break; + // LCOV_EXCL_START default: UNREACHABLE("Json::Value::Value(ValueType) : invalid type"); + // LCOV_EXCL_STOP } } @@ -290,8 +292,10 @@ Value::Value(Value const& other) : type_(other.type_) value_.map_ = new ObjectValues(*other.value_.map_); break; + // LCOV_EXCL_START default: UNREACHABLE("Json::Value::Value(Value const&) : invalid type"); + // LCOV_EXCL_STOP } } @@ -318,8 +322,10 @@ Value::~Value() delete value_.map_; break; + // LCOV_EXCL_START default: UNREACHABLE("Json::Value::~Value : invalid type"); + // LCOV_EXCL_STOP } } @@ -419,8 +425,10 @@ operator<(Value const& x, Value const& y) return *x.value_.map_ < *y.value_.map_; } + // LCOV_EXCL_START default: UNREACHABLE("Json::operator<(Value, Value) : invalid type"); + // LCOV_EXCL_STOP } return 0; // unreachable @@ -465,8 +473,10 @@ operator==(Value const& x, Value const& y) return x.value_.map_->size() == y.value_.map_->size() && *x.value_.map_ == *y.value_.map_; + // LCOV_EXCL_START default: UNREACHABLE("Json::operator==(Value, Value) : invalid type"); + // LCOV_EXCL_STOP } return 0; // unreachable @@ -506,8 +516,10 @@ Value::asString() const case objectValue: JSON_ASSERT_MESSAGE(false, "Type is not convertible to string"); + // LCOV_EXCL_START default: UNREACHABLE("Json::Value::asString : invalid type"); + // LCOV_EXCL_STOP } return ""; // unreachable @@ -548,8 +560,10 @@ Value::asInt() const case objectValue: JSON_ASSERT_MESSAGE(false, "Type is not convertible to int"); + // LCOV_EXCL_START default: UNREACHABLE("Json::Value::asInt : invalid type"); + // LCOV_EXCL_STOP } return 0; // unreachable; @@ -590,8 +604,10 @@ Value::asUInt() const case objectValue: JSON_ASSERT_MESSAGE(false, "Type is not convertible to uint"); + // LCOV_EXCL_START default: UNREACHABLE("Json::Value::asUInt : invalid type"); + // LCOV_EXCL_STOP } return 0; // unreachable; @@ -622,8 +638,10 @@ Value::asDouble() const case objectValue: JSON_ASSERT_MESSAGE(false, "Type is not convertible to double"); + // LCOV_EXCL_START default: UNREACHABLE("Json::Value::asDouble : invalid type"); + // LCOV_EXCL_STOP } return 0; // unreachable; @@ -654,8 +672,10 @@ 
Value::asBool() const case objectValue: return value_.map_->size() != 0; + // LCOV_EXCL_START default: UNREACHABLE("Json::Value::asBool : invalid type"); + // LCOV_EXCL_STOP } return false; // unreachable; @@ -710,8 +730,10 @@ Value::isConvertibleTo(ValueType other) const return other == objectValue || (other == nullValue && value_.map_->size() == 0); + // LCOV_EXCL_START default: UNREACHABLE("Json::Value::isConvertible : invalid type"); + // LCOV_EXCL_STOP } return false; // unreachable; @@ -744,8 +766,10 @@ Value::size() const case objectValue: return Int(value_.map_->size()); + // LCOV_EXCL_START default: UNREACHABLE("Json::Value::size : invalid type"); + // LCOV_EXCL_STOP } return 0; // unreachable; diff --git a/src/libxrpl/ledger/ApplyStateTable.cpp b/src/libxrpl/ledger/ApplyStateTable.cpp index 7b041939d4..aaad056c58 100644 --- a/src/libxrpl/ledger/ApplyStateTable.cpp +++ b/src/libxrpl/ledger/ApplyStateTable.cpp @@ -259,9 +259,11 @@ ApplyStateTable::apply( } else { + // LCOV_EXCL_START UNREACHABLE( "ripple::detail::ApplyStateTable::apply : unsupported " "operation type"); + // LCOV_EXCL_STOP } } diff --git a/src/libxrpl/ledger/ApplyView.cpp b/src/libxrpl/ledger/ApplyView.cpp index 8a0fd51bd8..b2a24f4582 100644 --- a/src/libxrpl/ledger/ApplyView.cpp +++ b/src/libxrpl/ledger/ApplyView.cpp @@ -133,8 +133,10 @@ ApplyView::emptyDirDelete(Keylet const& directory) if (directory.type != ltDIR_NODE || node->getFieldH256(sfRootIndex) != directory.key) { + // LCOV_EXCL_START UNREACHABLE("ripple::ApplyView::emptyDirDelete : invalid node type"); return false; + // LCOV_EXCL_STOP } // The directory still contains entries and so it cannot be removed diff --git a/src/libxrpl/ledger/BookDirs.cpp b/src/libxrpl/ledger/BookDirs.cpp index f777d23aca..61ec160b1b 100644 --- a/src/libxrpl/ledger/BookDirs.cpp +++ b/src/libxrpl/ledger/BookDirs.cpp @@ -36,7 +36,9 @@ BookDirs::BookDirs(ReadView const& view, Book const& book) { if (!cdirFirst(*view_, key_, sle_, entry_, index_)) { + // LCOV_EXCL_START UNREACHABLE("ripple::BookDirs::BookDirs : directory is empty"); + // LCOV_EXCL_STOP } } } @@ -110,9 +112,11 @@ BookDirs::const_iterator::operator++() } else if (!cdirFirst(*view_, cur_key_, sle_, entry_, index_)) { + // LCOV_EXCL_START UNREACHABLE( "ripple::BookDirs::const_iterator::operator++ : directory is " "empty"); + // LCOV_EXCL_STOP } } diff --git a/src/libxrpl/ledger/View.cpp b/src/libxrpl/ledger/View.cpp index 10b7e81b4f..02de872ad3 100644 --- a/src/libxrpl/ledger/View.cpp +++ b/src/libxrpl/ledger/View.cpp @@ -324,10 +324,12 @@ isVaultPseudoAccountFrozen( auto const issuer = mptIssuance->getAccountID(sfIssuer); auto const mptIssuer = view.read(keylet::account(issuer)); if (mptIssuer == nullptr) - { // LCOV_EXCL_START + { + // LCOV_EXCL_START UNREACHABLE("ripple::isVaultPseudoAccountFrozen : null MPToken issuer"); return false; - } // LCOV_EXCL_STOP + // LCOV_EXCL_STOP + } if (!mptIssuer->isFieldPresent(sfVaultID)) return false; // not a Vault pseudo-account, common case @@ -338,7 +340,8 @@ isVaultPseudoAccountFrozen( { // LCOV_EXCL_START UNREACHABLE("ripple::isVaultPseudoAccountFrozen : null vault"); return false; - } // LCOV_EXCL_STOP + // LCOV_EXCL_STOP + } return isAnyFrozen(view, {issuer, account}, vault->at(sfAsset), depth + 1); } @@ -2676,7 +2679,8 @@ enforceMPTokenAuthorization( UNREACHABLE( "ripple::enforceMPTokenAuthorization : condition list is incomplete"); return tefINTERNAL; -} // LCOV_EXCL_STOP + // LCOV_EXCL_STOP +} TER canTransfer( diff --git a/src/libxrpl/protocol/STBase.cpp 
b/src/libxrpl/protocol/STBase.cpp index 417b7e2302..ecbe6833d0 100644 --- a/src/libxrpl/protocol/STBase.cpp +++ b/src/libxrpl/protocol/STBase.cpp @@ -112,7 +112,9 @@ void STBase::add(Serializer& s) const { // Should never be called + // LCOV_EXCL_START UNREACHABLE("ripple::STBase::add : not implemented"); + // LCOV_EXCL_STOP } bool diff --git a/src/libxrpl/protocol/TxMeta.cpp b/src/libxrpl/protocol/TxMeta.cpp index 2343a6a794..833a0677b9 100644 --- a/src/libxrpl/protocol/TxMeta.cpp +++ b/src/libxrpl/protocol/TxMeta.cpp @@ -238,9 +238,11 @@ TxMeta::getAffectedNode(uint256 const& node) if (n.getFieldH256(sfLedgerIndex) == node) return n; } + // LCOV_EXCL_START UNREACHABLE("ripple::TxMeta::getAffectedNode(uint256) : node not found"); Throw("Affected node not found"); return *(mNodes.begin()); // Silence compiler warning. + // LCOV_EXCL_STOP } STObject diff --git a/src/xrpld/app/ledger/Ledger.cpp b/src/xrpld/app/ledger/Ledger.cpp index 6de4f2cbde..64a6b16cbc 100644 --- a/src/xrpld/app/ledger/Ledger.cpp +++ b/src/xrpld/app/ledger/Ledger.cpp @@ -433,8 +433,10 @@ Ledger::read(Keylet const& k) const { if (k.key == beast::zero) { + // LCOV_EXCL_START UNREACHABLE("ripple::Ledger::read : zero key"); return nullptr; + // LCOV_EXCL_STOP } auto const& item = stateMap_.peekItem(k.key); if (!item) @@ -860,6 +862,7 @@ Ledger::assertSensible(beast::Journal ledgerJ) const return true; } + // LCOV_EXCL_START Json::Value j = getJson({*this, {}}); j[jss::accountTreeHash] = to_string(info_.accountHash); @@ -870,6 +873,7 @@ Ledger::assertSensible(beast::Journal ledgerJ) const UNREACHABLE("ripple::Ledger::assertSensible : ledger is not sensible"); return false; + // LCOV_EXCL_STOP } // update the skip list with the information from our previous ledger diff --git a/src/xrpld/app/ledger/detail/InboundLedger.cpp b/src/xrpld/app/ledger/detail/InboundLedger.cpp index eafa939506..47d546c3af 100644 --- a/src/xrpld/app/ledger/detail/InboundLedger.cpp +++ b/src/xrpld/app/ledger/detail/InboundLedger.cpp @@ -963,8 +963,10 @@ InboundLedger::takeAsRootNode(Slice const& data, SHAMapAddNode& san) if (!mHaveHeader) { + // LCOV_EXCL_START UNREACHABLE("ripple::InboundLedger::takeAsRootNode : no ledger header"); return false; + // LCOV_EXCL_STOP } AccountStateSF filter( @@ -988,8 +990,10 @@ InboundLedger::takeTxRootNode(Slice const& data, SHAMapAddNode& san) if (!mHaveHeader) { + // LCOV_EXCL_START UNREACHABLE("ripple::InboundLedger::takeTxRootNode : no ledger header"); return false; + // LCOV_EXCL_STOP } TransactionStateSF filter( diff --git a/src/xrpld/app/ledger/detail/LedgerMaster.cpp b/src/xrpld/app/ledger/detail/LedgerMaster.cpp index 78f0375b16..f39e8af0b6 100644 --- a/src/xrpld/app/ledger/detail/LedgerMaster.cpp +++ b/src/xrpld/app/ledger/detail/LedgerMaster.cpp @@ -1273,11 +1273,13 @@ LedgerMaster::findNewLedgersToPublish( } else if (hash->isZero()) { + // LCOV_EXCL_START JLOG(m_journal.fatal()) << "Ledger: " << valSeq << " does not have hash for " << seq; UNREACHABLE( "ripple::LedgerMaster::findNewLedgersToPublish : ledger " "not found"); + // LCOV_EXCL_STOP } else { diff --git a/src/xrpld/app/main/Application.cpp b/src/xrpld/app/main/Application.cpp index 05b8f5e5fa..616afc957d 100644 --- a/src/xrpld/app/main/Application.cpp +++ b/src/xrpld/app/main/Application.cpp @@ -1994,11 +1994,13 @@ ApplicationImp::loadOldLedger( if (!loadLedger) { + // LCOV_EXCL_START JLOG(m_journal.fatal()) << "Replay ledger missing/damaged"; UNREACHABLE( "ripple::ApplicationImp::loadOldLedger : replay ledger " "missing/damaged"); return false; + 
// LCOV_EXCL_STOP } } } @@ -2025,28 +2027,34 @@ ApplicationImp::loadOldLedger( if (loadLedger->info().accountHash.isZero()) { + // LCOV_EXCL_START JLOG(m_journal.fatal()) << "Ledger is empty."; UNREACHABLE( "ripple::ApplicationImp::loadOldLedger : ledger is empty"); return false; + // LCOV_EXCL_STOP } if (!loadLedger->walkLedger(journal("Ledger"), true)) { + // LCOV_EXCL_START JLOG(m_journal.fatal()) << "Ledger is missing nodes."; UNREACHABLE( "ripple::ApplicationImp::loadOldLedger : ledger is missing " "nodes"); return false; + // LCOV_EXCL_STOP } if (!loadLedger->assertSensible(journal("Ledger"))) { + // LCOV_EXCL_START JLOG(m_journal.fatal()) << "Ledger is not sensible."; UNREACHABLE( "ripple::ApplicationImp::loadOldLedger : ledger is not " "sensible"); return false; + // LCOV_EXCL_STOP } m_ledgerMaster->setLedgerRangePresent( diff --git a/src/xrpld/app/misc/NetworkOPs.cpp b/src/xrpld/app/misc/NetworkOPs.cpp index b9069442f8..e72b2732d0 100644 --- a/src/xrpld/app/misc/NetworkOPs.cpp +++ b/src/xrpld/app/misc/NetworkOPs.cpp @@ -1798,11 +1798,13 @@ NetworkOPsImp::getOwnerInfo( case ltACCOUNT_ROOT: case ltDIR_NODE: + // LCOV_EXCL_START default: UNREACHABLE( "ripple::NetworkOPsImp::getOwnerInfo : invalid " "type"); break; + // LCOV_EXCL_STOP } } @@ -3831,12 +3833,14 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo) accountId, minLedger, maxLedger, marker, 0, true}; return db->newestAccountTxPage(options); } + // LCOV_EXCL_START default: { UNREACHABLE( "ripple::NetworkOPsImp::addAccountHistoryJob::" "getMoreTxns : invalid database type"); return {}; } + // LCOV_EXCL_STOP } }; @@ -4030,10 +4034,12 @@ NetworkOPsImp::subAccountHistoryStart( } else { + // LCOV_EXCL_START UNREACHABLE( "ripple::NetworkOPsImp::subAccountHistoryStart : failed to " "access genesis account"); return; + // LCOV_EXCL_STOP } } subInfo.index_->historyLastLedgerSeq_ = ledger->seq(); @@ -4140,7 +4146,11 @@ NetworkOPsImp::subBook(InfoSub::ref isrListener, Book const& book) if (auto listeners = app_.getOrderBookDB().makeBookListeners(book)) listeners->addSubscriber(isrListener); else + { + // LCOV_EXCL_START UNREACHABLE("ripple::NetworkOPsImp::subBook : null book listeners"); + // LCOV_EXCL_STOP + } return true; } diff --git a/src/xrpld/app/misc/detail/ValidatorList.cpp b/src/xrpld/app/misc/detail/ValidatorList.cpp index 2b45cec3be..92095b7211 100644 --- a/src/xrpld/app/misc/detail/ValidatorList.cpp +++ b/src/xrpld/app/misc/detail/ValidatorList.cpp @@ -1167,15 +1167,17 @@ ValidatorList::applyList( } if (!publicKeyType(*pubKeyOpt)) - { // LCOV_EXCL_START - // This is an impossible situation because we will never load an - // invalid public key type (see checks in `ValidatorList::load`) however - // we can only arrive here if the key used by the manifest matched one of - // the loaded keys + { + // This is an impossible situation because we will never load an + // invalid public key type (see checks in `ValidatorList::load`) however + // we can only arrive here if the key used by the manifest matched one + // of the loaded keys + // LCOV_EXCL_START UNREACHABLE( "ripple::ValidatorList::applyList : invalid public key type"); return PublisherListStats{result}; - } // LCOV_EXCL_STOP + // LCOV_EXCL_STOP + } PublicKey pubKey = *pubKeyOpt; if (result > ListDisposition::pending) diff --git a/src/xrpld/app/paths/Pathfinder.cpp b/src/xrpld/app/paths/Pathfinder.cpp index 41a3697888..4bc9304853 100644 --- a/src/xrpld/app/paths/Pathfinder.cpp +++ b/src/xrpld/app/paths/Pathfinder.cpp @@ -648,8 +648,10 @@ 
Pathfinder::getBestPaths( if (path.empty()) { + // LCOV_EXCL_START UNREACHABLE("ripple::Pathfinder::getBestPaths : path not found"); continue; + // LCOV_EXCL_STOP } bool startsWithIssuer = false; diff --git a/src/xrpld/app/paths/detail/BookStep.cpp b/src/xrpld/app/paths/detail/BookStep.cpp index 97cf87c046..54d0d8d0c9 100644 --- a/src/xrpld/app/paths/detail/BookStep.cpp +++ b/src/xrpld/app/paths/detail/BookStep.cpp @@ -1113,11 +1113,13 @@ BookStep::revImp( { case -1: { // something went very wrong + // LCOV_EXCL_START JLOG(j_.error()) << "BookStep remainingOut < 0 " << to_string(remainingOut); UNREACHABLE("ripple::BookStep::revImp : remaining less than zero"); cache_.emplace(beast::zero, beast::zero); return {beast::zero, beast::zero}; + // LCOV_EXCL_STOP } case 0: { // due to normalization, remainingOut can be zero without @@ -1283,12 +1285,14 @@ BookStep::fwdImp( switch (remainingIn.signum()) { case -1: { + // LCOV_EXCL_START // something went very wrong JLOG(j_.error()) << "BookStep remainingIn < 0 " << to_string(remainingIn); UNREACHABLE("ripple::BookStep::fwdImp : remaining less than zero"); cache_.emplace(beast::zero, beast::zero); return {beast::zero, beast::zero}; + // LCOV_EXCL_STOP } case 0: { // due to normalization, remainingIn can be zero without @@ -1421,8 +1425,10 @@ bookStepEqual(Step const& step, ripple::Book const& book) bool const outXRP = isXRP(book.out.currency); if (inXRP && outXRP) { + // LCOV_EXCL_START UNREACHABLE("ripple::test::bookStepEqual : no XRP to XRP book step"); return false; // no such thing as xrp/xrp book step + // LCOV_EXCL_STOP } if (inXRP && !outXRP) return equalHelper< diff --git a/src/xrpld/app/paths/detail/DirectStep.cpp b/src/xrpld/app/paths/detail/DirectStep.cpp index 03d207e008..a0808985b5 100644 --- a/src/xrpld/app/paths/detail/DirectStep.cpp +++ b/src/xrpld/app/paths/detail/DirectStep.cpp @@ -931,10 +931,12 @@ DirectStepI::check(StrandContext const& ctx) const { if (!ctx.prevStep) { + // LCOV_EXCL_START UNREACHABLE( "ripple::DirectStepI::check : prev seen book without a " "prev step"); return temBAD_PATH_LOOP; + // LCOV_EXCL_STOP } // This is OK if the previous step is a book step that outputs this diff --git a/src/xrpld/app/paths/detail/FlowDebugInfo.h b/src/xrpld/app/paths/detail/FlowDebugInfo.h index eec1d7c5a6..290a201e68 100644 --- a/src/xrpld/app/paths/detail/FlowDebugInfo.h +++ b/src/xrpld/app/paths/detail/FlowDebugInfo.h @@ -126,10 +126,12 @@ struct FlowDebugInfo auto i = timePoints.find(tag); if (i == timePoints.end()) { + // LCOV_EXCL_START UNREACHABLE( "ripple::path::detail::FlowDebugInfo::duration : timepoint not " "found"); return std::chrono::duration(0); + // LCOV_EXCL_STOP } auto const& t = i->second; return std::chrono::duration_cast>( diff --git a/src/xrpld/app/paths/detail/PaySteps.cpp b/src/xrpld/app/paths/detail/PaySteps.cpp index 6eb38eee83..f5621bcdf7 100644 --- a/src/xrpld/app/paths/detail/PaySteps.cpp +++ b/src/xrpld/app/paths/detail/PaySteps.cpp @@ -95,11 +95,13 @@ toStep( if (e1->isOffer() && e2->isAccount()) { + // LCOV_EXCL_START // should already be taken care of JLOG(j.error()) << "Found offer/account payment step. 
Aborting payment strand."; UNREACHABLE("ripple::toStep : offer/account payment payment strand"); return {temBAD_PATH, std::unique_ptr{}}; + // LCOV_EXCL_STOP } XRPL_ASSERT( @@ -392,8 +394,10 @@ toStrand( next->getCurrency() != curIssue.currency) { // Should never happen + // LCOV_EXCL_START UNREACHABLE("ripple::toStrand : offer currency mismatch"); return {temBAD_PATH, Strand{}}; + // LCOV_EXCL_STOP } auto s = toStep( @@ -457,9 +461,11 @@ toStrand( if (!checkStrand()) { + // LCOV_EXCL_START JLOG(j.warn()) << "Flow check strand failed"; UNREACHABLE("ripple::toStrand : invalid strand"); return {temBAD_PATH, Strand{}}; + // LCOV_EXCL_STOP } return {tesSUCCESS, std::move(result)}; diff --git a/src/xrpld/app/paths/detail/StrandFlow.h b/src/xrpld/app/paths/detail/StrandFlow.h index 47037c5f5e..7ccad9bf4f 100644 --- a/src/xrpld/app/paths/detail/StrandFlow.h +++ b/src/xrpld/app/paths/detail/StrandFlow.h @@ -167,6 +167,7 @@ flow( // Something is very wrong // throwing out the sandbox can only increase liquidity // yet the limiting is still limiting + // LCOV_EXCL_START JLOG(j.fatal()) << "Re-executed limiting step failed. r.first: " << to_string(get(r.first)) @@ -175,6 +176,7 @@ flow( "ripple::flow : first step re-executing the " "limiting step failed"); return Result{strand, std::move(ofrsToRm)}; + // LCOV_EXCL_STOP } } else if (!strand[i]->equalOut(r.second, stepOut)) @@ -202,6 +204,7 @@ flow( // Something is very wrong // throwing out the sandbox can only increase liquidity // yet the limiting is still limiting + // LCOV_EXCL_START #ifndef NDEBUG JLOG(j.fatal()) << "Re-executed limiting step failed. r.second: " @@ -213,6 +216,7 @@ flow( "ripple::flow : limiting step re-executing the " "limiting step failed"); return Result{strand, std::move(ofrsToRm)}; + // LCOV_EXCL_STOP } } @@ -238,6 +242,7 @@ flow( // The limits should already have been found, so executing a // strand forward from the limiting step should not find a // new limit + // LCOV_EXCL_START #ifndef NDEBUG JLOG(j.fatal()) << "Re-executed forward pass failed. 
r.first: " @@ -249,6 +254,7 @@ flow( "ripple::flow : non-limiting step re-executing the " "forward pass failed"); return Result{strand, std::move(ofrsToRm)}; + // LCOV_EXCL_STOP } stepIn = r.second; } @@ -499,8 +505,10 @@ public: { if (i >= cur_.size()) { + // LCOV_EXCL_START UNREACHABLE("ripple::ActiveStrands::get : input out of range"); return nullptr; + // LCOV_EXCL_STOP } return cur_[i]; } diff --git a/src/xrpld/app/rdb/RelationalDatabase.h b/src/xrpld/app/rdb/RelationalDatabase.h index 25b16f04a1..1a5b2ba830 100644 --- a/src/xrpld/app/rdb/RelationalDatabase.h +++ b/src/xrpld/app/rdb/RelationalDatabase.h @@ -235,12 +235,14 @@ rangeCheckedCast(C c) std::numeric_limits::is_signed && c < std::numeric_limits::lowest())) { - /* This should never happen */ + // This should never happen + // LCOV_EXCL_START UNREACHABLE("ripple::rangeCheckedCast : domain error"); JLOG(debugLog().error()) << "rangeCheckedCast domain error:" << " value = " << c << " min = " << std::numeric_limits::lowest() << " max: " << std::numeric_limits::max(); + // LCOV_EXCL_STOP } return static_cast(c); diff --git a/src/xrpld/app/rdb/backend/detail/Node.cpp b/src/xrpld/app/rdb/backend/detail/Node.cpp index 6a0544091b..88f11a272b 100644 --- a/src/xrpld/app/rdb/backend/detail/Node.cpp +++ b/src/xrpld/app/rdb/backend/detail/Node.cpp @@ -58,9 +58,11 @@ to_string(TableType type) return "Transactions"; case TableType::AccountTransactions: return "AccountTransactions"; + // LCOV_EXCL_START default: UNREACHABLE("ripple::detail::to_string : invalid TableType"); return "Unknown"; + // LCOV_EXCL_STOP } } @@ -202,18 +204,22 @@ saveValidatedLedger( if (!ledger->info().accountHash.isNonZero()) { + // LCOV_EXCL_START JLOG(j.fatal()) << "AH is zero: " << getJson({*ledger, {}}); UNREACHABLE("ripple::detail::saveValidatedLedger : zero account hash"); + // LCOV_EXCL_STOP } if (ledger->info().accountHash != ledger->stateMap().getHash().as_uint256()) { + // LCOV_EXCL_START JLOG(j.fatal()) << "sAL: " << ledger->info().accountHash << " != " << ledger->stateMap().getHash(); JLOG(j.fatal()) << "saveAcceptedLedger: seq=" << seq << ", current=" << current; UNREACHABLE( "ripple::detail::saveValidatedLedger : mismatched account hash"); + // LCOV_EXCL_STOP } XRPL_ASSERT( diff --git a/src/xrpld/app/tx/detail/Change.cpp b/src/xrpld/app/tx/detail/Change.cpp index d6a31024f3..77d098ea0f 100644 --- a/src/xrpld/app/tx/detail/Change.cpp +++ b/src/xrpld/app/tx/detail/Change.cpp @@ -151,9 +151,11 @@ Change::doApply() return applyFee(); case ttUNL_MODIFY: return applyUNLModify(); + // LCOV_EXCL_START default: UNREACHABLE("ripple::Change::doApply : invalid transaction type"); return tefFAILURE; + // LCOV_EXCL_STOP } } diff --git a/src/xrpld/app/tx/detail/DeleteAccount.cpp b/src/xrpld/app/tx/detail/DeleteAccount.cpp index 565d938c83..d52e84d755 100644 --- a/src/xrpld/app/tx/detail/DeleteAccount.cpp +++ b/src/xrpld/app/tx/detail/DeleteAccount.cpp @@ -399,12 +399,14 @@ DeleteAccount::doApply() return {result, SkipEntry::No}; } + // LCOV_EXCL_START UNREACHABLE( "ripple::DeleteAccount::doApply : undeletable item not found " "in preclaim"); JLOG(j_.error()) << "DeleteAccount undeletable item not " "found in preclaim."; return {tecHAS_OBLIGATIONS, SkipEntry::No}; + // LCOV_EXCL_STOP }, ctx_.journal); if (ter != tesSUCCESS) diff --git a/src/xrpld/app/tx/detail/Offer.h b/src/xrpld/app/tx/detail/Offer.h index c214bea23f..58bd65ce46 100644 --- a/src/xrpld/app/tx/detail/Offer.h +++ b/src/xrpld/app/tx/detail/Offer.h @@ -225,11 +225,13 @@ template void 
TOffer::setFieldAmounts() { + // LCOV_EXCL_START #ifdef _MSC_VER UNREACHABLE("ripple::TOffer::setFieldAmounts : must be specialized"); #else static_assert(sizeof(TOut) == -1, "Must be specialized"); #endif + // LCOV_EXCL_STOP } template diff --git a/src/xrpld/app/tx/detail/OfferStream.cpp b/src/xrpld/app/tx/detail/OfferStream.cpp index 8e1215f5c8..9d43e419d7 100644 --- a/src/xrpld/app/tx/detail/OfferStream.cpp +++ b/src/xrpld/app/tx/detail/OfferStream.cpp @@ -369,10 +369,12 @@ TOfferStreamBase::step() std::is_same_v)) return shouldRmSmallIncreasedQOffer(); } + // LCOV_EXCL_START UNREACHABLE( "rippls::TOfferStreamBase::step::rmSmallIncreasedQOffer : XRP " "vs XRP offer"); return false; + // LCOV_EXCL_STOP }(); if (rmSmallIncreasedQOffer) diff --git a/src/xrpld/app/tx/detail/SetSignerList.cpp b/src/xrpld/app/tx/detail/SetSignerList.cpp index b5d9d4d5b8..ec2f902009 100644 --- a/src/xrpld/app/tx/detail/SetSignerList.cpp +++ b/src/xrpld/app/tx/detail/SetSignerList.cpp @@ -134,8 +134,10 @@ SetSignerList::doApply() default: break; } + // LCOV_EXCL_START UNREACHABLE("ripple::SetSignerList::doApply : invalid operation"); return temMALFORMED; + // LCOV_EXCL_STOP } void diff --git a/src/xrpld/app/tx/detail/Transactor.cpp b/src/xrpld/app/tx/detail/Transactor.cpp index f6b8b3c9d2..920b1a58bc 100644 --- a/src/xrpld/app/tx/detail/Transactor.cpp +++ b/src/xrpld/app/tx/detail/Transactor.cpp @@ -1170,11 +1170,13 @@ Transactor::operator()() if (!s2.isEquivalent(ctx_.tx)) { + // LCOV_EXCL_START JLOG(j_.fatal()) << "Transaction serdes mismatch"; JLOG(j_.info()) << to_string(ctx_.tx.getJson(JsonOptions::none)); JLOG(j_.fatal()) << s2.getJson(JsonOptions::none); UNREACHABLE( "ripple::Transactor::operator() : transaction serdes mismatch"); + // LCOV_EXCL_STOP } } #endif diff --git a/src/xrpld/app/tx/detail/XChainBridge.cpp b/src/xrpld/app/tx/detail/XChainBridge.cpp index 2587845df5..5f5c081e2f 100644 --- a/src/xrpld/app/tx/detail/XChainBridge.cpp +++ b/src/xrpld/app/tx/detail/XChainBridge.cpp @@ -223,10 +223,12 @@ claimHelper( auto i = signersList.find(a.keyAccount); if (i == signersList.end()) { + // LCOV_EXCL_START UNREACHABLE( "ripple::claimHelper : invalid inputs"); // should have already // been checked continue; + // LCOV_EXCL_STOP } weight += i->second; rewardAccounts.push_back(a.rewardAccount); diff --git a/src/xrpld/app/tx/detail/applySteps.cpp b/src/xrpld/app/tx/detail/applySteps.cpp index c2e4e13f08..1cad93eedc 100644 --- a/src/xrpld/app/tx/detail/applySteps.cpp +++ b/src/xrpld/app/tx/detail/applySteps.cpp @@ -129,10 +129,12 @@ invoke_preflight(PreflightContext const& ctx) catch (UnknownTxnType const& e) { // Should never happen + // LCOV_EXCL_START JLOG(ctx.j.fatal()) << "Unknown transaction type in preflight: " << e.txnType; UNREACHABLE("ripple::invoke_preflight : unknown transaction type"); return {temUNKNOWN, TxConsequences{temUNKNOWN}}; + // LCOV_EXCL_STOP } } @@ -183,10 +185,12 @@ invoke_preclaim(PreclaimContext const& ctx) catch (UnknownTxnType const& e) { // Should never happen + // LCOV_EXCL_START JLOG(ctx.j.fatal()) << "Unknown transaction type in preclaim: " << e.txnType; UNREACHABLE("ripple::invoke_preclaim : unknown transaction type"); return temUNKNOWN; + // LCOV_EXCL_STOP } } @@ -217,9 +221,11 @@ invoke_calculateBaseFee(ReadView const& view, STTx const& tx) } catch (UnknownTxnType const& e) { + // LCOV_EXCL_START UNREACHABLE( "ripple::invoke_calculateBaseFee : unknown transaction type"); return XRPAmount{0}; + // LCOV_EXCL_STOP } } @@ -277,10 +283,12 @@ invoke_apply(ApplyContext& ctx) 
catch (UnknownTxnType const& e) { // Should never happen + // LCOV_EXCL_START JLOG(ctx.journal.fatal()) << "Unknown transaction type in apply: " << e.txnType; UNREACHABLE("ripple::invoke_apply : unknown transaction type"); return {temUNKNOWN, false}; + // LCOV_EXCL_STOP } } diff --git a/src/xrpld/nodestore/backend/NuDBFactory.cpp b/src/xrpld/nodestore/backend/NuDBFactory.cpp index 2f4e9d502e..727dec6f3e 100644 --- a/src/xrpld/nodestore/backend/NuDBFactory.cpp +++ b/src/xrpld/nodestore/backend/NuDBFactory.cpp @@ -121,11 +121,13 @@ public: using namespace boost::filesystem; if (db_.is_open()) { + // LCOV_EXCL_START UNREACHABLE( "ripple::NodeStore::NuDBBackend::open : database is already " "open"); JLOG(j_.error()) << "database is already open"; return; + // LCOV_EXCL_STOP } auto const folder = path(name_); auto const dp = (folder / "nudb.dat").string(); diff --git a/src/xrpld/nodestore/backend/RocksDBFactory.cpp b/src/xrpld/nodestore/backend/RocksDBFactory.cpp index 0e421cd6bd..57c136a10a 100644 --- a/src/xrpld/nodestore/backend/RocksDBFactory.cpp +++ b/src/xrpld/nodestore/backend/RocksDBFactory.cpp @@ -232,11 +232,13 @@ public: { if (m_db) { + // LCOV_EXCL_START UNREACHABLE( "ripple::NodeStore::RocksDBBackend::open : database is already " "open"); JLOG(m_journal.error()) << "database is already open"; return; + // LCOV_EXCL_STOP } rocksdb::DB* db = nullptr; m_options.create_if_missing = createIfMissing; diff --git a/src/xrpld/overlay/Compression.h b/src/xrpld/overlay/Compression.h index 3a278a3403..35af4bf925 100644 --- a/src/xrpld/overlay/Compression.h +++ b/src/xrpld/overlay/Compression.h @@ -60,12 +60,14 @@ decompress( in, inSize, decompressed, decompressedSize); else { + // LCOV_EXCL_START JLOG(debugLog().warn()) << "decompress: invalid compression algorithm " << static_cast(algorithm); UNREACHABLE( "ripple::compression::decompress : invalid compression " "algorithm"); + // LCOV_EXCL_STOP } } catch (...) @@ -98,11 +100,13 @@ compress( in, inSize, std::forward(bf)); else { + // LCOV_EXCL_START JLOG(debugLog().warn()) << "compress: invalid compression algorithm" << static_cast(algorithm); UNREACHABLE( "ripple::compression::compress : invalid compression " "algorithm"); + // LCOV_EXCL_STOP } } catch (...) diff --git a/src/xrpld/overlay/detail/PeerImp.cpp b/src/xrpld/overlay/detail/PeerImp.cpp index 93371f42ab..47d01eb7c5 100644 --- a/src/xrpld/overlay/detail/PeerImp.cpp +++ b/src/xrpld/overlay/detail/PeerImp.cpp @@ -2234,10 +2234,12 @@ PeerImp::onValidatorListMessage( case ListDisposition::invalid: case ListDisposition::unsupported_version: break; + // LCOV_EXCL_START default: UNREACHABLE( "ripple::PeerImp::onValidatorListMessage : invalid best list " "disposition"); + // LCOV_EXCL_STOP } // Charge based on the worst result @@ -2278,10 +2280,12 @@ PeerImp::onValidatorListMessage( // If it happens frequently, that's probably bad. fee_.update(Resource::feeInvalidData, "version"); break; + // LCOV_EXCL_START default: UNREACHABLE( "ripple::PeerImp::onValidatorListMessage : invalid worst list " "disposition"); + // LCOV_EXCL_STOP } // Log based on all the results. 
@@ -2338,10 +2342,12 @@ PeerImp::onValidatorListMessage( << "Ignored " << count << "invalid " << messageType << "(s) from peer " << remote_address_; break; + // LCOV_EXCL_START default: UNREACHABLE( "ripple::PeerImp::onValidatorListMessage : invalid list " "disposition"); + // LCOV_EXCL_STOP } } } diff --git a/src/xrpld/peerfinder/detail/Counts.h b/src/xrpld/peerfinder/detail/Counts.h index 821431c5bb..a35473ddb5 100644 --- a/src/xrpld/peerfinder/detail/Counts.h +++ b/src/xrpld/peerfinder/detail/Counts.h @@ -295,10 +295,12 @@ private: m_closingCount += n; break; + // LCOV_EXCL_START default: UNREACHABLE( "ripple::PeerFinder::Counts::adjust : invalid input state"); break; + // LCOV_EXCL_STOP }; } diff --git a/src/xrpld/peerfinder/detail/Logic.h b/src/xrpld/peerfinder/detail/Logic.h index 4b92a1d143..74bec8431e 100644 --- a/src/xrpld/peerfinder/detail/Logic.h +++ b/src/xrpld/peerfinder/detail/Logic.h @@ -976,11 +976,13 @@ public: << slot->remote_endpoint(); break; + // LCOV_EXCL_START default: UNREACHABLE( "ripple::PeerFinder::Logic::on_closed : invalid slot " "state"); break; + // LCOV_EXCL_STOP } } diff --git a/src/xrpld/perflog/detail/PerfLogImp.cpp b/src/xrpld/perflog/detail/PerfLogImp.cpp index a34faf885c..583ec095e1 100644 --- a/src/xrpld/perflog/detail/PerfLogImp.cpp +++ b/src/xrpld/perflog/detail/PerfLogImp.cpp @@ -53,9 +53,11 @@ PerfLogImp::Counters::Counters( if (!inserted) { // Ensure that no other function populates this entry. + // LCOV_EXCL_START UNREACHABLE( "ripple::perf::PerfLogImp::Counters::Counters : failed to " "insert label"); + // LCOV_EXCL_STOP } } } @@ -68,9 +70,11 @@ PerfLogImp::Counters::Counters( if (!inserted) { // Ensure that no other function populates this entry. + // LCOV_EXCL_START UNREACHABLE( "ripple::perf::PerfLogImp::Counters::Counters : failed to " "insert job type"); + // LCOV_EXCL_STOP } } } @@ -329,8 +333,10 @@ PerfLogImp::rpcStart(std::string const& method, std::uint64_t const requestId) auto counter = counters_.rpc_.find(method); if (counter == counters_.rpc_.end()) { + // LCOV_EXCL_START UNREACHABLE("ripple::perf::PerfLogImp::rpcStart : valid method input"); return; + // LCOV_EXCL_STOP } { @@ -351,8 +357,10 @@ PerfLogImp::rpcEnd( auto counter = counters_.rpc_.find(method); if (counter == counters_.rpc_.end()) { + // LCOV_EXCL_START UNREACHABLE("ripple::perf::PerfLogImp::rpcEnd : valid method input"); return; + // LCOV_EXCL_STOP } steady_time_point startTime; { @@ -365,8 +373,10 @@ PerfLogImp::rpcEnd( } else { + // LCOV_EXCL_START UNREACHABLE( "ripple::perf::PerfLogImp::rpcEnd : valid requestId input"); + // LCOV_EXCL_STOP } } std::lock_guard lock(counter->second.mutex); @@ -384,9 +394,11 @@ PerfLogImp::jobQueue(JobType const type) auto counter = counters_.jq_.find(type); if (counter == counters_.jq_.end()) { + // LCOV_EXCL_START UNREACHABLE( "ripple::perf::PerfLogImp::jobQueue : valid job type input"); return; + // LCOV_EXCL_STOP } std::lock_guard lock(counter->second.mutex); ++counter->second.value.queued; @@ -402,10 +414,13 @@ PerfLogImp::jobStart( auto counter = counters_.jq_.find(type); if (counter == counters_.jq_.end()) { + // LCOV_EXCL_START UNREACHABLE( "ripple::perf::PerfLogImp::jobStart : valid job type input"); return; + // LCOV_EXCL_STOP } + { std::lock_guard lock(counter->second.mutex); ++counter->second.value.started; @@ -422,10 +437,13 @@ PerfLogImp::jobFinish(JobType const type, microseconds dur, int instance) auto counter = counters_.jq_.find(type); if (counter == counters_.jq_.end()) { + // LCOV_EXCL_START UNREACHABLE( 
"ripple::perf::PerfLogImp::jobFinish : valid job type input"); return; + // LCOV_EXCL_STOP } + { std::lock_guard lock(counter->second.mutex); ++counter->second.value.finished; diff --git a/src/xrpld/rpc/detail/Handler.cpp b/src/xrpld/rpc/detail/Handler.cpp index 3b32524ee2..d15c5aaed0 100644 --- a/src/xrpld/rpc/detail/Handler.cpp +++ b/src/xrpld/rpc/detail/Handler.cpp @@ -39,8 +39,10 @@ byRef(Function const& f) result = f(context); if (result.type() != Json::objectValue) { + // LCOV_EXCL_START UNREACHABLE("ripple::RPC::byRef : result is object"); result = RPC::makeObjectValue(result); + // LCOV_EXCL_STOP } return Status(); diff --git a/src/xrpld/rpc/detail/Status.cpp b/src/xrpld/rpc/detail/Status.cpp index 738219b6b5..ce15003968 100644 --- a/src/xrpld/rpc/detail/Status.cpp +++ b/src/xrpld/rpc/detail/Status.cpp @@ -51,8 +51,10 @@ Status::codeString() const return sStr.str(); } + // LCOV_EXCL_START UNREACHABLE("ripple::RPC::codeString : invalid type"); return ""; + // LCOV_EXCL_STOP } void diff --git a/src/xrpld/rpc/handlers/AccountChannels.cpp b/src/xrpld/rpc/handlers/AccountChannels.cpp index 1b0046ab64..17e46f052f 100644 --- a/src/xrpld/rpc/handlers/AccountChannels.cpp +++ b/src/xrpld/rpc/handlers/AccountChannels.cpp @@ -169,8 +169,10 @@ doAccountChannels(RPC::JsonContext& context) std::shared_ptr const& sleCur) { if (!sleCur) { + // LCOV_EXCL_START UNREACHABLE("ripple::doAccountChannels : null SLE"); return false; + // LCOV_EXCL_STOP } if (++count == limit) diff --git a/src/xrpld/rpc/handlers/AccountLines.cpp b/src/xrpld/rpc/handlers/AccountLines.cpp index 893ca9a190..146a9527a9 100644 --- a/src/xrpld/rpc/handlers/AccountLines.cpp +++ b/src/xrpld/rpc/handlers/AccountLines.cpp @@ -193,8 +193,10 @@ doAccountLines(RPC::JsonContext& context) std::shared_ptr const& sleCur) { if (!sleCur) { + // LCOV_EXCL_START UNREACHABLE("ripple::doAccountLines : null SLE"); return false; + // LCOV_EXCL_STOP } if (++count == limit) diff --git a/src/xrpld/rpc/handlers/AccountOffers.cpp b/src/xrpld/rpc/handlers/AccountOffers.cpp index e65b39b35b..1f2b76efe4 100644 --- a/src/xrpld/rpc/handlers/AccountOffers.cpp +++ b/src/xrpld/rpc/handlers/AccountOffers.cpp @@ -145,8 +145,10 @@ doAccountOffers(RPC::JsonContext& context) std::shared_ptr const& sle) { if (!sle) { + // LCOV_EXCL_START UNREACHABLE("ripple::doAccountOffers : null SLE"); return false; + // LCOV_EXCL_STOP } if (++count == limit) diff --git a/src/xrpld/rpc/handlers/AccountTx.cpp b/src/xrpld/rpc/handlers/AccountTx.cpp index 6b1dccdba9..e053c2adc0 100644 --- a/src/xrpld/rpc/handlers/AccountTx.cpp +++ b/src/xrpld/rpc/handlers/AccountTx.cpp @@ -353,9 +353,13 @@ populateJsonResponse( jvObj[jss::meta], sttx, *txnMeta); } else + { + // LCOV_EXCL_START UNREACHABLE( "ripple::populateJsonResponse : missing " "transaction medatata"); + // LCOV_EXCL_STOP + } } } } diff --git a/src/xrpld/rpc/handlers/Fee1.cpp b/src/xrpld/rpc/handlers/Fee1.cpp index 6d15a4d95f..ecb4ad4b29 100644 --- a/src/xrpld/rpc/handlers/Fee1.cpp +++ b/src/xrpld/rpc/handlers/Fee1.cpp @@ -32,9 +32,12 @@ doFee(RPC::JsonContext& context) auto result = context.app.getTxQ().doRPC(context.app); if (result.type() == Json::objectValue) return result; + + // LCOV_EXCL_START UNREACHABLE("ripple::doFee : invalid result type"); RPC::inject_error(rpcINTERNAL, context.params); return context.params; + // LCOV_EXCL_STOP } } // namespace ripple diff --git a/src/xrpld/shamap/detail/SHAMap.cpp b/src/xrpld/shamap/detail/SHAMap.cpp index d2415a2ff2..026149be56 100644 --- a/src/xrpld/shamap/detail/SHAMap.cpp +++ 
b/src/xrpld/shamap/detail/SHAMap.cpp @@ -545,8 +545,10 @@ SHAMap::onlyBelow(SHAMapTreeNode* node) const if (!nextNode) { + // LCOV_EXCL_START UNREACHABLE("ripple::SHAMap::onlyBelow : no next node"); return no_item; + // LCOV_EXCL_STOP } node = nextNode; @@ -922,8 +924,10 @@ SHAMap::updateGiveItem( if (!node || (node->peekItem()->key() != tag)) { + // LCOV_EXCL_START UNREACHABLE("ripple::SHAMap::updateGiveItem : invalid node"); return false; + // LCOV_EXCL_STOP } if (node->getType() != type) diff --git a/src/xrpld/shamap/detail/SHAMapDelta.cpp b/src/xrpld/shamap/detail/SHAMapDelta.cpp index 2adce62efc..ebdaffad14 100644 --- a/src/xrpld/shamap/detail/SHAMapDelta.cpp +++ b/src/xrpld/shamap/detail/SHAMapDelta.cpp @@ -149,8 +149,10 @@ SHAMap::compare(SHAMap const& otherMap, Delta& differences, int maxCount) const if (!ourNode || !otherNode) { + // LCOV_EXCL_START UNREACHABLE("ripple::SHAMap::compare : missing a node"); Throw(type_, uint256()); + // LCOV_EXCL_STOP } if (ourNode->isLeaf() && otherNode->isLeaf()) @@ -230,7 +232,11 @@ SHAMap::compare(SHAMap const& otherMap, Delta& differences, int maxCount) const } } else + { + // LCOV_EXCL_START UNREACHABLE("ripple::SHAMap::compare : invalid node"); + // LCOV_EXCL_STOP + } } return true; From f61086b43c93322c73738a4f30efb54d986070d7 Mon Sep 17 00:00:00 2001 From: Bart Date: Wed, 8 Oct 2025 09:15:24 -0400 Subject: [PATCH 221/244] refactor: Update CI strategy matrix to use new RHEL 9 and RHEL 10 images (#5856) This change uses the new RHEL 9 and 10 images to build and test the binary, and adds support for having different Docker image SHAs per distro-compiler combination. Instead of supporting RHEL each minor version, we are simplifying our pipelines by only supporting RHEL major versions. Our CI Docker images have already been updated accordingly, and we recently added support for RHEL 10 as well. Up until now, the CI Docker images had all been rebuilt at the same time, but that is not necessarily true as the most recent push to the CI repo has shown where the RHEL images now have a different SHA than the Debian and Ubuntu ones. Co-authored-by: Bart Thomee <11445373+bthomee@users.noreply.github.com> --- .github/scripts/strategy-matrix/generate.py | 8 +- .github/scripts/strategy-matrix/linux.json | 92 ++++++++++++-------- .github/scripts/strategy-matrix/macos.json | 3 +- .github/scripts/strategy-matrix/windows.json | 3 +- .github/workflows/reusable-build-test.yml | 2 +- .github/workflows/upload-conan-deps.yml | 5 +- 6 files changed, 66 insertions(+), 47 deletions(-) diff --git a/.github/scripts/strategy-matrix/generate.py b/.github/scripts/strategy-matrix/generate.py index fd05895b0e..025d553b5e 100755 --- a/.github/scripts/strategy-matrix/generate.py +++ b/.github/scripts/strategy-matrix/generate.py @@ -74,14 +74,14 @@ def generate_strategy_matrix(all: bool, config: Config) -> list: continue # RHEL: - # - 9.4 using GCC 12: Debug and Unity on linux/amd64. - # - 9.6 using Clang: Release and no Unity on linux/amd64. + # - 9 using GCC 12: Debug and Unity on linux/amd64. + # - 10 using Clang: Release and no Unity on linux/amd64. 
if os['distro_name'] == 'rhel': skip = True - if os['distro_version'] == '9.4': + if os['distro_version'] == '9': if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-12' and build_type == 'Debug' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/amd64': skip = False - elif os['distro_version'] == '9.6': + elif os['distro_version'] == '10': if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-any' and build_type == 'Release' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/amd64': skip = False if skip: diff --git a/.github/scripts/strategy-matrix/linux.json b/.github/scripts/strategy-matrix/linux.json index 44eaebd074..08313daf0a 100644 --- a/.github/scripts/strategy-matrix/linux.json +++ b/.github/scripts/strategy-matrix/linux.json @@ -14,139 +14,155 @@ "distro_name": "debian", "distro_version": "bookworm", "compiler_name": "gcc", - "compiler_version": "12" + "compiler_version": "12", + "image_sha": "6f723eb" }, { "distro_name": "debian", "distro_version": "bookworm", "compiler_name": "gcc", - "compiler_version": "13" + "compiler_version": "13", + "image_sha": "6f723eb" }, { "distro_name": "debian", "distro_version": "bookworm", "compiler_name": "gcc", - "compiler_version": "14" + "compiler_version": "14", + "image_sha": "6f723eb" }, { "distro_name": "debian", "distro_version": "bookworm", "compiler_name": "gcc", - "compiler_version": "15" + "compiler_version": "15", + "image_sha": "6f723eb" }, { "distro_name": "debian", "distro_version": "bookworm", "compiler_name": "clang", - "compiler_version": "16" + "compiler_version": "16", + "image_sha": "6f723eb" }, { "distro_name": "debian", "distro_version": "bookworm", "compiler_name": "clang", - "compiler_version": "17" + "compiler_version": "17", + "image_sha": "6f723eb" }, { "distro_name": "debian", "distro_version": "bookworm", "compiler_name": "clang", - "compiler_version": "18" + "compiler_version": "18", + "image_sha": "6f723eb" }, { "distro_name": "debian", "distro_version": "bookworm", "compiler_name": "clang", - "compiler_version": "19" + "compiler_version": "19", + "image_sha": "6f723eb" }, { "distro_name": "debian", "distro_version": "bookworm", "compiler_name": "clang", - "compiler_version": "20" + "compiler_version": "20", + "image_sha": "6f723eb" }, { "distro_name": "rhel", - "distro_version": "9.4", + "distro_version": "9", "compiler_name": "gcc", - "compiler_version": "12" + "compiler_version": "12", + "image_sha": "0ab1e4c" }, { "distro_name": "rhel", - "distro_version": "9.4", + "distro_version": "9", "compiler_name": "gcc", - "compiler_version": "13" + "compiler_version": "13", + "image_sha": "0ab1e4c" }, { "distro_name": "rhel", - "distro_version": "9.4", + "distro_version": "9", "compiler_name": "gcc", - "compiler_version": "14" + "compiler_version": "14", + "image_sha": "0ab1e4c" }, { "distro_name": "rhel", - "distro_version": "9.6", - "compiler_name": "gcc", - "compiler_version": "13" - }, - { - "distro_name": "rhel", - "distro_version": "9.6", - "compiler_name": "gcc", - "compiler_version": "14" - }, - { - "distro_name": "rhel", - "distro_version": "9.4", + "distro_version": "9", "compiler_name": "clang", - "compiler_version": "any" + "compiler_version": "any", + "image_sha": "0ab1e4c" }, { "distro_name": "rhel", - "distro_version": "9.6", + "distro_version": "10", + "compiler_name": "gcc", + "compiler_version": "14", + "image_sha": "0ab1e4c" + }, + { + "distro_name": "rhel", + "distro_version": "10", "compiler_name": "clang", - "compiler_version": "any" + 
"compiler_version": "any", + "image_sha": "0ab1e4c" }, { "distro_name": "ubuntu", "distro_version": "jammy", "compiler_name": "gcc", - "compiler_version": "12" + "compiler_version": "12", + "image_sha": "6f723eb" }, { "distro_name": "ubuntu", "distro_version": "noble", "compiler_name": "gcc", - "compiler_version": "13" + "compiler_version": "13", + "image_sha": "6f723eb" }, { "distro_name": "ubuntu", "distro_version": "noble", "compiler_name": "gcc", - "compiler_version": "14" + "compiler_version": "14", + "image_sha": "6f723eb" }, { "distro_name": "ubuntu", "distro_version": "noble", "compiler_name": "clang", - "compiler_version": "16" + "compiler_version": "16", + "image_sha": "6f723eb" }, { "distro_name": "ubuntu", "distro_version": "noble", "compiler_name": "clang", - "compiler_version": "17" + "compiler_version": "17", + "image_sha": "6f723eb" }, { "distro_name": "ubuntu", "distro_version": "noble", "compiler_name": "clang", - "compiler_version": "18" + "compiler_version": "18", + "image_sha": "6f723eb" }, { "distro_name": "ubuntu", "distro_version": "noble", "compiler_name": "clang", - "compiler_version": "19" + "compiler_version": "19", + "image_sha": "6f723eb" } ], "build_type": ["Debug", "Release"], diff --git a/.github/scripts/strategy-matrix/macos.json b/.github/scripts/strategy-matrix/macos.json index de37639ddd..14b6089620 100644 --- a/.github/scripts/strategy-matrix/macos.json +++ b/.github/scripts/strategy-matrix/macos.json @@ -10,7 +10,8 @@ "distro_name": "macos", "distro_version": "", "compiler_name": "", - "compiler_version": "" + "compiler_version": "", + "image_sha": "" } ], "build_type": ["Debug", "Release"], diff --git a/.github/scripts/strategy-matrix/windows.json b/.github/scripts/strategy-matrix/windows.json index 08b41e3f89..8637b31012 100644 --- a/.github/scripts/strategy-matrix/windows.json +++ b/.github/scripts/strategy-matrix/windows.json @@ -10,7 +10,8 @@ "distro_name": "windows", "distro_version": "", "compiler_name": "", - "compiler_version": "" + "compiler_version": "", + "image_sha": "" } ], "build_type": ["Debug", "Release"], diff --git a/.github/workflows/reusable-build-test.yml b/.github/workflows/reusable-build-test.yml index c274cf2b21..5bc9cf2557 100644 --- a/.github/workflows/reusable-build-test.yml +++ b/.github/workflows/reusable-build-test.yml @@ -52,7 +52,7 @@ jobs: cmake_args: ${{ matrix.cmake_args }} cmake_target: ${{ matrix.cmake_target }} runs_on: ${{ toJSON(matrix.architecture.runner) }} - image: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-5dd7158', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version) || '' }} + image: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-{4}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version, matrix.os.image_sha) || '' }} config_name: ${{ matrix.config_name }} secrets: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/upload-conan-deps.yml b/.github/workflows/upload-conan-deps.yml index cbae8a4c86..680602d978 100644 --- a/.github/workflows/upload-conan-deps.yml +++ b/.github/workflows/upload-conan-deps.yml @@ -40,11 +40,13 @@ concurrency: cancel-in-progress: true jobs: + # Generate the strategy matrix to be used by the following job. 
generate-matrix: uses: ./.github/workflows/reusable-strategy-matrix.yml with: strategy_matrix: ${{ github.event_name == 'pull_request' && 'minimal' || 'all' }} + # Build and upload the dependencies for each configuration. run-upload-conan-deps: needs: - generate-matrix @@ -53,8 +55,7 @@ jobs: matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }} max-parallel: 10 runs-on: ${{ matrix.architecture.runner }} - container: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-5dd7158', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version) || null }} - + container: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-{4}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version, matrix.os.image_sha) || null }} steps: - name: Cleanup workspace if: ${{ runner.os == 'macOS' }} From 6b6b213cf543d98eb76af7ac4a6a1379ce26ca4e Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Wed, 8 Oct 2025 14:45:44 +0100 Subject: [PATCH 222/244] chore: Fix release build error (#5864) This change fixes a release build error with GCC 15.2. The `fields` variable is only used in `XRPL_ASSERT`, which evaluates to nothing in a Release build, leaving the variable unused. This change silences the build warning. Co-authored-by: Bart Thomee <11445373+bthomee@users.noreply.github.com> --- src/libxrpl/ledger/View.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libxrpl/ledger/View.cpp b/src/libxrpl/ledger/View.cpp index 02de872ad3..89d8137ac7 100644 --- a/src/libxrpl/ledger/View.cpp +++ b/src/libxrpl/ledger/View.cpp @@ -1134,7 +1134,7 @@ createPseudoAccount( uint256 const& pseudoOwnerKey, SField const& ownerField) { - auto const& fields = getPseudoAccountFields(); + [[maybe_unused]] auto const& fields = getPseudoAccountFields(); XRPL_ASSERT( std::count_if( fields.begin(), From 620fb26823777aedd62fb179f3d39078ac66c5cd Mon Sep 17 00:00:00 2001 From: tequ Date: Wed, 8 Oct 2025 23:36:09 +0900 Subject: [PATCH 223/244] test: Add more tests for Simulate RPC metadata (#5827) --- src/test/rpc/Simulate_test.cpp | 92 ++++++++++++++++++++++------------ 1 file changed, 59 insertions(+), 33 deletions(-) diff --git a/src/test/rpc/Simulate_test.cpp b/src/test/rpc/Simulate_test.cpp index 0a36a8a841..5e7ab5be4d 100644 --- a/src/test/rpc/Simulate_test.cpp +++ b/src/test/rpc/Simulate_test.cpp @@ -136,11 +136,12 @@ class Simulate_test : public beast::unit_test::suite jtx::Env& env, Json::Value const& tx, std::function const& validate, Json::Value const& expectedMetadataKey, - bool testSerialized = true) + Json::Value const& expectedMetadataValue) { env.close(); @@ -149,8 +150,13 @@ class Simulate_test : public beast::unit_test::suite validate( env.rpc("json", "simulate", to_string(params)), tx, - expectedMetadataKey); - validate(env.rpc("simulate", to_string(tx)), tx, expectedMetadataKey); + expectedMetadataKey, + expectedMetadataValue); + validate( + env.rpc("simulate", to_string(tx)), + tx, + expectedMetadataKey, + expectedMetadataValue); BEAST_EXPECTS( env.current()->txCount() == 0, @@ -1218,73 +1224,93 @@ class Simulate_test : public beast::unit_test::suite testcase("Successful transaction with additional metadata"); using namespace jtx; + using namespace std::chrono_literals; Env env{*this, envconfig([&](std::unique_ptr cfg) { cfg->NETWORK_ID = 1025; return cfg; })}; Account const alice("alice"); + Account const bob("bob"); - 
env.fund(XRP(10000), alice); + env.fund(XRP(10000), alice, bob); env.close(); + // deliver_amount is unavailable in the metadata before 2014-02-01 + // so proceed to 2014-02-01 + env.close(NetClock::time_point{446000000s}); { - auto validateOutput = [&](Json::Value const& resp, - Json::Value const& tx, - Json::Value const& expectedMetadataKey) { - auto result = resp[jss::result]; + auto validateOutput = + [&](Json::Value const& resp, + Json::Value const& tx, + Json::Value const& expectedMetadataKey, + Json::Value const& expectedMetadataValue) { + auto result = resp[jss::result]; - BEAST_EXPECT(result[jss::engine_result] == "tesSUCCESS"); - BEAST_EXPECT(result[jss::engine_result_code] == 0); - BEAST_EXPECT( - result[jss::engine_result_message] == - "The simulated transaction would have been applied."); - - if (BEAST_EXPECT( - result.isMember(jss::meta) || - result.isMember(jss::meta_blob))) - { - Json::Value const metadata = getJsonMetadata(result); - - BEAST_EXPECT(metadata[sfTransactionIndex.jsonName] == 0); + BEAST_EXPECT(result[jss::engine_result] == "tesSUCCESS"); + BEAST_EXPECT(result[jss::engine_result_code] == 0); BEAST_EXPECT( - metadata[sfTransactionResult.jsonName] == "tesSUCCESS"); - BEAST_EXPECT( - metadata.isMember(expectedMetadataKey.asString())); - } - }; + result[jss::engine_result_message] == + "The simulated transaction would have been applied."); + + if (BEAST_EXPECT( + result.isMember(jss::meta) || + result.isMember(jss::meta_blob))) + { + Json::Value const metadata = getJsonMetadata(result); + + BEAST_EXPECT( + metadata[sfTransactionIndex.jsonName] == 0); + BEAST_EXPECT( + metadata[sfTransactionResult.jsonName] == + "tesSUCCESS"); + BEAST_EXPECT( + metadata.isMember(expectedMetadataKey.asString())); + BEAST_EXPECT( + metadata[expectedMetadataKey.asString()] == + expectedMetadataValue); + } + }; { Json::Value tx; - tx[jss::Account] = env.master.human(); + tx[jss::Account] = alice.human(); tx[jss::TransactionType] = jss::Payment; - tx[sfDestination] = alice.human(); + tx[sfDestination] = bob.human(); tx[sfAmount] = "100"; // test delivered amount testTxJsonMetadataField( - env, tx, validateOutput, jss::delivered_amount); + env, tx, validateOutput, jss::delivered_amount, "100"); } { Json::Value tx; - tx[jss::Account] = env.master.human(); + tx[jss::Account] = alice.human(); tx[jss::TransactionType] = jss::NFTokenMint; tx[sfNFTokenTaxon] = 1; + Json::Value nftokenId = + to_string(token::getNextID(env, alice, 1)); // test nft synthetic testTxJsonMetadataField( - env, tx, validateOutput, jss::nftoken_id); + env, tx, validateOutput, jss::nftoken_id, nftokenId); } { Json::Value tx; - tx[jss::Account] = env.master.human(); + tx[jss::Account] = alice.human(); tx[jss::TransactionType] = jss::MPTokenIssuanceCreate; + Json::Value mptIssuanceId = + to_string(makeMptID(env.seq(alice), alice)); // test mpt issuance id testTxJsonMetadataField( - env, tx, validateOutput, jss::mpt_issuance_id); + env, + tx, + validateOutput, + jss::mpt_issuance_id, + mptIssuanceId); } } } From 5ecde3cf39dcb784250253e9d953e9a4d4d2bb34 Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Wed, 8 Oct 2025 16:04:02 +0100 Subject: [PATCH 224/244] Add vault invariants (#5518) This change adds invariants for SingleAssetVault #5224 (XLS-065), which had been intentionally skipped earlier to keep the SAV PR size manageable. 
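For context, here is a minimal self-contained sketch of the kind of per-vault bookkeeping rules these invariants enforce. It is illustrative only and is not rippled code: the struct, field, and function names below are hypothetical stand-ins, chosen to mirror the failure messages exercised in the tests ("assets available must not be greater than assets outstanding", "loss unrealized must not exceed the difference between assets outstanding and available", "updated zero sized vault must have no assets outstanding").

#include <cstdint>
#include <iostream>

// Hypothetical snapshot of the vault fields the invariant inspects.
struct VaultSnapshot
{
    std::int64_t assetsTotal;      // mirrors sfAssetsTotal
    std::int64_t assetsAvailable;  // mirrors sfAssetsAvailable
    std::int64_t lossUnrealized;   // mirrors sfLossUnrealized
    std::int64_t sharesTotal;      // mirrors the share MPT's OutstandingAmount
};

// Returns true when the snapshot satisfies the basic bookkeeping rules:
// non-negative balances, available <= outstanding, unrealized loss bounded
// by (outstanding - available), and a zero-share vault holding no assets.
bool vaultLooksConsistent(VaultSnapshot const& v)
{
    if (v.assetsTotal < 0 || v.assetsAvailable < 0)
        return false;
    if (v.assetsAvailable > v.assetsTotal)
        return false;
    if (v.lossUnrealized < 0 ||
        v.lossUnrealized > v.assetsTotal - v.assetsAvailable)
        return false;
    if (v.sharesTotal == 0 && (v.assetsTotal != 0 || v.assetsAvailable != 0))
        return false;
    return true;
}

int main()
{
    VaultSnapshot const good{100, 60, 30, 10};
    VaultSnapshot const bad{100, 60, 50, 10};  // loss exceeds total - available
    std::cout << vaultLooksConsistent(good) << ' '
              << vaultLooksConsistent(bad) << '\n';  // prints: 1 0
}

The actual checks run against the transaction's ApplyContext and cover many more conditions, as exercised in Invariants_test.cpp below.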
--- .../xrpl/protocol/detail/transactions.macro | 12 +- src/test/app/Invariants_test.cpp | 1706 ++++++++++++++++- src/test/app/Vault_test.cpp | 63 + src/xrpld/app/tx/detail/InvariantCheck.cpp | 950 +++++++++ src/xrpld/app/tx/detail/InvariantCheck.h | 73 +- src/xrpld/app/tx/detail/VaultDeposit.cpp | 5 +- src/xrpld/app/tx/detail/VaultSet.cpp | 3 + 7 files changed, 2801 insertions(+), 11 deletions(-) diff --git a/include/xrpl/protocol/detail/transactions.macro b/include/xrpl/protocol/detail/transactions.macro index 3ea4a3bbec..2565372ebc 100644 --- a/include/xrpl/protocol/detail/transactions.macro +++ b/include/xrpl/protocol/detail/transactions.macro @@ -851,7 +851,7 @@ TRANSACTION(ttDELEGATE_SET, 64, DelegateSet, TRANSACTION(ttVAULT_CREATE, 65, VaultCreate, Delegation::delegatable, featureSingleAssetVault, - createPseudoAcct | createMPTIssuance, + createPseudoAcct | createMPTIssuance | mustModifyVault, ({ {sfAsset, soeREQUIRED, soeMPTSupported}, {sfAssetsMaximum, soeOPTIONAL}, @@ -869,7 +869,7 @@ TRANSACTION(ttVAULT_CREATE, 65, VaultCreate, TRANSACTION(ttVAULT_SET, 66, VaultSet, Delegation::delegatable, featureSingleAssetVault, - noPriv, + mustModifyVault, ({ {sfVaultID, soeREQUIRED}, {sfAssetsMaximum, soeOPTIONAL}, @@ -884,7 +884,7 @@ TRANSACTION(ttVAULT_SET, 66, VaultSet, TRANSACTION(ttVAULT_DELETE, 67, VaultDelete, Delegation::delegatable, featureSingleAssetVault, - mustDeleteAcct | destroyMPTIssuance, + mustDeleteAcct | destroyMPTIssuance | mustModifyVault, ({ {sfVaultID, soeREQUIRED}, })) @@ -896,7 +896,7 @@ TRANSACTION(ttVAULT_DELETE, 67, VaultDelete, TRANSACTION(ttVAULT_DEPOSIT, 68, VaultDeposit, Delegation::delegatable, featureSingleAssetVault, - mayAuthorizeMPT, + mayAuthorizeMPT | mustModifyVault, ({ {sfVaultID, soeREQUIRED}, {sfAmount, soeREQUIRED, soeMPTSupported}, @@ -909,7 +909,7 @@ TRANSACTION(ttVAULT_DEPOSIT, 68, VaultDeposit, TRANSACTION(ttVAULT_WITHDRAW, 69, VaultWithdraw, Delegation::delegatable, featureSingleAssetVault, - mayDeleteMPT, + mayDeleteMPT | mustModifyVault, ({ {sfVaultID, soeREQUIRED}, {sfAmount, soeREQUIRED, soeMPTSupported}, @@ -924,7 +924,7 @@ TRANSACTION(ttVAULT_WITHDRAW, 69, VaultWithdraw, TRANSACTION(ttVAULT_CLAWBACK, 70, VaultClawback, Delegation::delegatable, featureSingleAssetVault, - mayDeleteMPT, + mayDeleteMPT | mustModifyVault, ({ {sfVaultID, soeREQUIRED}, {sfHolder, soeREQUIRED}, diff --git a/src/test/app/Invariants_test.cpp b/src/test/app/Invariants_test.cpp index c91149b2f7..8781eee72b 100644 --- a/src/test/app/Invariants_test.cpp +++ b/src/test/app/Invariants_test.cpp @@ -24,9 +24,18 @@ #include #include +#include #include +#include +#include #include +#include +#include #include +#include +#include +#include +#include #include @@ -66,8 +75,9 @@ class Invariants_test : public beast::unit_test::suite * checker. * @preclose See "Preclose" above. 
Note that @preclose runs *before* * @precheck, but is the last parameter for historical reasons - * + * @setTxAccount optionally set to add sfAccount to tx (either A1 or A2) */ + enum class TxAccount : int { None = 0, A1, A2 }; void doInvariantCheck( std::vector const& expect_logs, @@ -76,7 +86,8 @@ class Invariants_test : public beast::unit_test::suite STTx tx = STTx{ttACCOUNT_SET, [](STObject&) {}}, std::initializer_list ters = {tecINVARIANT_FAILED, tefINVARIANT_FAILED}, - Preclose const& preclose = {}) + Preclose const& preclose = {}, + TxAccount setTxAccount = TxAccount::None) { using namespace test::jtx; FeatureBitset amendments = testable_amendments() | @@ -93,6 +104,9 @@ class Invariants_test : public beast::unit_test::suite OpenView ov{*env.current()}; test::StreamSink sink{beast::severities::kWarning}; beast::Journal jlog{sink}; + if (setTxAccount != TxAccount::None) + tx.setAccountID( + sfAccount, setTxAccount == TxAccount::A1 ? A1.id() : A2.id()); ApplyContext ac{ env.app(), ov, @@ -117,12 +131,13 @@ class Invariants_test : public beast::unit_test::suite BEAST_EXPECT( messages.starts_with("Invariant failed:") || messages.starts_with("Transaction caused an exception")); + // std::cerr << messages << '\n'; for (auto const& m : expect_logs) { if (messages.find(m) == std::string::npos) { // uncomment if you want to log the invariant failure - // message log << " --> " << m << std::endl; + // std::cerr << " --> " << m << std::endl; fail(); } } @@ -1726,6 +1741,1690 @@ class Invariants_test : public beast::unit_test::suite {tecINVARIANT_FAILED, tecINVARIANT_FAILED}); } + void + testVault() + { + using namespace test::jtx; + + struct AccountAmount + { + AccountID account; + int amount; + }; + struct Adjustements + { + std::optional assetsTotal = {}; + std::optional assetsAvailable = {}; + std::optional lossUnrealized = {}; + std::optional assetsMaximum = {}; + std::optional sharesTotal = {}; + std::optional vaultAssets = {}; + std::optional accountAssets = {}; + std::optional accountShares = {}; + }; + auto constexpr adjust = [&](ApplyView& ac, + ripple::Keylet keylet, + Adjustements args) { + auto sleVault = ac.peek(keylet); + if (!sleVault) + return false; + + auto const mptIssuanceID = (*sleVault)[sfShareMPTID]; + auto sleShares = ac.peek(keylet::mptIssuance(mptIssuanceID)); + if (!sleShares) + return false; + + // These two fields are adjusted in absolute terms + if (args.lossUnrealized) + (*sleVault)[sfLossUnrealized] = *args.lossUnrealized; + if (args.assetsMaximum) + (*sleVault)[sfAssetsMaximum] = *args.assetsMaximum; + + // Remaining fields are adjusted in terms of difference + if (args.assetsTotal) + (*sleVault)[sfAssetsTotal] = + *(*sleVault)[sfAssetsTotal] + *args.assetsTotal; + if (args.assetsAvailable) + (*sleVault)[sfAssetsAvailable] = + *(*sleVault)[sfAssetsAvailable] + *args.assetsAvailable; + ac.update(sleVault); + + if (args.sharesTotal) + (*sleShares)[sfOutstandingAmount] = + *(*sleShares)[sfOutstandingAmount] + *args.sharesTotal; + ac.update(sleShares); + + auto const assets = *(*sleVault)[sfAsset]; + auto const pseudoId = *(*sleVault)[sfAccount]; + if (args.vaultAssets) + { + if (assets.native()) + { + auto slePseudoAccount = ac.peek(keylet::account(pseudoId)); + if (!slePseudoAccount) + return false; + (*slePseudoAccount)[sfBalance] = + *(*slePseudoAccount)[sfBalance] + *args.vaultAssets; + ac.update(slePseudoAccount); + } + else if (assets.holds()) + { + auto const mptId = assets.get().getMptID(); + auto sleMPToken = ac.peek(keylet::mptoken(mptId, pseudoId)); + 
if (!sleMPToken) + return false; + (*sleMPToken)[sfMPTAmount] = + *(*sleMPToken)[sfMPTAmount] + *args.vaultAssets; + ac.update(sleMPToken); + } + else + return false; // Not supporting testing with IOU + } + + if (args.accountAssets) + { + auto const& pair = *args.accountAssets; + if (assets.native()) + { + auto sleAccount = ac.peek(keylet::account(pair.account)); + if (!sleAccount) + return false; + (*sleAccount)[sfBalance] = + *(*sleAccount)[sfBalance] + pair.amount; + ac.update(sleAccount); + } + else if (assets.holds()) + { + auto const mptID = assets.get().getMptID(); + auto sleMPToken = + ac.peek(keylet::mptoken(mptID, pair.account)); + if (!sleMPToken) + return false; + (*sleMPToken)[sfMPTAmount] = + *(*sleMPToken)[sfMPTAmount] + pair.amount; + ac.update(sleMPToken); + } + else + return false; // Not supporting testing with IOU + } + + if (args.accountShares) + { + auto const& pair = *args.accountShares; + auto sleMPToken = + ac.peek(keylet::mptoken(mptIssuanceID, pair.account)); + if (!sleMPToken) + return false; + (*sleMPToken)[sfMPTAmount] = + *(*sleMPToken)[sfMPTAmount] + pair.amount; + ac.update(sleMPToken); + } + return true; + }; + + constexpr auto args = + [](AccountID id, int adjustement, auto fn) -> Adjustements { + Adjustements sample = { + .assetsTotal = adjustement, + .assetsAvailable = adjustement, + .lossUnrealized = 0, + .sharesTotal = adjustement, + .vaultAssets = adjustement, + .accountAssets = // + AccountAmount{id, -adjustement}, + .accountShares = // + AccountAmount{id, adjustement}}; + fn(sample); + return sample; + }; + + Account A3{"A3"}; + Account A4{"A4"}; + auto const precloseXrp = + [&](Account const& A1, Account const& A2, Env& env) -> bool { + env.fund(XRP(1000), A3, A4); + Vault vault{env}; + auto [tx, keylet] = + vault.create({.owner = A1, .asset = xrpIssue()}); + env(tx); + env(vault.deposit( + {.depositor = A1, .id = keylet.key, .amount = XRP(10)})); + env(vault.deposit( + {.depositor = A2, .id = keylet.key, .amount = XRP(10)})); + env(vault.deposit( + {.depositor = A3, .id = keylet.key, .amount = XRP(10)})); + return true; + }; + + testcase << "Vault general checks"; + doInvariantCheck( + {"vault deletion succeeded without deleting a vault"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + auto sleVault = ac.view().peek(keylet); + if (!sleVault) + return false; + ac.view().update(sleVault); + return true; + }, + XRPAmount{}, + STTx{ttVAULT_DELETE, [](STObject&) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + [&](Account const& A1, Account const& A2, Env& env) { + Vault vault{env}; + auto [tx, _] = vault.create({.owner = A1, .asset = xrpIssue()}); + env(tx); + return true; + }); + + doInvariantCheck( + {"vault updated by a wrong transaction type"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + auto sleVault = ac.view().peek(keylet); + if (!sleVault) + return false; + ac.view().erase(sleVault); + return true; + }, + XRPAmount{}, + STTx{ttPAYMENT, [](STObject&) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + [&](Account const& A1, Account const& A2, Env& env) { + Vault vault{env}; + auto [tx, _] = vault.create({.owner = A1, .asset = xrpIssue()}); + env(tx); + return true; + }); + + doInvariantCheck( + {"vault updated by a wrong transaction type"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + auto 
sleVault = ac.view().peek(keylet); + if (!sleVault) + return false; + ac.view().update(sleVault); + return true; + }, + XRPAmount{}, + STTx{ttPAYMENT, [](STObject&) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + [&](Account const& A1, Account const& A2, Env& env) { + Vault vault{env}; + auto [tx, _] = vault.create({.owner = A1, .asset = xrpIssue()}); + env(tx); + return true; + }); + + doInvariantCheck( + {"vault updated by a wrong transaction type"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const sequence = ac.view().seq(); + auto const vaultKeylet = keylet::vault(A1.id(), sequence); + auto sleVault = std::make_shared(vaultKeylet); + auto const vaultPage = ac.view().dirInsert( + keylet::ownerDir(A1.id()), + sleVault->key(), + describeOwnerDir(A1.id())); + sleVault->setFieldU64(sfOwnerNode, *vaultPage); + ac.view().insert(sleVault); + return true; + }, + XRPAmount{}, + STTx{ttPAYMENT, [](STObject&) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}); + + doInvariantCheck( + {"vault deleted by a wrong transaction type"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + auto sleVault = ac.view().peek(keylet); + if (!sleVault) + return false; + ac.view().erase(sleVault); + return true; + }, + XRPAmount{}, + STTx{ttVAULT_SET, [](STObject&) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + [&](Account const& A1, Account const& A2, Env& env) { + Vault vault{env}; + auto [tx, _] = vault.create({.owner = A1, .asset = xrpIssue()}); + env(tx); + return true; + }); + + doInvariantCheck( + {"vault operation updated more than single vault"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + auto sleVault = ac.view().peek(keylet); + if (!sleVault) + return false; + ac.view().erase(sleVault); + } + { + auto const keylet = keylet::vault(A2.id(), ac.view().seq()); + auto sleVault = ac.view().peek(keylet); + if (!sleVault) + return false; + ac.view().erase(sleVault); + } + return true; + }, + XRPAmount{}, + STTx{ttVAULT_DELETE, [](STObject&) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + [&](Account const& A1, Account const& A2, Env& env) { + Vault vault{env}; + { + auto [tx, _] = + vault.create({.owner = A1, .asset = xrpIssue()}); + env(tx); + } + { + auto [tx, _] = + vault.create({.owner = A2, .asset = xrpIssue()}); + env(tx); + } + return true; + }); + + doInvariantCheck( + {"vault operation updated more than single vault"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const sequence = ac.view().seq(); + auto const insertVault = [&](Account const A) { + auto const vaultKeylet = keylet::vault(A.id(), sequence); + auto sleVault = std::make_shared(vaultKeylet); + auto const vaultPage = ac.view().dirInsert( + keylet::ownerDir(A.id()), + sleVault->key(), + describeOwnerDir(A.id())); + sleVault->setFieldU64(sfOwnerNode, *vaultPage); + ac.view().insert(sleVault); + }; + insertVault(A1); + insertVault(A2); + return true; + }, + XRPAmount{}, + STTx{ttVAULT_CREATE, [](STObject&) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}); + + doInvariantCheck( + {"deleted vault must also delete shares"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + auto sleVault = ac.view().peek(keylet); + if (!sleVault) + return false; + ac.view().erase(sleVault); + return true; + }, + XRPAmount{}, + STTx{ttVAULT_DELETE, 
[](STObject&) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + [&](Account const& A1, Account const& A2, Env& env) { + Vault vault{env}; + auto [tx, _] = vault.create({.owner = A1, .asset = xrpIssue()}); + env(tx); + return true; + }); + + doInvariantCheck( + {"deleted vault must have no shares outstanding", + "deleted vault must have no assets outstanding", + "deleted vault must have no assets available"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + auto sleVault = ac.view().peek(keylet); + if (!sleVault) + return false; + auto sleShares = ac.view().peek( + keylet::mptIssuance((*sleVault)[sfShareMPTID])); + if (!sleShares) + return false; + ac.view().erase(sleVault); + ac.view().erase(sleShares); + return true; + }, + XRPAmount{}, + STTx{ttVAULT_DELETE, [](STObject&) {}}, + {tecINVARIANT_FAILED, tefINVARIANT_FAILED}, + [&](Account const& A1, Account const& A2, Env& env) { + Vault vault{env}; + auto [tx, keylet] = + vault.create({.owner = A1, .asset = xrpIssue()}); + env(tx); + env(vault.deposit( + {.depositor = A1, .id = keylet.key, .amount = XRP(10)})); + return true; + }); + + doInvariantCheck( + {"vault operation succeeded without modifying a vault"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + auto sleVault = ac.view().peek(keylet); + if (!sleVault) + return false; + auto sleShares = ac.view().peek( + keylet::mptIssuance((*sleVault)[sfShareMPTID])); + if (!sleShares) + return false; + // Note, such an "orphaned" update of MPT issuance attached to a + // vault is invalid; ttVAULT_SET must also update Vault object. + sleShares->setFieldH256(sfDomainID, uint256(13)); + ac.view().update(sleShares); + return true; + }, + XRPAmount{}, + STTx{ttVAULT_SET, [](STObject& tx) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseXrp, + TxAccount::A2); + + doInvariantCheck( + {"vault operation succeeded without modifying a vault"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + return true; + }, + XRPAmount{}, + STTx{ttVAULT_CREATE, [](STObject&) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + [&](Account const& A1, Account const& A2, Env& env) { + Vault vault{env}; + auto [tx, _] = vault.create({.owner = A1, .asset = xrpIssue()}); + env(tx); + return true; + }); + + doInvariantCheck( + {"vault operation succeeded without modifying a vault"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + return true; + }, + XRPAmount{}, + STTx{ttVAULT_DEPOSIT, [](STObject&) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + [&](Account const& A1, Account const& A2, Env& env) { + Vault vault{env}; + auto [tx, _] = vault.create({.owner = A1, .asset = xrpIssue()}); + env(tx); + return true; + }); + + doInvariantCheck( + {"vault operation succeeded without modifying a vault"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + return true; + }, + XRPAmount{}, + STTx{ttVAULT_WITHDRAW, [](STObject&) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + [&](Account const& A1, Account const& A2, Env& env) { + Vault vault{env}; + auto [tx, _] = vault.create({.owner = A1, .asset = xrpIssue()}); + env(tx); + return true; + }); + + doInvariantCheck( + {"vault operation succeeded without modifying a vault"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + return true; + }, + XRPAmount{}, + STTx{ttVAULT_CLAWBACK, [](STObject&) {}}, + {tecINVARIANT_FAILED, 
tecINVARIANT_FAILED}, + [&](Account const& A1, Account const& A2, Env& env) { + Vault vault{env}; + auto [tx, _] = vault.create({.owner = A1, .asset = xrpIssue()}); + env(tx); + return true; + }); + + doInvariantCheck( + {"vault operation succeeded without modifying a vault"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + return true; + }, + XRPAmount{}, + STTx{ttVAULT_DELETE, [](STObject&) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + [&](Account const& A1, Account const& A2, Env& env) { + Vault vault{env}; + auto [tx, _] = vault.create({.owner = A1, .asset = xrpIssue()}); + env(tx); + return true; + }); + + doInvariantCheck( + {"updated vault must have shares"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + auto sleVault = ac.view().peek(keylet); + if (!sleVault) + return false; + (*sleVault)[sfAssetsMaximum] = 200; + ac.view().update(sleVault); + + auto sleShares = ac.view().peek( + keylet::mptIssuance((*sleVault)[sfShareMPTID])); + if (!sleShares) + return false; + ac.view().erase(sleShares); + return true; + }, + XRPAmount{}, + STTx{ttVAULT_SET, [](STObject&) {}}, + {tecINVARIANT_FAILED, tefINVARIANT_FAILED}, + [&](Account const& A1, Account const& A2, Env& env) { + Vault vault{env}; + auto [tx, _] = vault.create({.owner = A1, .asset = xrpIssue()}); + env(tx); + return true; + }); + + doInvariantCheck( + {"vault operation succeeded without updating shares", + "assets available must not be greater than assets outstanding"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + auto sleVault = ac.view().peek(keylet); + if (!sleVault) + return false; + (*sleVault)[sfAssetsTotal] = 9; + ac.view().update(sleVault); + return true; + }, + XRPAmount{}, + STTx{ttVAULT_WITHDRAW, [](STObject&) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + [&](Account const& A1, Account const& A2, Env& env) { + Vault vault{env}; + auto [tx, keylet] = + vault.create({.owner = A1, .asset = xrpIssue()}); + env(tx); + env(vault.deposit( + {.depositor = A1, .id = keylet.key, .amount = XRP(10)})); + return true; + }); + + doInvariantCheck( + {"set must not change assets outstanding", + "set must not change assets available", + "set must not change shares outstanding", + "set must not change vault balance", + "assets available must be positive", + "assets available must not be greater than assets outstanding", + "assets outstanding must be positive"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + auto sleVault = ac.view().peek(keylet); + if (!sleVault) + return false; + auto slePseudoAccount = + ac.view().peek(keylet::account(*(*sleVault)[sfAccount])); + if (!slePseudoAccount) + return false; + (*slePseudoAccount)[sfBalance] = + *(*slePseudoAccount)[sfBalance] - 10; + ac.view().update(slePseudoAccount); + + // Move 10 drops to A4 to enforce total XRP balance + auto sleA4 = ac.view().peek(keylet::account(A4.id())); + if (!sleA4) + return false; + (*sleA4)[sfBalance] = *(*sleA4)[sfBalance] + 10; + ac.view().update(sleA4); + + return adjust( + ac.view(), + keylet, + args(A2.id(), 0, [&](Adjustements& sample) { + sample.assetsAvailable = (DROPS_PER_XRP * -100).value(); + sample.assetsTotal = (DROPS_PER_XRP * -200).value(); + sample.sharesTotal = -1; + })); + }, + XRPAmount{}, + STTx{ttVAULT_SET, [](STObject& tx) {}}, + {tecINVARIANT_FAILED, 
tecINVARIANT_FAILED}, + precloseXrp, + TxAccount::A2); + + doInvariantCheck( + {"violation of vault immutable data"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + auto sleVault = ac.view().peek(keylet); + if (!sleVault) + return false; + sleVault->setFieldIssue( + sfAsset, STIssue{sfAsset, MPTIssue(MPTID(42))}); + ac.view().update(sleVault); + return true; + }, + XRPAmount{}, + STTx{ttVAULT_SET, [](STObject& tx) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseXrp); + + doInvariantCheck( + {"violation of vault immutable data"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + auto sleVault = ac.view().peek(keylet); + if (!sleVault) + return false; + sleVault->setAccountID(sfAccount, A2.id()); + ac.view().update(sleVault); + return true; + }, + XRPAmount{}, + STTx{ttVAULT_SET, [](STObject& tx) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseXrp); + + doInvariantCheck( + {"violation of vault immutable data"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + auto sleVault = ac.view().peek(keylet); + if (!sleVault) + return false; + (*sleVault)[sfShareMPTID] = MPTID(42); + ac.view().update(sleVault); + return true; + }, + XRPAmount{}, + STTx{ttVAULT_SET, [](STObject& tx) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseXrp); + + doInvariantCheck( + {"vault transaction must not change loss unrealized", + "set must not change assets outstanding"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + return adjust( + ac.view(), + keylet, + args(A2.id(), 0, [&](Adjustements& sample) { + sample.lossUnrealized = 13; + sample.assetsTotal = 20; + })); + }, + XRPAmount{}, + STTx{ttVAULT_SET, [](STObject& tx) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseXrp, + TxAccount::A2); + + doInvariantCheck( + {"loss unrealized must not exceed the difference " + "between assets outstanding and available", + "vault transaction must not change loss unrealized"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + return adjust( + ac.view(), + keylet, + args(A2.id(), 100, [&](Adjustements& sample) { + sample.lossUnrealized = 13; + })); + }, + XRPAmount{}, + STTx{ + ttVAULT_DEPOSIT, + [](STObject& tx) { + tx.setFieldAmount(sfAmount, XRPAmount(200)); + }}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseXrp, + TxAccount::A2); + + doInvariantCheck( + {"set assets outstanding must not exceed assets maximum"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + return adjust( + ac.view(), + keylet, + args(A2.id(), 0, [&](Adjustements& sample) { + sample.assetsMaximum = 1; + })); + }, + XRPAmount{}, + STTx{ttVAULT_SET, [](STObject& tx) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseXrp, + TxAccount::A2); + + doInvariantCheck( + {"assets maximum must be positive"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + return adjust( + ac.view(), + keylet, + args(A2.id(), 0, [&](Adjustements& sample) { + sample.assetsMaximum = -1; + })); + }, + XRPAmount{}, + STTx{ttVAULT_SET, [](STObject& tx) {}}, + 
{tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseXrp, + TxAccount::A2); + + doInvariantCheck( + {"set must not change shares outstanding", + "updated zero sized vault must have no assets outstanding", + "updated zero sized vault must have no assets available"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + auto sleVault = ac.view().peek(keylet); + if (!sleVault) + return false; + ac.view().update(sleVault); + auto sleShares = ac.view().peek( + keylet::mptIssuance((*sleVault)[sfShareMPTID])); + if (!sleShares) + return false; + (*sleShares)[sfOutstandingAmount] = 0; + ac.view().update(sleShares); + return true; + }, + XRPAmount{}, + STTx{ttVAULT_SET, [](STObject& tx) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseXrp, + TxAccount::A2); + + doInvariantCheck( + {"updated shares must not exceed maximum"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + auto sleVault = ac.view().peek(keylet); + if (!sleVault) + return false; + auto sleShares = ac.view().peek( + keylet::mptIssuance((*sleVault)[sfShareMPTID])); + if (!sleShares) + return false; + (*sleShares)[sfMaximumAmount] = 10; + ac.view().update(sleShares); + + return adjust( + ac.view(), keylet, args(A2.id(), 10, [](Adjustements&) {})); + }, + XRPAmount{}, + STTx{ttVAULT_DEPOSIT, [](STObject&) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseXrp, + TxAccount::A2); + + doInvariantCheck( + {"updated shares must not exceed maximum"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + adjust( + ac.view(), keylet, args(A2.id(), 10, [](Adjustements&) {})); + + auto sleVault = ac.view().peek(keylet); + if (!sleVault) + return false; + auto sleShares = ac.view().peek( + keylet::mptIssuance((*sleVault)[sfShareMPTID])); + if (!sleShares) + return false; + (*sleShares)[sfOutstandingAmount] = maxMPTokenAmount + 1; + ac.view().update(sleShares); + return true; + }, + XRPAmount{}, + STTx{ttVAULT_DEPOSIT, [](STObject&) {}}, + {tecINVARIANT_FAILED, tefINVARIANT_FAILED}, + precloseXrp, + TxAccount::A2); + + testcase << "Vault create"; + doInvariantCheck( + { + "created vault must be empty", + "updated zero sized vault must have no assets outstanding", + "create operation must not have updated a vault", + }, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + auto sleVault = ac.view().peek(keylet); + if (!sleVault) + return false; + (*sleVault)[sfAssetsTotal] = 9; + ac.view().update(sleVault); + return true; + }, + XRPAmount{}, + STTx{ttVAULT_CREATE, [](STObject&) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + [&](Account const& A1, Account const& A2, Env& env) { + Vault vault{env}; + auto [tx, keylet] = + vault.create({.owner = A1, .asset = xrpIssue()}); + env(tx); + return true; + }); + + doInvariantCheck( + { + "created vault must be empty", + "updated zero sized vault must have no assets available", + "assets available must not be greater than assets outstanding", + "create operation must not have updated a vault", + }, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + auto sleVault = ac.view().peek(keylet); + if (!sleVault) + return false; + (*sleVault)[sfAssetsAvailable] = 9; + ac.view().update(sleVault); + return true; + }, 
+ XRPAmount{}, + STTx{ttVAULT_CREATE, [](STObject&) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + [&](Account const& A1, Account const& A2, Env& env) { + Vault vault{env}; + auto [tx, keylet] = + vault.create({.owner = A1, .asset = xrpIssue()}); + env(tx); + return true; + }); + + doInvariantCheck( + { + "created vault must be empty", + "loss unrealized must not exceed the difference between assets " + "outstanding and available", + "vault transaction must not change loss unrealized", + "create operation must not have updated a vault", + }, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + auto sleVault = ac.view().peek(keylet); + if (!sleVault) + return false; + (*sleVault)[sfLossUnrealized] = 1; + ac.view().update(sleVault); + return true; + }, + XRPAmount{}, + STTx{ttVAULT_CREATE, [](STObject&) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + [&](Account const& A1, Account const& A2, Env& env) { + Vault vault{env}; + auto [tx, keylet] = + vault.create({.owner = A1, .asset = xrpIssue()}); + env(tx); + return true; + }); + + doInvariantCheck( + { + "created vault must be empty", + "create operation must not have updated a vault", + }, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + auto sleVault = ac.view().peek(keylet); + if (!sleVault) + return false; + auto sleShares = ac.view().peek( + keylet::mptIssuance((*sleVault)[sfShareMPTID])); + if (!sleShares) + return false; + ac.view().update(sleVault); + (*sleShares)[sfOutstandingAmount] = 9; + ac.view().update(sleShares); + return true; + }, + XRPAmount{}, + STTx{ttVAULT_CREATE, [](STObject&) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + [&](Account const& A1, Account const& A2, Env& env) { + Vault vault{env}; + auto [tx, keylet] = + vault.create({.owner = A1, .asset = xrpIssue()}); + env(tx); + return true; + }); + + doInvariantCheck( + { + "assets maximum must be positive", + "create operation must not have updated a vault", + }, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + auto sleVault = ac.view().peek(keylet); + if (!sleVault) + return false; + (*sleVault)[sfAssetsMaximum] = Number(-1); + ac.view().update(sleVault); + return true; + }, + XRPAmount{}, + STTx{ttVAULT_CREATE, [](STObject&) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + [&](Account const& A1, Account const& A2, Env& env) { + Vault vault{env}; + auto [tx, keylet] = + vault.create({.owner = A1, .asset = xrpIssue()}); + env(tx); + return true; + }); + + doInvariantCheck( + {"create operation must not have updated a vault", + "shares issuer and vault pseudo-account must be the same", + "shares issuer must be a pseudo-account", + "shares issuer pseudo-account must point back to the vault"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + auto sleVault = ac.view().peek(keylet); + if (!sleVault) + return false; + auto sleShares = ac.view().peek( + keylet::mptIssuance((*sleVault)[sfShareMPTID])); + if (!sleShares) + return false; + ac.view().update(sleVault); + (*sleShares)[sfIssuer] = A1.id(); + ac.view().update(sleShares); + return true; + }, + XRPAmount{}, + STTx{ttVAULT_CREATE, [](STObject&) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + [&](Account const& A1, Account const& A2, Env& env) { + Vault vault{env}; + auto [tx, 
keylet] = + vault.create({.owner = A1, .asset = xrpIssue()}); + env(tx); + return true; + }); + + doInvariantCheck( + {"vault created by a wrong transaction type", + "account root created illegally"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + // The code below will create a valid vault with (almost) all + // the invariants holding. Except one: it is created by the + // wrong transaction type. + auto const sequence = ac.view().seq(); + auto const vaultKeylet = keylet::vault(A1.id(), sequence); + auto sleVault = std::make_shared(vaultKeylet); + auto const vaultPage = ac.view().dirInsert( + keylet::ownerDir(A1.id()), + sleVault->key(), + describeOwnerDir(A1.id())); + sleVault->setFieldU64(sfOwnerNode, *vaultPage); + + auto pseudoId = + pseudoAccountAddress(ac.view(), vaultKeylet.key); + // Create pseudo-account. + auto sleAccount = + std::make_shared(keylet::account(pseudoId)); + sleAccount->setAccountID(sfAccount, pseudoId); + sleAccount->setFieldAmount(sfBalance, STAmount{}); + std::uint32_t const seqno = // + ac.view().rules().enabled(featureSingleAssetVault) // + ? 0 // + : sequence; + sleAccount->setFieldU32(sfSequence, seqno); + sleAccount->setFieldU32( + sfFlags, + lsfDisableMaster | lsfDefaultRipple | lsfDepositAuth); + sleAccount->setFieldH256(sfVaultID, vaultKeylet.key); + ac.view().insert(sleAccount); + + auto const sharesMptId = makeMptID(sequence, pseudoId); + auto const sharesKeylet = keylet::mptIssuance(sharesMptId); + auto sleShares = std::make_shared(sharesKeylet); + auto const sharesPage = ac.view().dirInsert( + keylet::ownerDir(pseudoId), + sharesKeylet, + describeOwnerDir(pseudoId)); + sleShares->setFieldU64(sfOwnerNode, *sharesPage); + + sleShares->at(sfFlags) = 0; + sleShares->at(sfIssuer) = pseudoId; + sleShares->at(sfOutstandingAmount) = 0; + sleShares->at(sfSequence) = sequence; + + sleVault->at(sfAccount) = pseudoId; + sleVault->at(sfFlags) = 0; + sleVault->at(sfSequence) = sequence; + sleVault->at(sfOwner) = A1.id(); + sleVault->at(sfAssetsTotal) = Number(0); + sleVault->at(sfAssetsAvailable) = Number(0); + sleVault->at(sfLossUnrealized) = Number(0); + sleVault->at(sfShareMPTID) = sharesMptId; + sleVault->at(sfWithdrawalPolicy) = + vaultStrategyFirstComeFirstServe; + + ac.view().insert(sleVault); + ac.view().insert(sleShares); + return true; + }, + XRPAmount{}, + STTx{ttVAULT_SET, [](STObject&) {}}, + {tecINVARIANT_FAILED, tefINVARIANT_FAILED}); + + doInvariantCheck( + {"shares issuer and vault pseudo-account must be the same", + "shares issuer pseudo-account must point back to the vault"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const sequence = ac.view().seq(); + auto const vaultKeylet = keylet::vault(A1.id(), sequence); + auto sleVault = std::make_shared(vaultKeylet); + auto const vaultPage = ac.view().dirInsert( + keylet::ownerDir(A1.id()), + sleVault->key(), + describeOwnerDir(A1.id())); + sleVault->setFieldU64(sfOwnerNode, *vaultPage); + + auto pseudoId = + pseudoAccountAddress(ac.view(), vaultKeylet.key); + // Create pseudo-account. + auto sleAccount = + std::make_shared(keylet::account(pseudoId)); + sleAccount->setAccountID(sfAccount, pseudoId); + sleAccount->setFieldAmount(sfBalance, STAmount{}); + std::uint32_t const seqno = // + ac.view().rules().enabled(featureSingleAssetVault) // + ? 
0 // + : sequence; + sleAccount->setFieldU32(sfSequence, seqno); + sleAccount->setFieldU32( + sfFlags, + lsfDisableMaster | lsfDefaultRipple | lsfDepositAuth); + // sleAccount->setFieldH256(sfVaultID, vaultKeylet.key); + // Setting wrong vault key + sleAccount->setFieldH256(sfVaultID, uint256(42)); + ac.view().insert(sleAccount); + + auto const sharesMptId = makeMptID(sequence, pseudoId); + auto const sharesKeylet = keylet::mptIssuance(sharesMptId); + auto sleShares = std::make_shared(sharesKeylet); + auto const sharesPage = ac.view().dirInsert( + keylet::ownerDir(pseudoId), + sharesKeylet, + describeOwnerDir(pseudoId)); + sleShares->setFieldU64(sfOwnerNode, *sharesPage); + + sleShares->at(sfFlags) = 0; + sleShares->at(sfIssuer) = pseudoId; + sleShares->at(sfOutstandingAmount) = 0; + sleShares->at(sfSequence) = sequence; + + // sleVault->at(sfAccount) = pseudoId; + // Setting wrong pseudo acocunt ID + sleVault->at(sfAccount) = A2.id(); + sleVault->at(sfFlags) = 0; + sleVault->at(sfSequence) = sequence; + sleVault->at(sfOwner) = A1.id(); + sleVault->at(sfAssetsTotal) = Number(0); + sleVault->at(sfAssetsAvailable) = Number(0); + sleVault->at(sfLossUnrealized) = Number(0); + sleVault->at(sfShareMPTID) = sharesMptId; + sleVault->at(sfWithdrawalPolicy) = + vaultStrategyFirstComeFirstServe; + + ac.view().insert(sleVault); + ac.view().insert(sleShares); + return true; + }, + XRPAmount{}, + STTx{ttVAULT_CREATE, [](STObject&) {}}, + {tecINVARIANT_FAILED, tefINVARIANT_FAILED}); + + doInvariantCheck( + {"shares issuer and vault pseudo-account must be the same", + "shares issuer must exist"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const sequence = ac.view().seq(); + auto const vaultKeylet = keylet::vault(A1.id(), sequence); + auto sleVault = std::make_shared(vaultKeylet); + auto const vaultPage = ac.view().dirInsert( + keylet::ownerDir(A1.id()), + sleVault->key(), + describeOwnerDir(A1.id())); + sleVault->setFieldU64(sfOwnerNode, *vaultPage); + + auto const sharesMptId = makeMptID(sequence, A2.id()); + auto const sharesKeylet = keylet::mptIssuance(sharesMptId); + auto sleShares = std::make_shared(sharesKeylet); + auto const sharesPage = ac.view().dirInsert( + keylet::ownerDir(A2.id()), + sharesKeylet, + describeOwnerDir(A2.id())); + sleShares->setFieldU64(sfOwnerNode, *sharesPage); + + sleShares->at(sfFlags) = 0; + // Setting wrong pseudo acocunt ID + sleShares->at(sfIssuer) = AccountID(uint160(42)); + sleShares->at(sfOutstandingAmount) = 0; + sleShares->at(sfSequence) = sequence; + + sleVault->at(sfAccount) = A2.id(); + sleVault->at(sfFlags) = 0; + sleVault->at(sfSequence) = sequence; + sleVault->at(sfOwner) = A1.id(); + sleVault->at(sfAssetsTotal) = Number(0); + sleVault->at(sfAssetsAvailable) = Number(0); + sleVault->at(sfLossUnrealized) = Number(0); + sleVault->at(sfShareMPTID) = sharesMptId; + sleVault->at(sfWithdrawalPolicy) = + vaultStrategyFirstComeFirstServe; + + ac.view().insert(sleVault); + ac.view().insert(sleShares); + return true; + }, + XRPAmount{}, + STTx{ttVAULT_CREATE, [](STObject&) {}}, + {tecINVARIANT_FAILED, tefINVARIANT_FAILED}); + + testcase << "Vault deposit"; + doInvariantCheck( + {"deposit must change vault balance"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + return adjust( + ac.view(), + keylet, + args(A2.id(), 0, [&](Adjustements& sample) {})); + }, + XRPAmount{}, + STTx{ttVAULT_DEPOSIT, [](STObject&) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + 
precloseXrp); + + doInvariantCheck( + {"deposit assets outstanding must not exceed assets maximum"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + return adjust( + ac.view(), + keylet, + args(A2.id(), 200, [&](Adjustements& sample) { + sample.assetsMaximum = 1; + })); + }, + XRPAmount{}, + STTx{ + ttVAULT_DEPOSIT, + [](STObject& tx) { + tx.setFieldAmount(sfAmount, XRPAmount(200)); + }}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseXrp, + TxAccount::A2); + + // This really convoluted unit tests makes the zero balance on the + // depositor, by sending them the same amount as the transaction fee. + // The operation makes no sense, but the defensive check in + // ValidVault::finalize is otherwise impossible to trigger. + doInvariantCheck( + {"deposit must increase vault balance", + "deposit must change depositor balance"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + + // Move 10 drops to A4 to enforce total XRP balance + auto sleA4 = ac.view().peek(keylet::account(A4.id())); + if (!sleA4) + return false; + (*sleA4)[sfBalance] = *(*sleA4)[sfBalance] + 10; + ac.view().update(sleA4); + + return adjust( + ac.view(), + keylet, + args(A3.id(), -10, [&](Adjustements& sample) { + sample.accountAssets->amount = -100; + })); + }, + XRPAmount{100}, + STTx{ + ttVAULT_DEPOSIT, + [&](STObject& tx) { + tx[sfFee] = XRPAmount(100); + tx[sfAccount] = A3.id(); + }}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseXrp); + + doInvariantCheck( + {"deposit must increase vault balance", + "deposit must decrease depositor balance", + "deposit must change vault and depositor balance by equal amount", + "deposit and assets outstanding must add up", + "deposit and assets available must add up"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + + // Move 10 drops from A2 to A3 to enforce total XRP balance + auto sleA3 = ac.view().peek(keylet::account(A3.id())); + if (!sleA3) + return false; + (*sleA3)[sfBalance] = *(*sleA3)[sfBalance] + 10; + ac.view().update(sleA3); + + return adjust( + ac.view(), + keylet, + args(A2.id(), 10, [&](Adjustements& sample) { + sample.vaultAssets = -20; + sample.accountAssets->amount = 10; + })); + }, + XRPAmount{}, + STTx{ + ttVAULT_DEPOSIT, + [](STObject& tx) { tx[sfAmount] = XRPAmount(10); }}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseXrp, + TxAccount::A2); + + doInvariantCheck( + {"deposit must change depositor balance"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + + // Move 10 drops from A3 to vault to enforce total XRP balance + auto sleA3 = ac.view().peek(keylet::account(A3.id())); + if (!sleA3) + return false; + (*sleA3)[sfBalance] = *(*sleA3)[sfBalance] - 10; + ac.view().update(sleA3); + + return adjust( + ac.view(), + keylet, + args(A2.id(), 10, [&](Adjustements& sample) { + sample.accountAssets->amount = 0; + })); + }, + XRPAmount{}, + STTx{ + ttVAULT_DEPOSIT, + [](STObject& tx) { tx[sfAmount] = XRPAmount(10); }}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseXrp, + TxAccount::A2); + + doInvariantCheck( + {"deposit must change depositor shares"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + return adjust( + ac.view(), + keylet, + 
args(A2.id(), 10, [&](Adjustements& sample) { + sample.accountShares->amount = 0; + })); + }, + XRPAmount{}, + STTx{ + ttVAULT_DEPOSIT, + [](STObject& tx) { tx[sfAmount] = XRPAmount(10); }}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseXrp, + TxAccount::A2); + + doInvariantCheck( + {"deposit must change vault shares"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + return adjust( + ac.view(), + keylet, + args(A2.id(), 10, [&](Adjustements& sample) { + sample.sharesTotal = 0; + })); + }, + XRPAmount{}, + STTx{ + ttVAULT_DEPOSIT, + [](STObject& tx) { tx[sfAmount] = XRPAmount(10); }}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseXrp, + TxAccount::A2); + + doInvariantCheck( + {"deposit must increase depositor shares", + "deposit must change depositor and vault shares by equal amount", + "deposit must not change vault balance by more than deposited " + "amount"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + return adjust( + ac.view(), + keylet, + args(A2.id(), 10, [&](Adjustements& sample) { + sample.accountShares->amount = -5; + sample.sharesTotal = -10; + })); + }, + XRPAmount{}, + STTx{ + ttVAULT_DEPOSIT, + [](STObject& tx) { tx[sfAmount] = XRPAmount(5); }}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseXrp, + TxAccount::A2); + + doInvariantCheck( + {"deposit and assets outstanding must add up", + "deposit and assets available must add up"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + return adjust( + ac.view(), + keylet, + args(A2.id(), 10, [&](Adjustements& sample) { + sample.assetsTotal = 7; + sample.assetsAvailable = 7; + })); + }, + XRPAmount{}, + STTx{ + ttVAULT_DEPOSIT, + [](STObject& tx) { tx[sfAmount] = XRPAmount(10); }}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseXrp, + TxAccount::A2); + + testcase << "Vault withdrawal"; + doInvariantCheck( + {"withdrawal must change vault balance"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + return adjust( + ac.view(), + keylet, + args(A2.id(), 0, [&](Adjustements& sample) {})); + }, + XRPAmount{}, + STTx{ttVAULT_WITHDRAW, [](STObject&) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseXrp); + + // Almost identical to the really convoluted test for deposit, where the + // depositor spends only the transaction fee. In case of withdrawal, + // this test is almost the same as normal withdrawal where the + // sfDestination would have been A4, but has been omitted. + doInvariantCheck( + {"withdrawal must change one destination balance"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + + // Move 10 drops to A4 to enforce total XRP balance + auto sleA4 = ac.view().peek(keylet::account(A4.id())); + if (!sleA4) + return false; + (*sleA4)[sfBalance] = *(*sleA4)[sfBalance] + 10; + ac.view().update(sleA4); + + return adjust( + ac.view(), + keylet, + args(A3.id(), -10, [&](Adjustements& sample) { + sample.accountAssets->amount = -100; + })); + }, + XRPAmount{100}, + STTx{ + ttVAULT_WITHDRAW, + [&](STObject& tx) { + tx[sfFee] = XRPAmount(100); + tx[sfAccount] = A3.id(); + // This commented out line causes the invariant violation. 
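+                // Without it the destination defaults to sfAccount (A3), whose net
+                // XRP change is zero once the fee is compensated, so no destination
+                // balance change is observed and the invariant fires.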
+ // tx[sfDestination] = A4.id(); + }}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseXrp); + + doInvariantCheck( + {"withdrawal must change vault and destination balance by " + "equal amount", + "withdrawal must decrease vault balance", + "withdrawal must increase destination balance", + "withdrawal and assets outstanding must add up", + "withdrawal and assets available must add up"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + + // Move 10 drops from A2 to A3 to enforce total XRP balance + auto sleA3 = ac.view().peek(keylet::account(A3.id())); + if (!sleA3) + return false; + (*sleA3)[sfBalance] = *(*sleA3)[sfBalance] + 10; + ac.view().update(sleA3); + + return adjust( + ac.view(), + keylet, + args(A2.id(), -10, [&](Adjustements& sample) { + sample.vaultAssets = 10; + sample.accountAssets->amount = -20; + })); + }, + XRPAmount{}, + STTx{ttVAULT_WITHDRAW, [](STObject&) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseXrp, + TxAccount::A2); + + doInvariantCheck( + {"withdrawal must change one destination balance"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + if (!adjust( + ac.view(), + keylet, + args(A2.id(), -10, [&](Adjustements& sample) { + *sample.vaultAssets -= 5; + }))) + return false; + auto sleA3 = ac.view().peek(keylet::account(A3.id())); + if (!sleA3) + return false; + (*sleA3)[sfBalance] = *(*sleA3)[sfBalance] + 5; + ac.view().update(sleA3); + return true; + }, + XRPAmount{}, + STTx{ + ttVAULT_WITHDRAW, + [&](STObject& tx) { tx.setAccountID(sfDestination, A3.id()); }}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseXrp, + TxAccount::A2); + + doInvariantCheck( + {"withdrawal must change depositor shares"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + return adjust( + ac.view(), + keylet, + args(A2.id(), -10, [&](Adjustements& sample) { + sample.accountShares->amount = 0; + })); + }, + XRPAmount{}, + STTx{ttVAULT_WITHDRAW, [](STObject&) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseXrp, + TxAccount::A2); + + doInvariantCheck( + {"withdrawal must change vault shares"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + return adjust( + ac.view(), + keylet, + args(A2.id(), -10, [&](Adjustements& sample) { + sample.sharesTotal = 0; + })); + }, + XRPAmount{}, + STTx{ttVAULT_WITHDRAW, [](STObject&) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseXrp, + TxAccount::A2); + + doInvariantCheck( + {"withdrawal must decrease depositor shares", + "withdrawal must change depositor and vault shares by equal " + "amount"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + return adjust( + ac.view(), + keylet, + args(A2.id(), -10, [&](Adjustements& sample) { + sample.accountShares->amount = 5; + sample.sharesTotal = 10; + })); + }, + XRPAmount{}, + STTx{ttVAULT_WITHDRAW, [](STObject&) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseXrp, + TxAccount::A2); + + doInvariantCheck( + {"withdrawal and assets outstanding must add up", + "withdrawal and assets available must add up"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + return adjust( + 
ac.view(), + keylet, + args(A2.id(), -10, [&](Adjustements& sample) { + sample.assetsTotal = -15; + sample.assetsAvailable = -15; + })); + }, + XRPAmount{}, + STTx{ttVAULT_WITHDRAW, [](STObject&) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseXrp, + TxAccount::A2); + + auto const precloseMpt = + [&](Account const& A1, Account const& A2, Env& env) -> bool { + env.fund(XRP(1000), A3, A4); + + // Create MPT asset + { + Json::Value jv; + jv[sfAccount] = A3.human(); + jv[sfTransactionType] = jss::MPTokenIssuanceCreate; + jv[sfFlags] = tfMPTCanTransfer; + env(jv); + env.close(); + } + + auto const mptID = makeMptID(env.seq(A3) - 1, A3); + Asset asset = MPTIssue(mptID); + // Authorize A1 A2 A4 + { + Json::Value jv; + jv[sfAccount] = A1.human(); + jv[sfTransactionType] = jss::MPTokenAuthorize; + jv[sfMPTokenIssuanceID] = to_string(mptID); + env(jv); + jv[sfAccount] = A2.human(); + env(jv); + jv[sfAccount] = A4.human(); + env(jv); + + env.close(); + } + // Send tokens to A1 A2 A4 + { + env(pay(A3, A1, asset(1000))); + env(pay(A3, A2, asset(1000))); + env(pay(A3, A4, asset(1000))); + env.close(); + } + + Vault vault{env}; + auto [tx, keylet] = vault.create({.owner = A1, .asset = asset}); + env(tx); + env(vault.deposit( + {.depositor = A1, .id = keylet.key, .amount = asset(10)})); + env(vault.deposit( + {.depositor = A2, .id = keylet.key, .amount = asset(10)})); + env(vault.deposit( + {.depositor = A4, .id = keylet.key, .amount = asset(10)})); + return true; + }; + + doInvariantCheck( + {"withdrawal must decrease depositor shares", + "withdrawal must change depositor and vault shares by equal " + "amount"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq() - 2); + return adjust( + ac.view(), + keylet, + args(A2.id(), -10, [&](Adjustements& sample) { + sample.accountShares->amount = 5; + })); + }, + XRPAmount{}, + STTx{ + ttVAULT_WITHDRAW, + [&](STObject& tx) { tx[sfAccount] = A3.id(); }}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseMpt, + TxAccount::A2); + + testcase << "Vault clawback"; + doInvariantCheck( + {"clawback must change vault balance"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq() - 2); + return adjust( + ac.view(), + keylet, + args(A2.id(), -1, [&](Adjustements& sample) { + sample.vaultAssets = 0; + })); + }, + XRPAmount{}, + STTx{ + ttVAULT_CLAWBACK, + [&](STObject& tx) { tx[sfAccount] = A3.id(); }}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseMpt); + + // Not the same as below check: attempt to clawback XRP + doInvariantCheck( + {"clawback may only be performed by the asset issuer"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq()); + return adjust( + ac.view(), + keylet, + args(A2.id(), 0, [&](Adjustements& sample) {})); + }, + XRPAmount{}, + STTx{ttVAULT_CLAWBACK, [](STObject&) {}}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseXrp); + + // Not the same as above check: attempt to clawback MPT by bad account + doInvariantCheck( + {"clawback may only be performed by the asset issuer"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq() - 2); + return adjust( + ac.view(), + keylet, + args(A2.id(), 0, [&](Adjustements& sample) {})); + }, + XRPAmount{}, + STTx{ + ttVAULT_CLAWBACK, + [&](STObject& tx) { tx[sfAccount] = A4.id(); }}, + 
{tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseMpt); + + doInvariantCheck( + {"clawback must decrease vault balance", + "clawback must decrease holder shares", + "clawback must change vault shares"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq() - 2); + return adjust( + ac.view(), + keylet, + args(A4.id(), 10, [&](Adjustements& sample) { + sample.sharesTotal = 0; + })); + }, + XRPAmount{}, + STTx{ + ttVAULT_CLAWBACK, + [&](STObject& tx) { + tx[sfAccount] = A3.id(); + tx[sfHolder] = A4.id(); + }}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseMpt); + + doInvariantCheck( + {"clawback must change holder shares"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq() - 2); + return adjust( + ac.view(), + keylet, + args(A4.id(), -10, [&](Adjustements& sample) { + sample.accountShares->amount = 0; + })); + }, + XRPAmount{}, + STTx{ + ttVAULT_CLAWBACK, + [&](STObject& tx) { + tx[sfAccount] = A3.id(); + tx[sfHolder] = A4.id(); + }}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseMpt); + + doInvariantCheck( + {"clawback must change holder and vault shares by equal amount", + "clawback and assets outstanding must add up", + "clawback and assets available must add up"}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const keylet = keylet::vault(A1.id(), ac.view().seq() - 2); + return adjust( + ac.view(), + keylet, + args(A4.id(), -10, [&](Adjustements& sample) { + sample.accountShares->amount = -8; + sample.assetsTotal = -7; + sample.assetsAvailable = -7; + })); + }, + XRPAmount{}, + STTx{ + ttVAULT_CLAWBACK, + [&](STObject& tx) { + tx[sfAccount] = A3.id(); + tx[sfHolder] = A4.id(); + }}, + {tecINVARIANT_FAILED, tecINVARIANT_FAILED}, + precloseMpt); + } + public: void run() override @@ -1746,6 +3445,7 @@ public: testPermissionedDomainInvariants(); testValidPseudoAccounts(); testPermissionedDEX(); + testVault(); } }; diff --git a/src/test/app/Vault_test.cpp b/src/test/app/Vault_test.cpp index 159bfd0796..4a731ddd57 100644 --- a/src/test/app/Vault_test.cpp +++ b/src/test/app/Vault_test.cpp @@ -177,6 +177,14 @@ class Vault_test : public beast::unit_test::suite env.close(); } + { + testcase(prefix + " set maximum is idempotent, set it again"); + auto tx = vault.set({.owner = owner, .id = keylet.key}); + tx[sfAssetsMaximum] = asset(150).number(); + env(tx); + env.close(); + } + { testcase(prefix + " set data"); auto tx = vault.set({.owner = owner, .id = keylet.key}); @@ -218,6 +226,7 @@ class Vault_test : public beast::unit_test::suite .id = keylet.key, .amount = asset(1000)}); env(tx, ter(tecINSUFFICIENT_FUNDS)); + env.close(); } { @@ -385,6 +394,27 @@ class Vault_test : public beast::unit_test::suite env.balance(depositor, shares) == share(50 * scale)); } + if (!asset.raw().native()) + { + testcase(prefix + " issuer deposits"); + auto tx = vault.deposit( + {.depositor = issuer, + .id = keylet.key, + .amount = asset(10)}); + env(tx); + env.close(); + BEAST_EXPECT(env.balance(issuer, shares) == share(10 * scale)); + + testcase(prefix + " issuer withdraws"); + tx = vault.withdraw( + {.depositor = issuer, + .id = keylet.key, + .amount = share(10 * scale)}); + env(tx); + env.close(); + BEAST_EXPECT(env.balance(issuer, shares) == share(0 * scale)); + } + { testcase(prefix + " withdraw remaining assets"); auto tx = vault.withdraw( @@ -454,6 +484,8 @@ class Vault_test : public beast::unit_test::suite .amount = 
asset(10)}); tx[sfDestination] = erin.human(); env(tx); + env.close(); + // Erin returns assets to issuer env(pay(erin, issuer, asset(10))); env.close(); @@ -479,12 +511,14 @@ class Vault_test : public beast::unit_test::suite testcase(prefix + " fail to delete because wrong owner"); auto tx = vault.del({.owner = issuer, .id = keylet.key}); env(tx, ter(tecNO_PERMISSION)); + env.close(); } { testcase(prefix + " delete empty vault"); auto tx = vault.del({.owner = owner, .id = keylet.key}); env(tx); + env.close(); BEAST_EXPECT(!env.le(keylet)); } }; @@ -1328,6 +1362,26 @@ class Vault_test : public beast::unit_test::suite { using namespace test::jtx; { + { + testcase("IOU fail because MPT is disabled"); + Env env{ + *this, + (testable_amendments() - featureMPTokensV1) | + featureSingleAssetVault}; + Account issuer{"issuer"}; + Account owner{"owner"}; + env.fund(XRP(1000), issuer, owner); + env.close(); + + Vault vault{env}; + Asset asset = issuer["IOU"].asset(); + auto [tx, keylet] = + vault.create({.owner = owner, .asset = asset}); + + env(tx, ter(temDISABLED)); + env.close(); + } + { testcase("IOU fail create frozen"); Env env{*this, testable_amendments() | featureSingleAssetVault}; @@ -2878,6 +2932,12 @@ class Vault_test : public beast::unit_test::suite tx[sfDomainID] = to_string(domainId); env(tx); env.close(); + + // Should be idempotent + tx = vault.set({.owner = owner, .id = keylet.key}); + tx[sfDomainID] = to_string(domainId); + env(tx); + env.close(); } } @@ -3033,6 +3093,7 @@ class Vault_test : public beast::unit_test::suite .id = keylet.key, .amount = asset(50)}); env(tx); + env.close(); tx = vault.clawback( {.issuer = issuer, @@ -3047,6 +3108,7 @@ class Vault_test : public beast::unit_test::suite .holder = owner, .amount = asset(0)}); env(tx); + env.close(); tx = vault.del({ .owner = owner, @@ -3093,6 +3155,7 @@ class Vault_test : public beast::unit_test::suite auto tx = vault.deposit( {.depositor = owner, .id = keylet.key, .amount = asset(50)}); env(tx); + env.close(); } { diff --git a/src/xrpld/app/tx/detail/InvariantCheck.cpp b/src/xrpld/app/tx/detail/InvariantCheck.cpp index 87ca9ea6c1..2cfcb6d258 100644 --- a/src/xrpld/app/tx/detail/InvariantCheck.cpp +++ b/src/xrpld/app/tx/detail/InvariantCheck.cpp @@ -24,17 +24,26 @@ #include #include +#include #include #include #include #include +#include +#include +#include +#include #include #include #include +#include #include #include #include +#include +#include + namespace ripple { /* @@ -78,6 +87,8 @@ enum Privilege { // object (except by issuer) mayDeleteMPT = 0x0400, // The transaction MAY delete an MPT object. May not create. 
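+    // Like the privileges above, this is a bit flag; it is combined with
+    // operator| and queried via hasPrivilege() by the vault invariant checks.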
+ mustModifyVault = + 0x0800, // The transaction must modify, delete or create, a vault }; constexpr Privilege operator|(Privilege lhs, Privilege rhs) @@ -2170,4 +2181,943 @@ ValidAMM::finalize( return true; } +//------------------------------------------------------------------------------ + +ValidVault::Vault +ValidVault::Vault::make(SLE const& from) +{ + XRPL_ASSERT( + from.getType() == ltVAULT, + "ValidVault::Vault::make : from Vault object"); + + ValidVault::Vault self; + self.key = from.key(); + self.asset = from.at(sfAsset); + self.pseudoId = from.getAccountID(sfAccount); + self.shareMPTID = from.getFieldH192(sfShareMPTID); + self.assetsTotal = from.at(sfAssetsTotal); + self.assetsAvailable = from.at(sfAssetsAvailable); + self.assetsMaximum = from.at(sfAssetsMaximum); + self.lossUnrealized = from.at(sfLossUnrealized); + return self; +} + +ValidVault::Shares +ValidVault::Shares::make(SLE const& from) +{ + XRPL_ASSERT( + from.getType() == ltMPTOKEN_ISSUANCE, + "ValidVault::Shares::make : from MPTokenIssuance object"); + + ValidVault::Shares self; + self.share = MPTIssue( + makeMptID(from.getFieldU32(sfSequence), from.getAccountID(sfIssuer))); + self.sharesTotal = from.at(sfOutstandingAmount); + self.sharesMaximum = from[~sfMaximumAmount].value_or(maxMPTokenAmount); + return self; +} + +void +ValidVault::visitEntry( + bool isDelete, + std::shared_ptr const& before, + std::shared_ptr const& after) +{ + // If `before` is empty, this means an object is being created, in which + // case `isDelete` must be false. Otherwise `before` and `after` are set and + // `isDelete` indicates whether an object is being deleted or modified. + XRPL_ASSERT( + after != nullptr && (before != nullptr || !isDelete), + "ripple::ValidVault::visitEntry : some object is available"); + + // `Number balance` will capture the difference (delta) between "before" + // state (zero if created) and "after" state (zero if destroyed), so the + // invariants can validate that the change in account balances matches the + // change in vault balances, stored to deltas_ at the end of this function. + Number balance{}; + + // By default do not add anything to deltas + std::int8_t sign = 0; + if (before) + { + switch (before->getType()) + { + case ltVAULT: + beforeVault_.push_back(Vault::make(*before)); + break; + case ltMPTOKEN_ISSUANCE: + // At this moment we have no way of telling if this object holds + // vault shares or something else. Save it for finalize. + beforeMPTs_.push_back(Shares::make(*before)); + balance = static_cast( + before->getFieldU64(sfOutstandingAmount)); + sign = 1; + break; + case ltMPTOKEN: + balance = + static_cast(before->getFieldU64(sfMPTAmount)); + sign = -1; + break; + case ltACCOUNT_ROOT: + case ltRIPPLE_STATE: + balance = before->getFieldAmount(sfBalance); + sign = -1; + break; + default:; + } + } + + if (!isDelete && after) + { + switch (after->getType()) + { + case ltVAULT: + afterVault_.push_back(Vault::make(*after)); + break; + case ltMPTOKEN_ISSUANCE: + // At this moment we have no way of telling if this object holds + // vault shares or something else. Save it for finalize. 
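+                // For example: a deposit that raises sfOutstandingAmount from 100
+                // to 110 records (100 - 110) * +1 = -10 for this issuance, while the
+                // holder's MPToken (ltMPTOKEN case below) records +10; finalize()
+                // checks that the two cancel out.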
+ afterMPTs_.push_back(Shares::make(*after)); + balance -= Number(static_cast( + after->getFieldU64(sfOutstandingAmount))); + sign = 1; + break; + case ltMPTOKEN: + balance -= Number( + static_cast(after->getFieldU64(sfMPTAmount))); + sign = -1; + break; + case ltACCOUNT_ROOT: + case ltRIPPLE_STATE: + balance -= Number(after->getFieldAmount(sfBalance)); + sign = -1; + break; + default:; + } + } + + uint256 const key = (before ? before->key() : after->key()); + if (sign && balance != zero) + deltas_[key] = balance * sign; +} + +bool +ValidVault::finalize( + STTx const& tx, + TER const ret, + XRPAmount const fee, + ReadView const& view, + beast::Journal const& j) +{ + bool const enforce = view.rules().enabled(featureSingleAssetVault); + + if (!isTesSuccess(ret)) + return true; // Do not perform checks + + if (afterVault_.empty() && beforeVault_.empty()) + { + if (hasPrivilege(tx, mustModifyVault)) + { + JLOG(j.fatal()) << // + "Invariant failed: vault operation succeeded without modifying " + "a vault"; + XRPL_ASSERT( + enforce, "ripple::ValidVault::finalize : vault noop invariant"); + return !enforce; + } + + return true; // Not a vault operation + } + else if (!hasPrivilege(tx, mustModifyVault)) // TODO: mayModifyVault + { + JLOG(j.fatal()) << // + "Invariant failed: vault updated by a wrong transaction type"; + XRPL_ASSERT( + enforce, + "ripple::ValidVault::finalize : illegal vault transaction " + "invariant"); + return !enforce; // Also not a vault operation + } + + if (beforeVault_.size() > 1 || afterVault_.size() > 1) + { + JLOG(j.fatal()) << // + "Invariant failed: vault operation updated more than single vault"; + XRPL_ASSERT( + enforce, "ripple::ValidVault::finalize : single vault invariant"); + return !enforce; // That's all we can do here + } + + auto const txnType = tx.getTxnType(); + + // We do special handling for ttVAULT_DELETE first, because it's the only + // vault-modifying transaction without an "after" state of the vault + if (afterVault_.empty()) + { + if (txnType != ttVAULT_DELETE) + { + JLOG(j.fatal()) << // + "Invariant failed: vault deleted by a wrong transaction type"; + XRPL_ASSERT( + enforce, + "ripple::ValidVault::finalize : illegal vault deletion " + "invariant"); + return !enforce; // That's all we can do here + } + + // Note, if afterVault_ is empty then we know that beforeVault_ is not + // empty, as enforced at the top of this function + auto const& beforeVault = beforeVault_[0]; + + // At this moment we only know a vault is being deleted and there + // might be some MPTokenIssuance objects which are deleted in the + // same transaction. Find the one matching this vault. 
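+        // A linear scan over beforeMPTs_ is sufficient: as noted for updatedShares
+        // below, the number of MPTokenIssuance objects touched by one transaction
+        // is expected to be very small.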
+ auto const deletedShares = [&]() -> std::optional { + for (auto const& e : beforeMPTs_) + { + if (e.share.getMptID() == beforeVault.shareMPTID) + return std::move(e); + } + return std::nullopt; + }(); + + if (!deletedShares) + { + JLOG(j.fatal()) << "Invariant failed: deleted vault must also " + "delete shares"; + XRPL_ASSERT( + enforce, + "ripple::ValidVault::finalize : shares deletion invariant"); + return !enforce; // That's all we can do here + } + + bool result = true; + if (deletedShares->sharesTotal != 0) + { + JLOG(j.fatal()) << "Invariant failed: deleted vault must have no " + "shares outstanding"; + result = false; + } + if (beforeVault.assetsTotal != zero) + { + JLOG(j.fatal()) << "Invariant failed: deleted vault must have no " + "assets outstanding"; + result = false; + } + if (beforeVault.assetsAvailable != zero) + { + JLOG(j.fatal()) << "Invariant failed: deleted vault must have no " + "assets available"; + result = false; + } + + return result; + } + else if (txnType == ttVAULT_DELETE) + { + JLOG(j.fatal()) << "Invariant failed: vault deletion succeeded without " + "deleting a vault"; + XRPL_ASSERT( + enforce, "ripple::ValidVault::finalize : vault deletion invariant"); + return !enforce; // That's all we can do here + } + + // Note, `afterVault_.empty()` is handled above + auto const& afterVault = afterVault_[0]; + XRPL_ASSERT( + beforeVault_.empty() || beforeVault_[0].key == afterVault.key, + "ripple::ValidVault::finalize : single vault operation"); + + auto const updatedShares = [&]() -> std::optional { + // At this moment we only know that a vault is being updated and there + // might be some MPTokenIssuance objects which are also updated in the + // same transaction. Find the one matching the shares to this vault. + // Note, we expect updatedMPTs collection to be extremely small. For + // such collections linear search is faster than lookup. + for (auto const& e : afterMPTs_) + { + if (e.share.getMptID() == afterVault.shareMPTID) + return e; + } + + auto const sleShares = + view.read(keylet::mptIssuance(afterVault.shareMPTID)); + + return sleShares ? 
std::optional(Shares::make(*sleShares)) + : std::nullopt; + }(); + + bool result = true; + + // Universal transaction checks + if (!beforeVault_.empty()) + { + auto const& beforeVault = beforeVault_[0]; + if (afterVault.asset != beforeVault.asset || + afterVault.pseudoId != beforeVault.pseudoId || + afterVault.shareMPTID != beforeVault.shareMPTID) + { + JLOG(j.fatal()) + << "Invariant failed: violation of vault immutable data"; + result = false; + } + } + + if (!updatedShares) + { + JLOG(j.fatal()) << "Invariant failed: updated vault must have shares"; + XRPL_ASSERT( + enforce, + "ripple::ValidVault::finalize : vault has shares invariant"); + return !enforce; // That's all we can do here + } + + if (updatedShares->sharesTotal == 0) + { + if (afterVault.assetsTotal != zero) + { + JLOG(j.fatal()) << "Invariant failed: updated zero sized " + "vault must have no assets outstanding"; + result = false; + } + if (afterVault.assetsAvailable != zero) + { + JLOG(j.fatal()) << "Invariant failed: updated zero sized " + "vault must have no assets available"; + result = false; + } + } + else if (updatedShares->sharesTotal > updatedShares->sharesMaximum) + { + JLOG(j.fatal()) // + << "Invariant failed: updated shares must not exceed maximum " + << updatedShares->sharesMaximum; + result = false; + } + + if (afterVault.assetsAvailable < zero) + { + JLOG(j.fatal()) + << "Invariant failed: assets available must be positive"; + result = false; + } + + if (afterVault.assetsAvailable > afterVault.assetsTotal) + { + JLOG(j.fatal()) << "Invariant failed: assets available must " + "not be greater than assets outstanding"; + result = false; + } + else if ( + afterVault.lossUnrealized > + afterVault.assetsTotal - afterVault.assetsAvailable) + { + JLOG(j.fatal()) // + << "Invariant failed: loss unrealized must not exceed " + "the difference between assets outstanding and available"; + result = false; + } + + if (afterVault.assetsTotal < zero) + { + JLOG(j.fatal()) + << "Invariant failed: assets outstanding must be positive"; + result = false; + } + + if (afterVault.assetsMaximum < zero) + { + JLOG(j.fatal()) << "Invariant failed: assets maximum must be positive"; + result = false; + } + + // Thanks to this check we can simply do `assert(!beforeVault_.empty()` when + // enforcing invariants on transaction types other than ttVAULT_CREATE + if (beforeVault_.empty() && txnType != ttVAULT_CREATE) + { + JLOG(j.fatal()) << // + "Invariant failed: vault created by a wrong transaction type"; + XRPL_ASSERT( + enforce, "ripple::ValidVault::finalize : vault creation invariant"); + return !enforce; // That's all we can do here + } + + if (!beforeVault_.empty() && + afterVault.lossUnrealized != beforeVault_[0].lossUnrealized) + { + JLOG(j.fatal()) << // + "Invariant failed: vault transaction must not change loss " + "unrealized"; + result = false; + } + + auto const beforeShares = [&]() -> std::optional { + if (beforeVault_.empty()) + return std::nullopt; + auto const& beforeVault = beforeVault_[0]; + + for (auto const& e : beforeMPTs_) + { + if (e.share.getMptID() == beforeVault.shareMPTID) + return std::move(e); + } + return std::nullopt; + }(); + + if (!beforeShares && + (tx.getTxnType() == ttVAULT_DEPOSIT || // + tx.getTxnType() == ttVAULT_WITHDRAW || // + tx.getTxnType() == ttVAULT_CLAWBACK)) + { + JLOG(j.fatal()) << "Invariant failed: vault operation succeeded " + "without updating shares"; + XRPL_ASSERT( + enforce, "ripple::ValidVault::finalize : shares noop invariant"); + return !enforce; // That's all we can do here + } + 
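+    // The helpers below read the per-account balance changes recorded in deltas_:
+    // XRP via the AccountRoot, IOUs via the trust line (the sign flip below accounts
+    // for trust line balances being stored from the low account's perspective), and
+    // MPTs via the holder's MPToken entry.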
+ auto const& vaultAsset = afterVault.asset; + auto const deltaAssets = [&](AccountID const& id) -> std::optional { + auto const get = // + [&](auto const& it, std::int8_t sign = 1) -> std::optional { + if (it == deltas_.end()) + return std::nullopt; + + return it->second * sign; + }; + + return std::visit( + [&](TIss const& issue) { + if constexpr (std::is_same_v) + { + if (isXRP(issue)) + return get(deltas_.find(keylet::account(id).key)); + return get( + deltas_.find(keylet::line(id, issue).key), + id > issue.getIssuer() ? -1 : 1); + } + else if constexpr (std::is_same_v) + { + return get(deltas_.find( + keylet::mptoken(issue.getMptID(), id).key)); + } + }, + vaultAsset.value()); + }; + auto const deltaShares = [&](AccountID const& id) -> std::optional { + auto const it = [&]() { + if (id == afterVault.pseudoId) + return deltas_.find( + keylet::mptIssuance(afterVault.shareMPTID).key); + return deltas_.find(keylet::mptoken(afterVault.shareMPTID, id).key); + }(); + + return it != deltas_.end() ? std::optional(it->second) + : std::nullopt; + }; + + // Technically this does not need to be a lambda, but it's more + // convenient thanks to early "return false"; the not-so-nice + // alternatives are several layers of nested if/else or more complex + // (i.e. brittle) if statements. + result &= [&]() { + switch (txnType) + { + case ttVAULT_CREATE: { + bool result = true; + + if (!beforeVault_.empty()) + { + JLOG(j.fatal()) // + << "Invariant failed: create operation must not have " + "updated a vault"; + result = false; + } + + if (afterVault.assetsAvailable != zero || + afterVault.assetsTotal != zero || + afterVault.lossUnrealized != zero || + updatedShares->sharesTotal != 0) + { + JLOG(j.fatal()) // + << "Invariant failed: created vault must be empty"; + result = false; + } + + if (afterVault.pseudoId != updatedShares->share.getIssuer()) + { + JLOG(j.fatal()) // + << "Invariant failed: shares issuer and vault " + "pseudo-account must be the same"; + result = false; + } + + auto const sleSharesIssuer = view.read( + keylet::account(updatedShares->share.getIssuer())); + if (!sleSharesIssuer) + { + JLOG(j.fatal()) // + << "Invariant failed: shares issuer must exist"; + return false; + } + + if (!isPseudoAccount(sleSharesIssuer)) + { + JLOG(j.fatal()) // + << "Invariant failed: shares issuer must be a " + "pseudo-account"; + result = false; + } + + if (auto const vaultId = (*sleSharesIssuer)[~sfVaultID]; + !vaultId || *vaultId != afterVault.key) + { + JLOG(j.fatal()) // + << "Invariant failed: shares issuer pseudo-account " + "must point back to the vault"; + result = false; + } + + return result; + } + case ttVAULT_SET: { + bool result = true; + + XRPL_ASSERT( + !beforeVault_.empty(), + "ripple::ValidVault::finalize : set updated a vault"); + auto const& beforeVault = beforeVault_[0]; + + auto const vaultDeltaAssets = deltaAssets(afterVault.pseudoId); + if (vaultDeltaAssets) + { + JLOG(j.fatal()) << // + "Invariant failed: set must not change vault balance"; + result = false; + } + + if (beforeVault.assetsTotal != afterVault.assetsTotal) + { + JLOG(j.fatal()) << // + "Invariant failed: set must not change assets " + "outstanding"; + result = false; + } + + if (afterVault.assetsMaximum > zero && + afterVault.assetsTotal > afterVault.assetsMaximum) + { + JLOG(j.fatal()) << // + "Invariant failed: set assets outstanding must not " + "exceed assets maximum"; + result = false; + } + + if (beforeVault.assetsAvailable != afterVault.assetsAvailable) + { + JLOG(j.fatal()) << // + "Invariant failed: set 
must not change assets " + "available"; + result = false; + } + + if (beforeShares && updatedShares && + beforeShares->sharesTotal != updatedShares->sharesTotal) + { + JLOG(j.fatal()) << // + "Invariant failed: set must not change shares " + "outstanding"; + result = false; + } + + return result; + } + case ttVAULT_DEPOSIT: { + bool result = true; + + XRPL_ASSERT( + !beforeVault_.empty(), + "ripple::ValidVault::finalize : deposit updated a vault"); + auto const& beforeVault = beforeVault_[0]; + + auto const vaultDeltaAssets = deltaAssets(afterVault.pseudoId); + + if (!vaultDeltaAssets) + { + JLOG(j.fatal()) << // + "Invariant failed: deposit must change vault balance"; + return false; // That's all we can do + } + + if (*vaultDeltaAssets > tx[sfAmount]) + { + JLOG(j.fatal()) << // + "Invariant failed: deposit must not change vault " + "balance by more than deposited amount"; + result = false; + } + + if (*vaultDeltaAssets <= zero) + { + JLOG(j.fatal()) << // + "Invariant failed: deposit must increase vault balance"; + result = false; + } + + // Any payments (including deposits) made by the issuer + // do not change their balance, but create funds instead. + bool const issuerDeposit = [&]() -> bool { + if (vaultAsset.native()) + return false; + return tx[sfAccount] == vaultAsset.getIssuer(); + }(); + + if (!issuerDeposit) + { + auto const accountDeltaAssets = + [&]() -> std::optional { + if (auto ret = deltaAssets(tx[sfAccount]); ret) + { + // Compensate for transaction fee deduced from + // sfAccount + if (vaultAsset.native()) + *ret += fee.drops(); + if (*ret != zero) + return ret; + } + return std::nullopt; + }(); + + if (!accountDeltaAssets) + { + JLOG(j.fatal()) << // + "Invariant failed: deposit must change depositor " + "balance"; + return false; + } + + if (*accountDeltaAssets >= zero) + { + JLOG(j.fatal()) << // + "Invariant failed: deposit must decrease depositor " + "balance"; + result = false; + } + + if (*accountDeltaAssets * -1 != *vaultDeltaAssets) + { + JLOG(j.fatal()) << // + "Invariant failed: deposit must change vault and " + "depositor balance by equal amount"; + result = false; + } + } + + if (afterVault.assetsMaximum > zero && + afterVault.assetsTotal > afterVault.assetsMaximum) + { + JLOG(j.fatal()) << // + "Invariant failed: deposit assets outstanding must not " + "exceed assets maximum"; + result = false; + } + + auto const accountDeltaShares = deltaShares(tx[sfAccount]); + if (!accountDeltaShares) + { + JLOG(j.fatal()) << // + "Invariant failed: deposit must change depositor " + "shares"; + return false; // That's all we can do + } + + if (*accountDeltaShares <= zero) + { + JLOG(j.fatal()) << // + "Invariant failed: deposit must increase depositor " + "shares"; + result = false; + } + + auto const vaultDeltaShares = deltaShares(afterVault.pseudoId); + if (!vaultDeltaShares) + { + JLOG(j.fatal()) << // + "Invariant failed: deposit must change vault shares"; + return false; // That's all we can do + } + + if (*vaultDeltaShares * -1 != *accountDeltaShares) + { + JLOG(j.fatal()) << // + "Invariant failed: deposit must change depositor and " + "vault shares by equal amount"; + result = false; + } + + if (beforeVault.assetsTotal + *vaultDeltaAssets != + afterVault.assetsTotal) + { + JLOG(j.fatal()) << "Invariant failed: deposit and assets " + "outstanding must add up"; + result = false; + } + if (beforeVault.assetsAvailable + *vaultDeltaAssets != + afterVault.assetsAvailable) + { + JLOG(j.fatal()) << "Invariant failed: deposit and assets " + "available must add up"; + 
result = false; + } + + return result; + } + case ttVAULT_WITHDRAW: { + bool result = true; + + XRPL_ASSERT( + !beforeVault_.empty(), + "ripple::ValidVault::finalize : withdrawal updated a " + "vault"); + auto const& beforeVault = beforeVault_[0]; + + auto const vaultDeltaAssets = deltaAssets(afterVault.pseudoId); + + if (!vaultDeltaAssets) + { + JLOG(j.fatal()) << "Invariant failed: withdrawal must " + "change vault balance"; + return false; // That's all we can do + } + + if (*vaultDeltaAssets >= zero) + { + JLOG(j.fatal()) << "Invariant failed: withdrawal must " + "decrease vault balance"; + result = false; + } + + // Any payments (including withdrawal) going to the issuer + // do not change their balance, but destroy funds instead. + bool const issuerWithdrawal = [&]() -> bool { + if (vaultAsset.native()) + return false; + auto const destination = + tx[~sfDestination].value_or(tx[sfAccount]); + return destination == vaultAsset.getIssuer(); + }(); + + if (!issuerWithdrawal) + { + auto const accountDeltaAssets = + [&]() -> std::optional { + if (auto ret = deltaAssets(tx[sfAccount]); ret) + { + // Compensate for transaction fee deduced from + // sfAccount + if (vaultAsset.native()) + *ret += fee.drops(); + if (*ret != zero) + return ret; + } + return std::nullopt; + }(); + + auto const otherAccountDelta = + [&]() -> std::optional { + if (auto const destination = tx[~sfDestination]; + destination && *destination != tx[sfAccount]) + return deltaAssets(*destination); + return std::nullopt; + }(); + + if (accountDeltaAssets.has_value() == + otherAccountDelta.has_value()) + { + JLOG(j.fatal()) << // + "Invariant failed: withdrawal must change one " + "destination balance"; + return false; + } + + auto const destinationDelta = // + accountDeltaAssets ? *accountDeltaAssets + : *otherAccountDelta; + + if (destinationDelta <= zero) + { + JLOG(j.fatal()) << // + "Invariant failed: withdrawal must increase " + "destination balance"; + result = false; + } + + if (*vaultDeltaAssets * -1 != destinationDelta) + { + JLOG(j.fatal()) << // + "Invariant failed: withdrawal must change vault " + "and destination balance by equal amount"; + result = false; + } + } + + auto const accountDeltaShares = deltaShares(tx[sfAccount]); + if (!accountDeltaShares) + { + JLOG(j.fatal()) << // + "Invariant failed: withdrawal must change depositor " + "shares"; + return false; + } + + if (*accountDeltaShares >= zero) + { + JLOG(j.fatal()) << // + "Invariant failed: withdrawal must decrease depositor " + "shares"; + result = false; + } + + auto const vaultDeltaShares = deltaShares(afterVault.pseudoId); + if (!vaultDeltaShares) + { + JLOG(j.fatal()) << // + "Invariant failed: withdrawal must change vault shares"; + return false; // That's all we can do + } + + if (*vaultDeltaShares * -1 != *accountDeltaShares) + { + JLOG(j.fatal()) << // + "Invariant failed: withdrawal must change depositor " + "and vault shares by equal amount"; + result = false; + } + + // Note, vaultBalance is negative (see check above) + if (beforeVault.assetsTotal + *vaultDeltaAssets != + afterVault.assetsTotal) + { + JLOG(j.fatal()) << "Invariant failed: withdrawal and " + "assets outstanding must add up"; + result = false; + } + + if (beforeVault.assetsAvailable + *vaultDeltaAssets != + afterVault.assetsAvailable) + { + JLOG(j.fatal()) << "Invariant failed: withdrawal and " + "assets available must add up"; + result = false; + } + + return result; + } + case ttVAULT_CLAWBACK: { + bool result = true; + + XRPL_ASSERT( + !beforeVault_.empty(), + 
"ripple::ValidVault::finalize : clawback updated a vault"); + auto const& beforeVault = beforeVault_[0]; + + if (vaultAsset.native() || + vaultAsset.getIssuer() != tx[sfAccount]) + { + JLOG(j.fatal()) << // + "Invariant failed: clawback may only be performed by " + "the asset issuer"; + return false; // That's all we can do + } + + auto const vaultDeltaAssets = deltaAssets(afterVault.pseudoId); + + if (!vaultDeltaAssets) + { + JLOG(j.fatal()) << // + "Invariant failed: clawback must change vault balance"; + return false; // That's all we can do + } + + if (*vaultDeltaAssets >= zero) + { + JLOG(j.fatal()) << // + "Invariant failed: clawback must decrease vault " + "balance"; + result = false; + } + + auto const accountDeltaShares = deltaShares(tx[sfHolder]); + if (!accountDeltaShares) + { + JLOG(j.fatal()) << // + "Invariant failed: clawback must change holder shares"; + return false; // That's all we can do + } + + if (*accountDeltaShares >= zero) + { + JLOG(j.fatal()) << // + "Invariant failed: clawback must decrease holder " + "shares"; + result = false; + } + + auto const vaultDeltaShares = deltaShares(afterVault.pseudoId); + if (!vaultDeltaShares) + { + JLOG(j.fatal()) << // + "Invariant failed: clawback must change vault shares"; + return false; // That's all we can do + } + + if (*vaultDeltaShares * -1 != *accountDeltaShares) + { + JLOG(j.fatal()) << // + "Invariant failed: clawback must change holder and " + "vault shares by equal amount"; + result = false; + } + + if (beforeVault.assetsTotal + *vaultDeltaAssets != + afterVault.assetsTotal) + { + JLOG(j.fatal()) << // + "Invariant failed: clawback and assets outstanding " + "must add up"; + result = false; + } + + if (beforeVault.assetsAvailable + *vaultDeltaAssets != + afterVault.assetsAvailable) + { + JLOG(j.fatal()) << // + "Invariant failed: clawback and assets available must " + "add up"; + result = false; + } + + return result; + } + + default: + // LCOV_EXCL_START + UNREACHABLE( + "ripple::ValidVault::finalize : unknown transaction type"); + return false; + // LCOV_EXCL_STOP + } + }(); + + if (!result) + { + // The comment at the top of this file starting with "assert(enforce)" + // explains this assert. 
+ XRPL_ASSERT(enforce, "ripple::ValidVault::finalize : vault invariants"); + return !enforce; + } + + return true; +} + } // namespace ripple diff --git a/src/xrpld/app/tx/detail/InvariantCheck.h b/src/xrpld/app/tx/detail/InvariantCheck.h index 5444f2f3a9..97d51f0fab 100644 --- a/src/xrpld/app/tx/detail/InvariantCheck.h +++ b/src/xrpld/app/tx/detail/InvariantCheck.h @@ -20,8 +20,10 @@ #ifndef RIPPLE_APP_TX_INVARIANTCHECK_H_INCLUDED #define RIPPLE_APP_TX_INVARIANTCHECK_H_INCLUDED +#include #include #include +#include #include #include #include @@ -732,6 +734,74 @@ private: beast::Journal const&) const; }; +/** + * @brief Invariants: Vault object and MPTokenIssuance for vault shares + * + * - vault deleted and vault created is empty + * - vault created must be linked to pseudo-account for shares and assets + * - vault must have MPTokenIssuance for shares + * - vault without shares outstanding must have no shares + * - loss unrealized does not exceed the difference between assets total and + * assets available + * - assets available do not exceed assets total + * - vault deposit increases assets and share issuance, and adds to: + * total assets, assets available, shares outstanding + * - vault withdrawal and clawback reduce assets and share issuance, and + * subtracts from: total assets, assets available, shares outstanding + * - vault set must not alter the vault assets or shares balance + * - no vault transaction can change loss unrealized (it's updated by loan + * transactions) + * + */ +class ValidVault +{ + Number static constexpr zero{}; + + struct Vault final + { + uint256 key = beast::zero; + Asset asset = {}; + AccountID pseudoId = {}; + uint192 shareMPTID = beast::zero; + Number assetsTotal = 0; + Number assetsAvailable = 0; + Number assetsMaximum = 0; + Number lossUnrealized = 0; + + Vault static make(SLE const&); + }; + + struct Shares final + { + MPTIssue share = {}; + std::uint64_t sharesTotal = 0; + std::uint64_t sharesMaximum = 0; + + Shares static make(SLE const&); + }; + + std::vector afterVault_ = {}; + std::vector afterMPTs_ = {}; + std::vector beforeVault_ = {}; + std::vector beforeMPTs_ = {}; + std::unordered_map deltas_ = {}; + +public: + void + visitEntry( + bool, + std::shared_ptr const&, + std::shared_ptr const&); + + bool + finalize( + STTx const&, + TER const, + XRPAmount const, + ReadView const&, + beast::Journal const&); +}; + // additional invariant checks can be declared above and then added to this // tuple using InvariantChecks = std::tuple< @@ -754,7 +824,8 @@ using InvariantChecks = std::tuple< ValidPermissionedDomain, ValidPermissionedDEX, ValidAMM, - ValidPseudoAccounts>; + ValidPseudoAccounts, + ValidVault>; /** * @brief get a tuple of all invariant checks diff --git a/src/xrpld/app/tx/detail/VaultDeposit.cpp b/src/xrpld/app/tx/detail/VaultDeposit.cpp index 75cf81b0b0..80382934ad 100644 --- a/src/xrpld/app/tx/detail/VaultDeposit.cpp +++ b/src/xrpld/app/tx/detail/VaultDeposit.cpp @@ -156,7 +156,10 @@ VaultDeposit::preclaim(PreclaimContext const& ctx) !isTesSuccess(ter)) return ter; - if (accountHolds( + // Asset issuer does not have any balance, they can just create funds by + // depositing in the vault. 
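+    // Hence the funds check below is skipped when the depositor is the issuer of a
+    // non-XRP vault asset.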
+ if ((vaultAsset.native() || vaultAsset.getIssuer() != account) && + accountHolds( ctx.view, account, vaultAsset, diff --git a/src/xrpld/app/tx/detail/VaultSet.cpp b/src/xrpld/app/tx/detail/VaultSet.cpp index 6057e40cfa..170a850a36 100644 --- a/src/xrpld/app/tx/detail/VaultSet.cpp +++ b/src/xrpld/app/tx/detail/VaultSet.cpp @@ -183,6 +183,9 @@ VaultSet::doApply() view().update(sleIssuance); } + // Note, we must update Vault object even if only DomainID is being updated + // in Issuance object. Otherwise it's really difficult for Vault invariants + // to verify the operation. view().update(vault); return tesSUCCESS; From 46ba8a28fecd0665900e3a1caf276a426ad6640a Mon Sep 17 00:00:00 2001 From: Bart Date: Thu, 9 Oct 2025 13:27:26 -0400 Subject: [PATCH 225/244] refactor: Update Conan dependencies: OpenSSL (#5873) This change bumps OpenSSL from 1.1.1w to 3.6.0. --- .github/scripts/strategy-matrix/linux.json | 12 ++++++------ cmake/RippledCompiler.cmake | 17 ++++++++++------- conan.lock | 2 +- conanfile.py | 2 +- 4 files changed, 18 insertions(+), 15 deletions(-) diff --git a/.github/scripts/strategy-matrix/linux.json b/.github/scripts/strategy-matrix/linux.json index 08313daf0a..bae5c57087 100644 --- a/.github/scripts/strategy-matrix/linux.json +++ b/.github/scripts/strategy-matrix/linux.json @@ -78,42 +78,42 @@ "distro_version": "9", "compiler_name": "gcc", "compiler_version": "12", - "image_sha": "0ab1e4c" + "image_sha": "d133ce3" }, { "distro_name": "rhel", "distro_version": "9", "compiler_name": "gcc", "compiler_version": "13", - "image_sha": "0ab1e4c" + "image_sha": "d133ce3" }, { "distro_name": "rhel", "distro_version": "9", "compiler_name": "gcc", "compiler_version": "14", - "image_sha": "0ab1e4c" + "image_sha": "d133ce3" }, { "distro_name": "rhel", "distro_version": "9", "compiler_name": "clang", "compiler_version": "any", - "image_sha": "0ab1e4c" + "image_sha": "d133ce3" }, { "distro_name": "rhel", "distro_version": "10", "compiler_name": "gcc", "compiler_version": "14", - "image_sha": "0ab1e4c" + "image_sha": "d133ce3" }, { "distro_name": "rhel", "distro_version": "10", "compiler_name": "clang", "compiler_version": "any", - "image_sha": "0ab1e4c" + "image_sha": "d133ce3" }, { "distro_name": "ubuntu", diff --git a/cmake/RippledCompiler.cmake b/cmake/RippledCompiler.cmake index bc3a62a48c..4d16222cbe 100644 --- a/cmake/RippledCompiler.cmake +++ b/cmake/RippledCompiler.cmake @@ -16,13 +16,16 @@ set(CMAKE_CXX_EXTENSIONS OFF) target_compile_definitions (common INTERFACE $<$:DEBUG _DEBUG> - $<$,$>>:NDEBUG>) - # ^^^^ NOTE: CMAKE release builds already have NDEBUG - # defined, so no need to add it explicitly except for - # this special case of (profile ON) and (assert OFF) - # -- presumably this is because we don't want profile - # builds asserting unless asserts were specifically - # requested + #[===[ + NOTE: CMAKE release builds already have NDEBUG defined, so no need to add it + explicitly except for the special case of (profile ON) and (assert OFF). + Presumably this is because we don't want profile builds asserting unless + asserts were specifically requested. + ]===] + $<$,$>>:NDEBUG> + # TODO: Remove once we have migrated functions from OpenSSL 1.x to 3.x. 
+ OPENSSL_SUPPRESS_DEPRECATED +) if (MSVC) # remove existing exception flag since we set it to -EHa diff --git a/conan.lock b/conan.lock index ec790e16ce..9f52c606a7 100644 --- a/conan.lock +++ b/conan.lock @@ -9,7 +9,7 @@ "rocksdb/10.0.1#85537f46e538974d67da0c3977de48ac%1756234304.347", "re2/20230301#dfd6e2bf050eb90ddd8729cfb4c844a4%1756234257.976", "protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1756234251.614", - "openssl/1.1.1w#a8f0792d7c5121b954578a7149d23e03%1756223730.729", + "openssl/3.6.0#89e8af1d4a21afcac0557079d23d8890%1759746682.365", "nudb/2.0.9#c62cfd501e57055a7e0d8ee3d5e5427d%1756234237.107", "lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1756234228.999", "libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1756223727.64", diff --git a/conanfile.py b/conanfile.py index 3146b887e0..4cd9bd3c5a 100644 --- a/conanfile.py +++ b/conanfile.py @@ -27,7 +27,7 @@ class Xrpl(ConanFile): 'grpc/1.50.1', 'libarchive/3.8.1', 'nudb/2.0.9', - 'openssl/1.1.1w', + 'openssl/3.6.0', 'soci/4.0.3', 'zlib/1.3.1', ] From 3c88786bb0d2c10a04ab991472224aa492d15aee Mon Sep 17 00:00:00 2001 From: Bart Date: Fri, 10 Oct 2025 10:18:24 -0400 Subject: [PATCH 226/244] refactor: Downgrades OpenSSL to 3.5.4 (#5878) This change downgrades OpenSSL 3.6.0 to 3.5.4. To avoid potential zero-day issues in a new major version of OpenSSL, 3.6.0, it is safer to stick with 3.5.4. While 3.6.0 has some nice new features, such as improved SHA512 hashing, it also introduces new features that could contain bugs. In contrast, 3.5.4 has seen quite a few bug fixes over 3.5.0 and has been used in the wild for a while now. --- conan.lock | 2 +- conanfile.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/conan.lock b/conan.lock index 9f52c606a7..cb25777423 100644 --- a/conan.lock +++ b/conan.lock @@ -9,7 +9,7 @@ "rocksdb/10.0.1#85537f46e538974d67da0c3977de48ac%1756234304.347", "re2/20230301#dfd6e2bf050eb90ddd8729cfb4c844a4%1756234257.976", "protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1756234251.614", - "openssl/3.6.0#89e8af1d4a21afcac0557079d23d8890%1759746682.365", + "openssl/3.5.4#a1d5835cc6ed5c5b8f3cd5b9b5d24205%1759746684.671", "nudb/2.0.9#c62cfd501e57055a7e0d8ee3d5e5427d%1756234237.107", "lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1756234228.999", "libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1756223727.64", diff --git a/conanfile.py b/conanfile.py index 4cd9bd3c5a..7f8ab24fbd 100644 --- a/conanfile.py +++ b/conanfile.py @@ -27,7 +27,7 @@ class Xrpl(ConanFile): 'grpc/1.50.1', 'libarchive/3.8.1', 'nudb/2.0.9', - 'openssl/3.6.0', + 'openssl/3.5.4', 'soci/4.0.3', 'zlib/1.3.1', ] From 8456b8275e9d0491be77e2c59c831bdabab27510 Mon Sep 17 00:00:00 2001 From: Bart Date: Fri, 10 Oct 2025 12:22:42 -0400 Subject: [PATCH 227/244] chore: Add wildcard to support triggering for release pipelines (#5879) This change adds a wildcard to the release branch in the CI pipeline spec. Namely, after adopting an improved release process, with release branches that now look like release-X.Y, the trigger pipeline was no longer running as it only searched for an exact match to release. 
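For reference, a minimal sketch of how such a glob behaves in a GitHub Actions trigger; the workflow name and job here are invented for illustration, and only the `branches` filter mirrors the change in the hunk below (a bare `release` entry matches only a branch named exactly `release`, while `release*` also matches `release-2.5`, `release-2.6`, and so on):

```yaml
# Illustrative only: the name and job are hypothetical; the branches filter
# shows the glob behaviour this patch relies on.
name: trigger-example

on:
  push:
    branches:
      - "develop"
      - "release*" # matches "release", "release-2.5", "release-2.6", ...
      - "master"

jobs:
  show-ref:
    runs-on: ubuntu-latest
    steps:
      - run: echo "Triggered by push to ${{ github.ref_name }}"
```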
--- .github/workflows/on-trigger.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/on-trigger.yml b/.github/workflows/on-trigger.yml index b06d475a4d..9d2ea81520 100644 --- a/.github/workflows/on-trigger.yml +++ b/.github/workflows/on-trigger.yml @@ -9,9 +9,9 @@ name: Trigger on: push: branches: - - develop - - release - - master + - "develop" + - "release*" + - "master" paths: # These paths are unique to `on-trigger.yml`. - ".github/workflows/reusable-check-missing-commits.yml" From 8637d606a4dca70fa2e3a4229aa8361e99febb6e Mon Sep 17 00:00:00 2001 From: Mayukha Vadari Date: Mon, 13 Oct 2025 10:56:18 -0400 Subject: [PATCH 228/244] chore: Exclude code/unreachable transaction code from Codecov (#5847) This change excludes from Codecov unreachable/difficult-to-test transaction code (such as `tecINTERNAL`) and old code (from amendments that have been enabled for a long time that are only around for ledger replay reasons). This removes about 200 lines of misses and increases the Codecov coverage by 0.3% (79.2% to 79.5%). --- src/libxrpl/ledger/CredentialHelpers.cpp | 12 ++-- src/libxrpl/ledger/View.cpp | 63 ++++++++++++------- src/xrpld/app/misc/NetworkOPs.cpp | 21 ++++++- src/xrpld/app/tx/detail/AMMBid.cpp | 2 + src/xrpld/app/tx/detail/AMMCreate.cpp | 4 +- src/xrpld/app/tx/detail/AMMWithdraw.cpp | 4 +- src/xrpld/app/tx/detail/CancelCheck.cpp | 4 ++ src/xrpld/app/tx/detail/CancelOffer.cpp | 2 +- src/xrpld/app/tx/detail/CashCheck.cpp | 10 +++ src/xrpld/app/tx/detail/Clawback.cpp | 2 +- src/xrpld/app/tx/detail/CreateCheck.cpp | 6 +- src/xrpld/app/tx/detail/CreateOffer.cpp | 4 ++ src/xrpld/app/tx/detail/CreateTicket.cpp | 6 +- src/xrpld/app/tx/detail/Credentials.cpp | 12 ++-- src/xrpld/app/tx/detail/DID.cpp | 8 ++- src/xrpld/app/tx/detail/DeleteAccount.cpp | 6 +- src/xrpld/app/tx/detail/DepositPreauth.cpp | 14 +++-- src/xrpld/app/tx/detail/Escrow.cpp | 46 ++++++++++---- src/xrpld/app/tx/detail/LedgerStateFix.cpp | 4 +- src/xrpld/app/tx/detail/MPTokenAuthorize.cpp | 2 +- .../app/tx/detail/MPTokenIssuanceDestroy.cpp | 4 +- .../app/tx/detail/MPTokenIssuanceSet.cpp | 2 +- .../app/tx/detail/NFTokenAcceptOffer.cpp | 12 ++-- .../app/tx/detail/NFTokenCancelOffer.cpp | 2 + src/xrpld/app/tx/detail/NFTokenMint.cpp | 2 +- src/xrpld/app/tx/detail/NFTokenUtils.cpp | 4 +- src/xrpld/app/tx/detail/PayChan.cpp | 14 +++-- src/xrpld/app/tx/detail/Payment.cpp | 2 +- .../tx/detail/PermissionedDomainDelete.cpp | 8 ++- src/xrpld/app/tx/detail/SetAccount.cpp | 2 +- src/xrpld/app/tx/detail/SetRegularKey.cpp | 2 +- src/xrpld/app/tx/detail/SetSignerList.cpp | 8 ++- src/xrpld/app/tx/detail/SetTrust.cpp | 2 +- src/xrpld/app/tx/detail/Transactor.cpp | 8 +++ src/xrpld/app/tx/detail/XChainBridge.cpp | 55 +++++++++------- src/xrpld/rpc/detail/ServerHandler.cpp | 4 ++ src/xrpld/rpc/handlers/PayChanClaim.cpp | 2 + src/xrpld/rpc/handlers/Random.cpp | 2 +- 38 files changed, 246 insertions(+), 121 deletions(-) diff --git a/src/libxrpl/ledger/CredentialHelpers.cpp b/src/libxrpl/ledger/CredentialHelpers.cpp index 965d6f6911..4c625f5eee 100644 --- a/src/libxrpl/ledger/CredentialHelpers.cpp +++ b/src/libxrpl/ledger/CredentialHelpers.cpp @@ -77,19 +77,23 @@ deleteSLE( AccountID const& account, SField const& node, bool isOwner) -> TER { auto const sleAccount = view.peek(keylet::account(account)); if (!sleAccount) - { // LCOV_EXCL_START + { + // LCOV_EXCL_START JLOG(j.fatal()) << "Internal error: can't retrieve Owner account."; return tecINTERNAL; - } // LCOV_EXCL_STOP + // LCOV_EXCL_STOP + } // 
Remove object from owner directory std::uint64_t const page = sleCredential->getFieldU64(node); if (!view.dirRemove( keylet::ownerDir(account), page, sleCredential->key(), false)) - { // LCOV_EXCL_START + { + // LCOV_EXCL_START JLOG(j.fatal()) << "Unable to delete Credential from owner."; return tefBAD_LEDGER; - } // LCOV_EXCL_STOP + // LCOV_EXCL_STOP + } if (isOwner) adjustOwnerCount(view, sleAccount, -1, j); diff --git a/src/libxrpl/ledger/View.cpp b/src/libxrpl/ledger/View.cpp index 89d8137ac7..ace7a34f81 100644 --- a/src/libxrpl/ledger/View.cpp +++ b/src/libxrpl/ledger/View.cpp @@ -1292,7 +1292,7 @@ authorizeMPToken( { auto const sleAcct = view.peek(keylet::account(account)); if (!sleAcct) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE // If the account that submitted the tx is a holder // Note: `account_` is holder's account @@ -1357,17 +1357,17 @@ authorizeMPToken( auto const sleMptIssuance = view.read(keylet::mptIssuance(mptIssuanceID)); if (!sleMptIssuance) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE // If the account that submitted this tx is the issuer of the MPT // Note: `account_` is issuer's account // `holderID` is holder's account if (account != (*sleMptIssuance)[sfIssuer]) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE auto const sleMpt = view.peek(keylet::mptoken(mptIssuanceID, *holderID)); if (!sleMpt) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE std::uint32_t const flagsIn = sleMpt->getFieldU32(sfFlags); std::uint32_t flagsOut = flagsIn; @@ -1424,7 +1424,7 @@ trustCreate( describeOwnerDir(uLowAccountID)); if (!lowNode) - return tecDIR_FULL; + return tecDIR_FULL; // LCOV_EXCL_LINE auto highNode = view.dirInsert( keylet::ownerDir(uHighAccountID), @@ -1432,14 +1432,14 @@ trustCreate( describeOwnerDir(uHighAccountID)); if (!highNode) - return tecDIR_FULL; + return tecDIR_FULL; // LCOV_EXCL_LINE bool const bSetDst = saLimit.getIssuer() == uDstAccountID; bool const bSetHigh = bSrcHigh ^ bSetDst; XRPL_ASSERT(sleAccount, "ripple::trustCreate : non-null SLE"); if (!sleAccount) - return tefINTERNAL; + return tefINTERNAL; // LCOV_EXCL_LINE XRPL_ASSERT( sleAccount->getAccountID(sfAccount) == @@ -1518,10 +1518,12 @@ removeEmptyHolding( { auto const sle = view.read(keylet::account(accountID)); if (!sle) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE + auto const balance = sle->getFieldAmount(sfBalance); if (balance.xrp() != 0) return tecHAS_OBLIGATIONS; + return tesSUCCESS; } @@ -1539,7 +1541,8 @@ removeEmptyHolding( auto sleLowAccount = view.peek(keylet::account(line->at(sfLowLimit)->getIssuer())); if (!sleLowAccount) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE + adjustOwnerCount(view, sleLowAccount, -1, journal); // It's not really necessary to clear the reserve flag, since the line // is about to be deleted, but this will make the metadata reflect an @@ -1553,7 +1556,8 @@ removeEmptyHolding( auto sleHighAccount = view.peek(keylet::account(line->at(sfHighLimit)->getIssuer())); if (!sleHighAccount) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE + adjustOwnerCount(view, sleHighAccount, -1, journal); // It's not really necessary to clear the reserve flag, since the line // is about to be deleted, but this will make the metadata reflect an @@ -1613,7 +1617,7 @@ trustDelete( sleRippleState->key(), false)) { - return tefBAD_LEDGER; + return tefBAD_LEDGER; // LCOV_EXCL_LINE } JLOG(j.trace()) << "trustDelete: Deleting ripple line: high"; @@ -1624,7 +1628,7 @@ trustDelete( 
sleRippleState->key(), false)) { - return tefBAD_LEDGER; + return tefBAD_LEDGER; // LCOV_EXCL_LINE } JLOG(j.trace()) << "trustDelete: Deleting ripple line: state"; @@ -1650,7 +1654,7 @@ offerDelete(ApplyView& view, std::shared_ptr const& sle, beast::Journal j) offerIndex, false)) { - return tefBAD_LEDGER; + return tefBAD_LEDGER; // LCOV_EXCL_LINE } if (!view.dirRemove( @@ -1659,7 +1663,7 @@ offerDelete(ApplyView& view, std::shared_ptr const& sle, beast::Journal j) offerIndex, false)) { - return tefBAD_LEDGER; + return tefBAD_LEDGER; // LCOV_EXCL_LINE } if (sle->isFieldPresent(sfAdditionalBooks)) @@ -1823,7 +1827,7 @@ rippleCreditIOU( auto const sleAccount = view.peek(keylet::account(uReceiverID)); if (!sleAccount) - return tefINTERNAL; + return tefINTERNAL; // LCOV_EXCL_LINE bool const noRipple = (sleAccount->getFlags() & lsfDefaultRipple) == 0; @@ -1913,14 +1917,16 @@ accountSendIOU( { if (saAmount < beast::zero || saAmount.holds()) { - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE } } else { + // LCOV_EXCL_START XRPL_ASSERT( saAmount >= beast::zero && !saAmount.holds(), "ripple::accountSendIOU : minimum amount and not MPT"); + // LCOV_EXCL_STOP } /* If we aren't sending anything or if the sender is the same as the @@ -1977,8 +1983,10 @@ accountSendIOU( { // VFALCO Its laborious to have to mutate the // TER based on params everywhere + // LCOV_EXCL_START terResult = view.open() ? TER{telFAILED_PROCESSING} : TER{tecFAILED_PROCESSING}; + // LCOV_EXCL_STOP } else { @@ -2065,7 +2073,7 @@ rippleCreditMPT( view.update(sleIssuance); } else - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE } else { @@ -2325,7 +2333,7 @@ issueIOU( auto const receiverAccount = view.peek(keylet::account(account)); if (!receiverAccount) - return tefINTERNAL; + return tefINTERNAL; // LCOV_EXCL_LINE bool noRipple = (receiverAccount->getFlags() & lsfDefaultRipple) == 0; @@ -2413,11 +2421,13 @@ redeemIOU( // In order to hold an IOU, a trust line *MUST* exist to track the // balance. If it doesn't, then something is very wrong. Don't try // to continue. + // LCOV_EXCL_START JLOG(j.fatal()) << "redeemIOU: " << to_string(account) << " attempts to redeem " << amount.getFullText() << " but no trust line exists!"; return tefINTERNAL; + // LCOV_EXCL_STOP } TER @@ -2437,7 +2447,7 @@ transferXRP( SLE::pointer const sender = view.peek(keylet::account(from)); SLE::pointer const receiver = view.peek(keylet::account(to)); if (!sender || !receiver) - return tefINTERNAL; + return tefINTERNAL; // LCOV_EXCL_LINE JLOG(j.trace()) << "transferXRP: " << to_string(from) << " -> " << to_string(to) << ") : " << amount.getFullText(); @@ -2447,8 +2457,10 @@ transferXRP( // VFALCO Its unfortunate we have to keep // mutating these TER everywhere // FIXME: this logic should be moved to callers maybe? + // LCOV_EXCL_START return view.open() ? TER{telFAILED_PROCESSING} : TER{tecFAILED_PROCESSING}; + // LCOV_EXCL_STOP } // Decrement XRP balance. @@ -2729,11 +2741,13 @@ cleanupOnAccountDelete( if (!sleItem) { // Directory node has an invalid index. Bail out. 
+ // LCOV_EXCL_START JLOG(j.fatal()) << "DeleteAccount: Directory node in ledger " << view.seq() << " has index to object that is missing: " << to_string(dirEntry); return tefBAD_LEDGER; + // LCOV_EXCL_STOP } LedgerEntryType const nodeType{safe_cast( @@ -2766,9 +2780,11 @@ cleanupOnAccountDelete( "ripple::cleanupOnAccountDelete : minimum dir entries"); if (uDirEntry == 0) { + // LCOV_EXCL_START JLOG(j.error()) << "DeleteAccount iterator re-validation failed."; return tefBAD_LEDGER; + // LCOV_EXCL_STOP } if (skipEntry == SkipEntry::No) uDirEntry--; @@ -2788,7 +2804,7 @@ deleteAMMTrustLine( beast::Journal j) { if (!sleState || sleState->getType() != ltRIPPLE_STATE) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE auto const& [low, high] = std::minmax( sleState->getFieldAmount(sfLowLimit).getIssuer(), @@ -2796,13 +2812,14 @@ deleteAMMTrustLine( auto sleLow = view.peek(keylet::account(low)); auto sleHigh = view.peek(keylet::account(high)); if (!sleLow || !sleHigh) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE + bool const ammLow = sleLow->isFieldPresent(sfAMMID); bool const ammHigh = sleHigh->isFieldPresent(sfAMMID); // can't both be AMM if (ammLow && ammHigh) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE // at least one must be if (!ammLow && !ammHigh) @@ -2822,7 +2839,7 @@ deleteAMMTrustLine( auto const uFlags = !ammLow ? lsfLowReserve : lsfHighReserve; if (!(sleState->getFlags() & uFlags)) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE adjustOwnerCount(view, !ammLow ? sleLow : sleHigh, -1, j); diff --git a/src/xrpld/app/misc/NetworkOPs.cpp b/src/xrpld/app/misc/NetworkOPs.cpp index e72b2732d0..3154426696 100644 --- a/src/xrpld/app/misc/NetworkOPs.cpp +++ b/src/xrpld/app/misc/NetworkOPs.cpp @@ -3728,6 +3728,9 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo) if (databaseType == DatabaseType::None) { + // LCOV_EXCL_START + UNREACHABLE( + "ripple::NetworkOPsImp::addAccountHistoryJob : no database"); JLOG(m_journal.error()) << "AccountHistory job for account " << toBase58(subInfo.index_->accountId_) << " no database"; @@ -3737,6 +3740,7 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo) unsubAccountHistory(sptr, subInfo.index_->accountId_, false); } return; + // LCOV_EXCL_STOP } app_.getJobQueue().addJob( @@ -3836,7 +3840,7 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo) // LCOV_EXCL_START default: { UNREACHABLE( - "ripple::NetworkOPsImp::addAccountHistoryJob::" + "ripple::NetworkOPsImp::addAccountHistoryJob : " "getMoreTxns : invalid database type"); return {}; } @@ -3901,11 +3905,16 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo) getMoreTxns(startLedgerSeq, lastLedgerSeq, marker); if (!dbResult) { + // LCOV_EXCL_START + UNREACHABLE( + "ripple::NetworkOPsImp::addAccountHistoryJob : " + "getMoreTxns failed"); JLOG(m_journal.debug()) << "AccountHistory job for account " << toBase58(accountId) << " getMoreTxns failed."; send(rpcError(rpcINTERNAL), true); return; + // LCOV_EXCL_STOP } auto const& txns = dbResult->first; @@ -3928,22 +3937,32 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo) tx->getLedger()); if (!curTxLedger) { + // LCOV_EXCL_START + UNREACHABLE( + "ripple::NetworkOPsImp::addAccountHistoryJob : " + "getLedgerBySeq failed"); JLOG(m_journal.debug()) << "AccountHistory job for account " << toBase58(accountId) << " no ledger."; send(rpcError(rpcINTERNAL), true); return; + // LCOV_EXCL_STOP } std::shared_ptr 
stTxn = tx->getSTransaction(); if (!stTxn) { + // LCOV_EXCL_START + UNREACHABLE( + "NetworkOPsImp::addAccountHistoryJob : " + "getSTransaction failed"); JLOG(m_journal.debug()) << "AccountHistory job for account " << toBase58(accountId) << " getSTransaction failed."; send(rpcError(rpcINTERNAL), true); return; + // LCOV_EXCL_STOP } auto const mRef = std::ref(*meta); diff --git a/src/xrpld/app/tx/detail/AMMBid.cpp b/src/xrpld/app/tx/detail/AMMBid.cpp index 769668b07b..028c1aac8b 100644 --- a/src/xrpld/app/tx/detail/AMMBid.cpp +++ b/src/xrpld/app/tx/detail/AMMBid.cpp @@ -247,10 +247,12 @@ applyBid( if (saBurn >= lptAMMBalance) { // This error case should never occur. + // LCOV_EXCL_START JLOG(ctx_.journal.fatal()) << "AMM Bid: LP Token burn exceeds AMM balance " << burn << " " << lptAMMBalance; return tecINTERNAL; + // LCOV_EXCL_STOP } auto res = redeemIOU(sb, account_, saBurn, lpTokens.issue(), ctx_.journal); diff --git a/src/xrpld/app/tx/detail/AMMCreate.cpp b/src/xrpld/app/tx/detail/AMMCreate.cpp index 63e20b42fb..a515783fb6 100644 --- a/src/xrpld/app/tx/detail/AMMCreate.cpp +++ b/src/xrpld/app/tx/detail/AMMCreate.cpp @@ -197,7 +197,7 @@ AMMCreate::preclaim(PreclaimContext const& ctx) return tesSUCCESS; if (auto const sle = ctx.view.read(keylet::account(issue.account)); !sle) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE else if (sle->getFlags() & lsfAllowTrustLineClawback) return tecNO_PERMISSION; return tesSUCCESS; @@ -291,7 +291,7 @@ applyCreate( if (SLE::pointer sleRippleState = sb.peek(keylet::line(accountId, amount.issue())); !sleRippleState) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE else { auto const flags = sleRippleState->getFlags(); diff --git a/src/xrpld/app/tx/detail/AMMWithdraw.cpp b/src/xrpld/app/tx/detail/AMMWithdraw.cpp index f5af9dfb9c..8b6b58013a 100644 --- a/src/xrpld/app/tx/detail/AMMWithdraw.cpp +++ b/src/xrpld/app/tx/detail/AMMWithdraw.cpp @@ -196,9 +196,11 @@ AMMWithdraw::preclaim(PreclaimContext const& ctx) if (amountBalance <= beast::zero || amount2Balance <= beast::zero || lptAMMBalance < beast::zero) { + // LCOV_EXCL_START JLOG(ctx.j.debug()) << "AMM Withdraw: reserves or tokens balance is zero."; - return tecINTERNAL; // LCOV_EXCL_LINE + return tecINTERNAL; + // LCOV_EXCL_STOP } auto const ammAccountID = ammSle->getAccountID(sfAccount); diff --git a/src/xrpld/app/tx/detail/CancelCheck.cpp b/src/xrpld/app/tx/detail/CancelCheck.cpp index f1a9b42a89..9dc792307c 100644 --- a/src/xrpld/app/tx/detail/CancelCheck.cpp +++ b/src/xrpld/app/tx/detail/CancelCheck.cpp @@ -93,8 +93,10 @@ CancelCheck::doApply() if (!view().dirRemove( keylet::ownerDir(dstId), page, sleCheck->key(), true)) { + // LCOV_EXCL_START JLOG(j_.fatal()) << "Unable to delete check from destination."; return tefBAD_LEDGER; + // LCOV_EXCL_STOP } } { @@ -102,8 +104,10 @@ CancelCheck::doApply() if (!view().dirRemove( keylet::ownerDir(srcId), page, sleCheck->key(), true)) { + // LCOV_EXCL_START JLOG(j_.fatal()) << "Unable to delete check from owner."; return tefBAD_LEDGER; + // LCOV_EXCL_STOP } } diff --git a/src/xrpld/app/tx/detail/CancelOffer.cpp b/src/xrpld/app/tx/detail/CancelOffer.cpp index e7ec28ce17..ff5bfd085b 100644 --- a/src/xrpld/app/tx/detail/CancelOffer.cpp +++ b/src/xrpld/app/tx/detail/CancelOffer.cpp @@ -68,7 +68,7 @@ CancelOffer::doApply() auto const sle = view().read(keylet::account(account_)); if (!sle) - return tefINTERNAL; + return tefINTERNAL; // LCOV_EXCL_LINE if (auto sleOffer = view().peek(keylet::offer(account_, offerSequence))) { diff --git 
a/src/xrpld/app/tx/detail/CashCheck.cpp b/src/xrpld/app/tx/detail/CashCheck.cpp index f8ab6189a3..73dedba170 100644 --- a/src/xrpld/app/tx/detail/CashCheck.cpp +++ b/src/xrpld/app/tx/detail/CashCheck.cpp @@ -87,8 +87,10 @@ CashCheck::preclaim(PreclaimContext const& ctx) { // They wrote a check to themselves. This should be caught when // the check is created, but better late than never. + // LCOV_EXCL_START JLOG(ctx.j.error()) << "Malformed transaction: Cashing check to self."; return tecINTERNAL; + // LCOV_EXCL_STOP } { auto const sleSrc = ctx.view.read(keylet::account(srcId)); @@ -245,17 +247,21 @@ CashCheck::doApply() auto sleCheck = psb.peek(keylet::check(ctx_.tx[sfCheckID])); if (!sleCheck) { + // LCOV_EXCL_START JLOG(j_.fatal()) << "Precheck did not verify check's existence."; return tecFAILED_PROCESSING; + // LCOV_EXCL_STOP } AccountID const srcId{sleCheck->getAccountID(sfAccount)}; if (!psb.exists(keylet::account(srcId)) || !psb.exists(keylet::account(account_))) { + // LCOV_EXCL_START JLOG(ctx_.journal.fatal()) << "Precheck did not verify source or destination's existence."; return tecFAILED_PROCESSING; + // LCOV_EXCL_STOP } // Preclaim already checked that source has at least the requested @@ -478,8 +484,10 @@ CashCheck::doApply() sleCheck->key(), true)) { + // LCOV_EXCL_START JLOG(j_.fatal()) << "Unable to delete check from destination."; return tefBAD_LEDGER; + // LCOV_EXCL_STOP } // Remove check from check owner's directory. @@ -489,8 +497,10 @@ CashCheck::doApply() sleCheck->key(), true)) { + // LCOV_EXCL_START JLOG(j_.fatal()) << "Unable to delete check from owner."; return tefBAD_LEDGER; + // LCOV_EXCL_STOP } // If we succeeded, update the check owner's reserve. diff --git a/src/xrpld/app/tx/detail/Clawback.cpp b/src/xrpld/app/tx/detail/Clawback.cpp index b346e4a1c1..1c279389ec 100644 --- a/src/xrpld/app/tx/detail/Clawback.cpp +++ b/src/xrpld/app/tx/detail/Clawback.cpp @@ -235,7 +235,7 @@ applyHelper(ApplyContext& ctx) // Replace the `issuer` field with issuer's account clawAmount.setIssuer(issuer); if (holder == issuer) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE // Get the spendable balance. Must use `accountHolds`. STAmount const spendableAmount = accountHolds( diff --git a/src/xrpld/app/tx/detail/CreateCheck.cpp b/src/xrpld/app/tx/detail/CreateCheck.cpp index 57f3a92255..54bbdbdc25 100644 --- a/src/xrpld/app/tx/detail/CreateCheck.cpp +++ b/src/xrpld/app/tx/detail/CreateCheck.cpp @@ -159,7 +159,7 @@ CreateCheck::doApply() { auto const sle = view().peek(keylet::account(account_)); if (!sle) - return tefINTERNAL; + return tefINTERNAL; // LCOV_EXCL_LINE // A check counts against the reserve of the issuing account, but we // check the starting balance because we want to allow dipping into the @@ -209,7 +209,7 @@ CreateCheck::doApply() << (page ? "success" : "failure"); if (!page) - return tecDIR_FULL; + return tecDIR_FULL; // LCOV_EXCL_LINE sleCheck->setFieldU64(sfDestinationNode, *page); } @@ -225,7 +225,7 @@ CreateCheck::doApply() << (page ? 
"success" : "failure"); if (!page) - return tecDIR_FULL; + return tecDIR_FULL; // LCOV_EXCL_LINE sleCheck->setFieldU64(sfOwnerNode, *page); } diff --git a/src/xrpld/app/tx/detail/CreateOffer.cpp b/src/xrpld/app/tx/detail/CreateOffer.cpp index 86750eb51d..a503f913fa 100644 --- a/src/xrpld/app/tx/detail/CreateOffer.cpp +++ b/src/xrpld/app/tx/detail/CreateOffer.cpp @@ -848,9 +848,11 @@ CreateOffer::applyGuts(Sandbox& sb, Sandbox& sbCancel) if (!ownerNode) { + // LCOV_EXCL_START JLOG(j_.debug()) << "final result: failed to add offer to owner's directory"; return {tecDIR_FULL, true}; + // LCOV_EXCL_STOP } // Update owner count. @@ -894,8 +896,10 @@ CreateOffer::applyGuts(Sandbox& sb, Sandbox& sbCancel) if (!bookNode) { + // LCOV_EXCL_START JLOG(j_.debug()) << "final result: failed to add offer to book"; return {tecDIR_FULL, true}; + // LCOV_EXCL_STOP } auto sleOffer = std::make_shared(offer_index); diff --git a/src/xrpld/app/tx/detail/CreateTicket.cpp b/src/xrpld/app/tx/detail/CreateTicket.cpp index d48da2d780..f0b62bc67f 100644 --- a/src/xrpld/app/tx/detail/CreateTicket.cpp +++ b/src/xrpld/app/tx/detail/CreateTicket.cpp @@ -76,7 +76,7 @@ CreateTicket::doApply() { SLE::pointer const sleAccountRoot = view().peek(keylet::account(account_)); if (!sleAccountRoot) - return tefINTERNAL; + return tefINTERNAL; // LCOV_EXCL_LINE // Each ticket counts against the reserve of the issuing account, but we // check the starting balance because we want to allow dipping into the @@ -102,7 +102,7 @@ CreateTicket::doApply() // increment the account root Sequence. if (std::uint32_t const txSeq = ctx_.tx[sfSequence]; txSeq != 0 && txSeq != (firstTicketSeq - 1)) - return tefINTERNAL; + return tefINTERNAL; // LCOV_EXCL_LINE for (std::uint32_t i = 0; i < ticketCount; ++i) { @@ -123,7 +123,7 @@ CreateTicket::doApply() << ": " << (page ? "success" : "failure"); if (!page) - return tecDIR_FULL; + return tecDIR_FULL; // LCOV_EXCL_LINE sleTicket->setFieldU64(sfOwnerNode, *page); } diff --git a/src/xrpld/app/tx/detail/Credentials.cpp b/src/xrpld/app/tx/detail/Credentials.cpp index 4b77163c5d..b6471d6a20 100644 --- a/src/xrpld/app/tx/detail/Credentials.cpp +++ b/src/xrpld/app/tx/detail/Credentials.cpp @@ -117,7 +117,7 @@ CredentialCreate::doApply() auto const sleCred = std::make_shared(credentialKey); if (!sleCred) - return tefINTERNAL; + return tefINTERNAL; // LCOV_EXCL_LINE auto const optExp = ctx_.tx[~sfExpiration]; if (optExp) @@ -137,7 +137,7 @@ CredentialCreate::doApply() auto const sleIssuer = view().peek(keylet::account(account_)); if (!sleIssuer) - return tefINTERNAL; + return tefINTERNAL; // LCOV_EXCL_LINE { STAmount const reserve{view().fees().accountReserve( @@ -162,7 +162,7 @@ CredentialCreate::doApply() << to_string(credentialKey.key) << ": " << (page ? "success" : "failure"); if (!page) - return tecDIR_FULL; + return tecDIR_FULL; // LCOV_EXCL_LINE sleCred->setFieldU64(sfIssuerNode, *page); adjustOwnerCount(view(), sleIssuer, 1, j_); @@ -182,7 +182,7 @@ CredentialCreate::doApply() << to_string(credentialKey.key) << ": " << (page ? 
"success" : "failure"); if (!page) - return tecDIR_FULL; + return tecDIR_FULL; // LCOV_EXCL_LINE sleCred->setFieldU64(sfSubjectNode, *page); view().update(view().peek(keylet::account(subject))); } @@ -258,7 +258,7 @@ CredentialDelete::doApply() auto const sleCred = view().peek(keylet::credential(subject, issuer, credType)); if (!sleCred) - return tefINTERNAL; + return tefINTERNAL; // LCOV_EXCL_LINE if ((subject != account_) && (issuer != account_) && !checkExpired(sleCred, ctx_.view().info().parentCloseTime)) @@ -342,7 +342,7 @@ CredentialAccept::doApply() auto const sleIssuer = view().peek(keylet::account(issuer)); if (!sleSubject || !sleIssuer) - return tefINTERNAL; + return tefINTERNAL; // LCOV_EXCL_LINE { STAmount const reserve{view().fees().accountReserve( diff --git a/src/xrpld/app/tx/detail/DID.cpp b/src/xrpld/app/tx/detail/DID.cpp index b38b207d36..f466777920 100644 --- a/src/xrpld/app/tx/detail/DID.cpp +++ b/src/xrpld/app/tx/detail/DID.cpp @@ -76,7 +76,7 @@ addSLE( { auto const sleAccount = ctx.view().peek(keylet::account(owner)); if (!sleAccount) - return tefINTERNAL; + return tefINTERNAL; // LCOV_EXCL_LINE // Check reserve availability for new object creation { @@ -96,7 +96,7 @@ addSLE( auto page = ctx.view().dirInsert( keylet::ownerDir(owner), sle->key(), describeOwnerDir(owner)); if (!page) - return tecDIR_FULL; + return tecDIR_FULL; // LCOV_EXCL_LINE (*sle)[sfOwnerNode] = *page; } adjustOwnerCount(ctx.view(), sleAccount, 1, ctx.journal); @@ -189,13 +189,15 @@ DIDDelete::deleteSLE( if (!view.dirRemove( keylet::ownerDir(owner), (*sle)[sfOwnerNode], sle->key(), true)) { + // LCOV_EXCL_START JLOG(j.fatal()) << "Unable to delete DID Token from owner."; return tefBAD_LEDGER; + // LCOV_EXCL_STOP } auto const sleOwner = view.peek(keylet::account(owner)); if (!sleOwner) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE adjustOwnerCount(view, sleOwner, -1, j); view.update(sleOwner); diff --git a/src/xrpld/app/tx/detail/DeleteAccount.cpp b/src/xrpld/app/tx/detail/DeleteAccount.cpp index d52e84d755..805c023c04 100644 --- a/src/xrpld/app/tx/detail/DeleteAccount.cpp +++ b/src/xrpld/app/tx/detail/DeleteAccount.cpp @@ -141,7 +141,7 @@ removeNFTokenOfferFromLedger( beast::Journal) { if (!nft::deleteTokenOffer(view, sleDel)) - return tefBAD_LEDGER; + return tefBAD_LEDGER; // LCOV_EXCL_LINE return tesSUCCESS; } @@ -336,11 +336,13 @@ DeleteAccount::preclaim(PreclaimContext const& ctx) if (!sleItem) { // Directory node has an invalid index. Bail out. 
+ // LCOV_EXCL_START JLOG(ctx.j.fatal()) << "DeleteAccount: directory node in ledger " << ctx.view.seq() << " has index to object that is missing: " << to_string(dirEntry); return tefBAD_LEDGER; + // LCOV_EXCL_STOP } LedgerEntryType const nodeType{ @@ -373,7 +375,7 @@ DeleteAccount::doApply() dst, "ripple::DeleteAccount::doApply : non-null destination account"); if (!src || !dst) - return tefBAD_LEDGER; + return tefBAD_LEDGER; // LCOV_EXCL_LINE if (ctx_.view().rules().enabled(featureDepositAuth) && ctx_.tx.isFieldPresent(sfCredentialIDs)) diff --git a/src/xrpld/app/tx/detail/DepositPreauth.cpp b/src/xrpld/app/tx/detail/DepositPreauth.cpp index 236b59a173..6f0e079290 100644 --- a/src/xrpld/app/tx/detail/DepositPreauth.cpp +++ b/src/xrpld/app/tx/detail/DepositPreauth.cpp @@ -138,7 +138,7 @@ DepositPreauth::preclaim(PreclaimContext const& ctx) return tecNO_ISSUER; auto [it, ins] = sorted.emplace(issuer, o[sfCredentialType]); if (!ins) - return tefINTERNAL; + return tefINTERNAL; // LCOV_EXCL_LINE } // Verify that the Preauth entry they asked to add is not already @@ -198,7 +198,7 @@ DepositPreauth::doApply() << (page ? "success" : "failure"); if (!page) - return tecDIR_FULL; + return tecDIR_FULL; // LCOV_EXCL_LINE slePreauth->setFieldU64(sfOwnerNode, *page); @@ -216,7 +216,7 @@ DepositPreauth::doApply() { auto const sleOwner = view().peek(keylet::account(account_)); if (!sleOwner) - return tefINTERNAL; + return tefINTERNAL; // LCOV_EXCL_LINE // A preauth counts against the reserve of the issuing account, but we // check the starting balance because we want to allow dipping into the @@ -246,7 +246,7 @@ DepositPreauth::doApply() Keylet const preauthKey = keylet::depositPreauth(account_, sortedTX); auto slePreauth = std::make_shared(preauthKey); if (!slePreauth) - return tefINTERNAL; + return tefINTERNAL; // LCOV_EXCL_LINE slePreauth->setAccountID(sfAccount, account_); slePreauth->peekFieldArray(sfAuthorizeCredentials) = @@ -262,7 +262,7 @@ DepositPreauth::doApply() << (page ? "success" : "failure"); if (!page) - return tecDIR_FULL; + return tecDIR_FULL; // LCOV_EXCL_LINE slePreauth->setFieldU64(sfOwnerNode, *page); @@ -299,14 +299,16 @@ DepositPreauth::removeFromLedger( std::uint64_t const page{(*slePreauth)[sfOwnerNode]}; if (!view.dirRemove(keylet::ownerDir(account), page, preauthIndex, false)) { + // LCOV_EXCL_START JLOG(j.fatal()) << "Unable to delete DepositPreauth from owner."; return tefBAD_LEDGER; + // LCOV_EXCL_STOP } // If we succeeded, update the DepositPreauth owner's reserve. 
auto const sleOwner = view.peek(keylet::account(account)); if (!sleOwner) - return tefINTERNAL; + return tefINTERNAL; // LCOV_EXCL_LINE adjustOwnerCount(view, sleOwner, -1, j); diff --git a/src/xrpld/app/tx/detail/Escrow.cpp b/src/xrpld/app/tx/detail/Escrow.cpp index 969fd4dd4c..eb468626a4 100644 --- a/src/xrpld/app/tx/detail/Escrow.cpp +++ b/src/xrpld/app/tx/detail/Escrow.cpp @@ -414,10 +414,8 @@ escrowLockApplyHelper( beast::Journal journal) { // Defensive: Issuer cannot create an escrow - // LCOV_EXCL_START if (issuer == sender) - return tecINTERNAL; - // LCOV_EXCL_STOP + return tecINTERNAL; // LCOV_EXCL_LINE auto const ter = rippleCredit( view, @@ -441,10 +439,8 @@ escrowLockApplyHelper( beast::Journal journal) { // Defensive: Issuer cannot create an escrow - // LCOV_EXCL_START if (issuer == sender) - return tecINTERNAL; - // LCOV_EXCL_STOP + return tecINTERNAL; // LCOV_EXCL_LINE auto const ter = rippleLockEscrowMPT(view, sender, amount, journal); if (ter != tesSUCCESS) @@ -472,6 +468,9 @@ EscrowCreate::doApply() } else { + // This is old code that needs to stay to support replaying old ledgers, + // but does not need to be covered by new tests. + // LCOV_EXCL_START if (ctx_.tx[~sfCancelAfter]) { auto const cancelAfter = ctx_.tx[sfCancelAfter]; @@ -487,6 +486,7 @@ EscrowCreate::doApply() if (closeTime.time_since_epoch().count() >= finishAfter) return tecNO_PERMISSION; } + // LCOV_EXCL_STOP } auto const sle = ctx_.view().peek(keylet::account(account_)); @@ -514,12 +514,12 @@ EscrowCreate::doApply() auto const sled = ctx_.view().read(keylet::account(ctx_.tx[sfDestination])); if (!sled) - return tecNO_DST; + return tecNO_DST; // LCOV_EXCL_LINE if (((*sled)[sfFlags] & lsfRequireDestTag) && !ctx_.tx[~sfDestinationTag]) return tecDST_TAG_NEEDED; - // Obeying the lsfDissalowXRP flag was a bug. Piggyback on + // Obeying the lsfDisallowXRP flag was a bug. Piggyback on // featureDepositAuth to remove the bug. if (!ctx_.view().rules().enabled(featureDepositAuth) && ((*sled)[sfFlags] & lsfDisallowXRP)) @@ -601,7 +601,9 @@ EscrowCreate::doApply() }, amount.asset().value()); !isTesSuccess(ret)) + { return ret; // LCOV_EXCL_LINE + } } // increment owner count @@ -837,10 +839,8 @@ escrowUnlockApplyHelper( bool const receiverIssuer = issuer == receiver; bool const issuerHigh = issuer > receiver; - // LCOV_EXCL_START if (senderIssuer) - return tecINTERNAL; - // LCOV_EXCL_STOP + return tecINTERNAL; // LCOV_EXCL_LINE if (receiverIssuer) return tesSUCCESS; @@ -1057,6 +1057,9 @@ EscrowFinish::doApply() } else { + // This is old code that needs to stay to support replaying old ledgers, + // but does not need to be covered by new tests. + // LCOV_EXCL_START // Too soon? if ((*slep)[~sfFinishAfter] && ctx_.view().info().parentCloseTime.time_since_epoch().count() <= @@ -1068,6 +1071,7 @@ EscrowFinish::doApply() ctx_.view().info().parentCloseTime.time_since_epoch().count() <= (*slep)[sfCancelAfter]) return tecNO_PERMISSION; + // LCOV_EXCL_STOP } // Check cryptocondition fulfillment @@ -1082,6 +1086,7 @@ EscrowFinish::doApply() // simply re-run the check. 
if (cb && !any(flags & (SF_CF_INVALID | SF_CF_VALID))) { + // LCOV_EXCL_START auto const fb = ctx_.tx[~sfFulfillment]; if (!fb) @@ -1093,6 +1098,7 @@ EscrowFinish::doApply() flags = SF_CF_INVALID; ctx_.app.getHashRouter().setFlags(id, flags); + // LCOV_EXCL_STOP } // If the check failed, then simply return an error @@ -1139,8 +1145,10 @@ EscrowFinish::doApply() if (!ctx_.view().dirRemove( keylet::ownerDir(account), page, k.key, true)) { + // LCOV_EXCL_START JLOG(j_.fatal()) << "Unable to delete Escrow from owner."; return tefBAD_LEDGER; + // LCOV_EXCL_STOP } } @@ -1150,8 +1158,10 @@ EscrowFinish::doApply() if (!ctx_.view().dirRemove( keylet::ownerDir(destID), *optPage, k.key, true)) { + // LCOV_EXCL_START JLOG(j_.fatal()) << "Unable to delete Escrow from recipient."; return tefBAD_LEDGER; + // LCOV_EXCL_STOP } } @@ -1193,8 +1203,10 @@ EscrowFinish::doApply() if (!ctx_.view().dirRemove( keylet::ownerDir(issuer), *optPage, k.key, true)) { + // LCOV_EXCL_START JLOG(j_.fatal()) << "Unable to delete Escrow from recipient."; return tefBAD_LEDGER; // LCOV_EXCL_LINE + // LCOV_EXCL_STOP } } } @@ -1338,11 +1350,15 @@ EscrowCancel::doApply() } else { + // This is old code that needs to stay to support replaying old ledgers, + // but does not need to be covered by new tests. + // LCOV_EXCL_START // Too soon? if (!(*slep)[~sfCancelAfter] || ctx_.view().info().parentCloseTime.time_since_epoch().count() <= (*slep)[sfCancelAfter]) return tecNO_PERMISSION; + // LCOV_EXCL_STOP } AccountID const account = (*slep)[sfAccount]; @@ -1353,8 +1369,10 @@ EscrowCancel::doApply() if (!ctx_.view().dirRemove( keylet::ownerDir(account), page, k.key, true)) { + // LCOV_EXCL_START JLOG(j_.fatal()) << "Unable to delete Escrow from owner."; return tefBAD_LEDGER; + // LCOV_EXCL_STOP } } @@ -1367,8 +1385,10 @@ EscrowCancel::doApply() k.key, true)) { + // LCOV_EXCL_START JLOG(j_.fatal()) << "Unable to delete Escrow from recipient."; return tefBAD_LEDGER; + // LCOV_EXCL_STOP } } @@ -1409,8 +1429,10 @@ EscrowCancel::doApply() if (!ctx_.view().dirRemove( keylet::ownerDir(issuer), *optPage, k.key, true)) { + // LCOV_EXCL_START JLOG(j_.fatal()) << "Unable to delete Escrow from recipient."; - return tefBAD_LEDGER; // LCOV_EXCL_LINE + return tefBAD_LEDGER; + // LCOV_EXCL_STOP } } } diff --git a/src/xrpld/app/tx/detail/LedgerStateFix.cpp b/src/xrpld/app/tx/detail/LedgerStateFix.cpp index 6059e15313..017f67f2ee 100644 --- a/src/xrpld/app/tx/detail/LedgerStateFix.cpp +++ b/src/xrpld/app/tx/detail/LedgerStateFix.cpp @@ -67,7 +67,7 @@ LedgerStateFix::preclaim(PreclaimContext const& ctx) } // preflight is supposed to verify that only valid FixTypes get to preclaim. - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE } TER @@ -83,7 +83,7 @@ LedgerStateFix::doApply() } // preflight is supposed to verify that only valid FixTypes get to doApply. 
- return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE } } // namespace ripple diff --git a/src/xrpld/app/tx/detail/MPTokenAuthorize.cpp b/src/xrpld/app/tx/detail/MPTokenAuthorize.cpp index edeb12e5c0..b8728d23cf 100644 --- a/src/xrpld/app/tx/detail/MPTokenAuthorize.cpp +++ b/src/xrpld/app/tx/detail/MPTokenAuthorize.cpp @@ -75,7 +75,7 @@ MPTokenAuthorize::preclaim(PreclaimContext const& ctx) auto const sleMptIssuance = ctx.view.read( keylet::mptIssuance(ctx.tx[sfMPTokenIssuanceID])); if (!sleMptIssuance) - return tefINTERNAL; + return tefINTERNAL; // LCOV_EXCL_LINE return tecHAS_OBLIGATIONS; } diff --git a/src/xrpld/app/tx/detail/MPTokenIssuanceDestroy.cpp b/src/xrpld/app/tx/detail/MPTokenIssuanceDestroy.cpp index 4c502f1106..1dd3fbfe68 100644 --- a/src/xrpld/app/tx/detail/MPTokenIssuanceDestroy.cpp +++ b/src/xrpld/app/tx/detail/MPTokenIssuanceDestroy.cpp @@ -66,11 +66,11 @@ MPTokenIssuanceDestroy::doApply() auto const mpt = view().peek(keylet::mptIssuance(ctx_.tx[sfMPTokenIssuanceID])); if (account_ != mpt->getAccountID(sfIssuer)) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE if (!view().dirRemove( keylet::ownerDir(account_), (*mpt)[sfOwnerNode], mpt->key(), false)) - return tefBAD_LEDGER; + return tefBAD_LEDGER; // LCOV_EXCL_LINE view().erase(mpt); diff --git a/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp b/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp index 6fb87711c8..f2c9bd8a96 100644 --- a/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp +++ b/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp @@ -282,7 +282,7 @@ MPTokenIssuanceSet::doApply() sle = view().peek(keylet::mptIssuance(mptIssuanceID)); if (!sle) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE std::uint32_t const flagsIn = sle->getFieldU32(sfFlags); std::uint32_t flagsOut = flagsIn; diff --git a/src/xrpld/app/tx/detail/NFTokenAcceptOffer.cpp b/src/xrpld/app/tx/detail/NFTokenAcceptOffer.cpp index 3b4a27ffd7..23874ee3e0 100644 --- a/src/xrpld/app/tx/detail/NFTokenAcceptOffer.cpp +++ b/src/xrpld/app/tx/detail/NFTokenAcceptOffer.cpp @@ -356,7 +356,7 @@ NFTokenAcceptOffer::preclaim(PreclaimContext const& ctx) auto const& offer = bo ? bo : so; if (!offer) // Purely defensive, should be caught in preflight. 
- return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE auto const& tokenID = offer->at(sfNFTokenID); auto const& amount = offer->at(sfAmount); @@ -428,7 +428,7 @@ NFTokenAcceptOffer::transferNFToken( auto tokenAndPage = nft::findTokenAndPage(view(), seller, nftokenID); if (!tokenAndPage) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE if (auto const ret = nft::removeToken( view(), seller, nftokenID, std::move(tokenAndPage->page)); @@ -437,7 +437,7 @@ NFTokenAcceptOffer::transferNFToken( auto const sleBuyer = view().read(keylet::account(buyer)); if (!sleBuyer) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE std::uint32_t const buyerOwnerCountBefore = sleBuyer->getFieldU32(sfOwnerCount); @@ -523,16 +523,20 @@ NFTokenAcceptOffer::doApply() if (bo && !nft::deleteTokenOffer(view(), bo)) { + // LCOV_EXCL_START JLOG(j_.fatal()) << "Unable to delete buy offer '" << to_string(bo->key()) << "': ignoring"; return tecINTERNAL; + // LCOV_EXCL_STOP } if (so && !nft::deleteTokenOffer(view(), so)) { + // LCOV_EXCL_START JLOG(j_.fatal()) << "Unable to delete sell offer '" << to_string(so->key()) << "': ignoring"; return tecINTERNAL; + // LCOV_EXCL_STOP } // Bridging two different offers @@ -603,7 +607,7 @@ NFTokenAcceptOffer::doApply() if (so) return acceptOffer(so); - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE } } // namespace ripple diff --git a/src/xrpld/app/tx/detail/NFTokenCancelOffer.cpp b/src/xrpld/app/tx/detail/NFTokenCancelOffer.cpp index 86e804b1a5..2d3eb74775 100644 --- a/src/xrpld/app/tx/detail/NFTokenCancelOffer.cpp +++ b/src/xrpld/app/tx/detail/NFTokenCancelOffer.cpp @@ -101,9 +101,11 @@ NFTokenCancelOffer::doApply() if (auto offer = view().peek(keylet::nftoffer(id)); offer && !nft::deleteTokenOffer(view(), offer)) { + // LCOV_EXCL_START JLOG(j_.fatal()) << "Unable to delete token offer " << id << " (ledger " << view().seq() << ")"; return tefBAD_LEDGER; + // LCOV_EXCL_STOP } } diff --git a/src/xrpld/app/tx/detail/NFTokenMint.cpp b/src/xrpld/app/tx/detail/NFTokenMint.cpp index 8149d3b59d..dd82443fee 100644 --- a/src/xrpld/app/tx/detail/NFTokenMint.cpp +++ b/src/xrpld/app/tx/detail/NFTokenMint.cpp @@ -306,7 +306,7 @@ NFTokenMint::doApply() if (nfTokenTemplate == nullptr) // Should never happen. 
- return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE auto const nftokenID = createNFTokenID( extractNFTokenFlagsFromTxFlags(ctx_.tx.getFlags()), diff --git a/src/xrpld/app/tx/detail/NFTokenUtils.cpp b/src/xrpld/app/tx/detail/NFTokenUtils.cpp index ad3e6f4d35..f246e89e65 100644 --- a/src/xrpld/app/tx/detail/NFTokenUtils.cpp +++ b/src/xrpld/app/tx/detail/NFTokenUtils.cpp @@ -1046,7 +1046,7 @@ tokenOfferCreateApply( keylet::ownerDir(acctID), offerID, describeOwnerDir(acctID)); if (!ownerNode) - return tecDIR_FULL; + return tecDIR_FULL; // LCOV_EXCL_LINE bool const isSellOffer = txFlags & tfSellNFToken; @@ -1063,7 +1063,7 @@ tokenOfferCreateApply( }); if (!offerNode) - return tecDIR_FULL; + return tecDIR_FULL; // LCOV_EXCL_LINE std::uint32_t sleFlags = 0; diff --git a/src/xrpld/app/tx/detail/PayChan.cpp b/src/xrpld/app/tx/detail/PayChan.cpp index 32c0abeb93..b495e00b3f 100644 --- a/src/xrpld/app/tx/detail/PayChan.cpp +++ b/src/xrpld/app/tx/detail/PayChan.cpp @@ -126,9 +126,11 @@ closeChannel( auto const page = (*slep)[sfOwnerNode]; if (!view.dirRemove(keylet::ownerDir(src), page, key, true)) { + // LCOV_EXCL_START JLOG(j.fatal()) << "Could not remove paychan from src owner directory"; return tefBAD_LEDGER; + // LCOV_EXCL_STOP } } @@ -139,16 +141,18 @@ closeChannel( auto const dst = (*slep)[sfDestination]; if (!view.dirRemove(keylet::ownerDir(dst), *page, key, true)) { + // LCOV_EXCL_START JLOG(j.fatal()) << "Could not remove paychan from dst owner directory"; return tefBAD_LEDGER; + // LCOV_EXCL_STOP } } // Transfer amount back to owner, decrement owner count auto const sle = view.peek(keylet::account(src)); if (!sle) - return tefINTERNAL; + return tefINTERNAL; // LCOV_EXCL_LINE XRPL_ASSERT( (*slep)[sfAmount] >= (*slep)[sfBalance], @@ -257,7 +261,7 @@ PayChanCreate::doApply() auto const account = ctx_.tx[sfAccount]; auto const sle = ctx_.view().peek(keylet::account(account)); if (!sle) - return tefINTERNAL; + return tefINTERNAL; // LCOV_EXCL_LINE if (ctx_.view().rules().enabled(fixPayChanCancelAfter)) { @@ -301,7 +305,7 @@ PayChanCreate::doApply() payChanKeylet, describeOwnerDir(account)); if (!page) - return tecDIR_FULL; + return tecDIR_FULL; // LCOV_EXCL_LINE (*slep)[sfOwnerNode] = *page; } @@ -311,7 +315,7 @@ PayChanCreate::doApply() auto const page = ctx_.view().dirInsert( keylet::ownerDir(dst), payChanKeylet, describeOwnerDir(dst)); if (!page) - return tecDIR_FULL; + return tecDIR_FULL; // LCOV_EXCL_LINE (*slep)[sfDestinationNode] = *page; } @@ -389,7 +393,7 @@ PayChanFund::doApply() auto const sle = ctx_.view().peek(keylet::account(txAccount)); if (!sle) - return tefINTERNAL; + return tefINTERNAL; // LCOV_EXCL_LINE { // Check reserve and funds availability diff --git a/src/xrpld/app/tx/detail/Payment.cpp b/src/xrpld/app/tx/detail/Payment.cpp index 8bc0e891d0..81a083e336 100644 --- a/src/xrpld/app/tx/detail/Payment.cpp +++ b/src/xrpld/app/tx/detail/Payment.cpp @@ -632,7 +632,7 @@ Payment::doApply() auto const sleSrc = view().peek(keylet::account(account_)); if (!sleSrc) - return tefINTERNAL; + return tefINTERNAL; // LCOV_EXCL_LINE // ownerCount is the number of entries in this ledger for this // account that require a reserve. 
diff --git a/src/xrpld/app/tx/detail/PermissionedDomainDelete.cpp b/src/xrpld/app/tx/detail/PermissionedDomainDelete.cpp index 9fe48ba515..097a2d7bad 100644 --- a/src/xrpld/app/tx/detail/PermissionedDomainDelete.cpp +++ b/src/xrpld/app/tx/detail/PermissionedDomainDelete.cpp @@ -66,9 +66,11 @@ PermissionedDomainDelete::doApply() if (!view().dirRemove(keylet::ownerDir(account_), page, slePd->key(), true)) { - JLOG(j_.fatal()) // LCOV_EXCL_LINE - << "Unable to delete permissioned domain directory entry."; // LCOV_EXCL_LINE - return tefBAD_LEDGER; // LCOV_EXCL_LINE + // LCOV_EXCL_START + JLOG(j_.fatal()) + << "Unable to delete permissioned domain directory entry."; + return tefBAD_LEDGER; + // LCOV_EXCL_STOP } auto const ownerSle = view().peek(keylet::account(account_)); diff --git a/src/xrpld/app/tx/detail/SetAccount.cpp b/src/xrpld/app/tx/detail/SetAccount.cpp index c2129ba1e1..7c60ec646a 100644 --- a/src/xrpld/app/tx/detail/SetAccount.cpp +++ b/src/xrpld/app/tx/detail/SetAccount.cpp @@ -311,7 +311,7 @@ SetAccount::doApply() { auto const sle = view().peek(keylet::account(account_)); if (!sle) - return tefINTERNAL; + return tefINTERNAL; // LCOV_EXCL_LINE std::uint32_t const uFlagsIn = sle->getFieldU32(sfFlags); std::uint32_t uFlagsOut = uFlagsIn; diff --git a/src/xrpld/app/tx/detail/SetRegularKey.cpp b/src/xrpld/app/tx/detail/SetRegularKey.cpp index 4e063e7d1f..93d5899861 100644 --- a/src/xrpld/app/tx/detail/SetRegularKey.cpp +++ b/src/xrpld/app/tx/detail/SetRegularKey.cpp @@ -66,7 +66,7 @@ SetRegularKey::doApply() { auto const sle = view().peek(keylet::account(account_)); if (!sle) - return tefINTERNAL; + return tefINTERNAL; // LCOV_EXCL_LINE if (!minimumFee(ctx_.app, ctx_.baseFee, view().fees(), view().flags())) sle->setFlag(lsfPasswordSpent); diff --git a/src/xrpld/app/tx/detail/SetSignerList.cpp b/src/xrpld/app/tx/detail/SetSignerList.cpp index ec2f902009..010b6baa96 100644 --- a/src/xrpld/app/tx/detail/SetSignerList.cpp +++ b/src/xrpld/app/tx/detail/SetSignerList.cpp @@ -226,8 +226,10 @@ removeSignersFromLedger( if (!view.dirRemove(ownerDirKeylet, hint, signerListKeylet.key, false)) { + // LCOV_EXCL_START JLOG(j.fatal()) << "Unable to delete SignerList from owner."; return tefBAD_LEDGER; + // LCOV_EXCL_STOP } adjustOwnerCount( @@ -349,7 +351,7 @@ SetSignerList::replaceSignerList() auto const sle = view().peek(accountKeylet); if (!sle) - return tefINTERNAL; + return tefINTERNAL; // LCOV_EXCL_LINE // Compute new reserve. Verify the account has funds to meet the reserve. std::uint32_t const oldOwnerCount{(*sle)[sfOwnerCount]}; @@ -387,7 +389,7 @@ SetSignerList::replaceSignerList() << ": " << (page ? "success" : "failure"); if (!page) - return tecDIR_FULL; + return tecDIR_FULL; // LCOV_EXCL_LINE signerList->setFieldU64(sfOwnerNode, *page); @@ -405,7 +407,7 @@ SetSignerList::destroySignerList() // is enabled or there is a regular key. 
SLE::pointer ledgerEntry = view().peek(accountKeylet); if (!ledgerEntry) - return tefINTERNAL; + return tefINTERNAL; // LCOV_EXCL_LINE if ((ledgerEntry->isFlag(lsfDisableMaster)) && (!ledgerEntry->isFieldPresent(sfRegularKey))) diff --git a/src/xrpld/app/tx/detail/SetTrust.cpp b/src/xrpld/app/tx/detail/SetTrust.cpp index 21d4534f93..d881425960 100644 --- a/src/xrpld/app/tx/detail/SetTrust.cpp +++ b/src/xrpld/app/tx/detail/SetTrust.cpp @@ -377,7 +377,7 @@ SetTrust::doApply() auto const sle = view().peek(keylet::account(account_)); if (!sle) - return tefINTERNAL; + return tefINTERNAL; // LCOV_EXCL_LINE std::uint32_t const uOwnerCount = sle->getFieldU32(sfOwnerCount); diff --git a/src/xrpld/app/tx/detail/Transactor.cpp b/src/xrpld/app/tx/detail/Transactor.cpp index 920b1a58bc..2f62a142c0 100644 --- a/src/xrpld/app/tx/detail/Transactor.cpp +++ b/src/xrpld/app/tx/detail/Transactor.cpp @@ -571,15 +571,19 @@ Transactor::ticketDelete( SLE::pointer const sleTicket = view.peek(keylet::ticket(ticketIndex)); if (!sleTicket) { + // LCOV_EXCL_START JLOG(j.fatal()) << "Ticket disappeared from ledger."; return tefBAD_LEDGER; + // LCOV_EXCL_STOP } std::uint64_t const page{(*sleTicket)[sfOwnerNode]}; if (!view.dirRemove(keylet::ownerDir(account), page, ticketIndex, true)) { + // LCOV_EXCL_START JLOG(j.fatal()) << "Unable to delete Ticket from owner."; return tefBAD_LEDGER; + // LCOV_EXCL_STOP } // Update the account root's TicketCount. If the ticket count drops to @@ -587,8 +591,10 @@ Transactor::ticketDelete( auto sleAccount = view.peek(keylet::account(account)); if (!sleAccount) { + // LCOV_EXCL_START JLOG(j.fatal()) << "Could not find Ticket owner account root."; return tefBAD_LEDGER; + // LCOV_EXCL_STOP } if (auto ticketCount = (*sleAccount)[~sfTicketCount]) @@ -600,8 +606,10 @@ Transactor::ticketDelete( } else { + // LCOV_EXCL_START JLOG(j.fatal()) << "TicketCount field missing from account root."; return tefBAD_LEDGER; + // LCOV_EXCL_STOP } // Update the Ticket owner's reserve. 
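Stepping back from the individual hunks: the exclusions in this patch come in two flavours, a trailing `// LCOV_EXCL_LINE` for a single defensive statement and a `// LCOV_EXCL_START` / `// LCOV_EXCL_STOP` pair around a multi-line block (typically a fatal log plus an early return). A self-contained sketch of the pattern follows; the function and result enum are hypothetical and not part of the codebase:

```cpp
// Illustrative only: hypothetical types and function, showing the two lcov
// exclusion styles applied throughout this patch.
#include <optional>

enum Result { tesSUCCESS, tecINTERNAL, tefBAD_LEDGER };

Result
removeEntry(std::optional<int> const& page, bool dirRemoved)
{
    // One-line defensive check: annotate the line itself.
    if (!page)
        return tecINTERNAL;  // LCOV_EXCL_LINE

    // Multi-line defensive block: wrap the whole block, so the logging and
    // the early return are both excluded from the coverage report.
    if (!dirRemoved)
    {
        // LCOV_EXCL_START
        // (a fatal-level log line would go here in real code)
        return tefBAD_LEDGER;
        // LCOV_EXCL_STOP
    }

    return tesSUCCESS;
}
```

Both markers simply drop the enclosed lines from the coverage data, which is what yields the coverage increase described in the commit message.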
diff --git a/src/xrpld/app/tx/detail/XChainBridge.cpp b/src/xrpld/app/tx/detail/XChainBridge.cpp index 5f5c081e2f..d7731d8d98 100644 --- a/src/xrpld/app/tx/detail/XChainBridge.cpp +++ b/src/xrpld/app/tx/detail/XChainBridge.cpp @@ -444,7 +444,7 @@ transferHelper( auto const sleSrc = psb.peek(keylet::account(src)); XRPL_ASSERT(sleSrc, "ripple::transferHelper : non-null source account"); if (!sleSrc) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE { auto const ownerCount = sleSrc->getFieldU32(sfOwnerCount); @@ -714,7 +714,7 @@ finalizeClaimHelper( } if (distributed > rewardPool) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE return tesSUCCESS; }(); @@ -1152,7 +1152,7 @@ applyCreateAccountAttestations( // subsequent claim ids auto const sleBridge = psb.peek(bridgeK); if (!sleBridge) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE (*sleBridge)[sfXChainAccountClaimCount] = attBegin->createCount; psb.update(sleBridge); } @@ -1172,12 +1172,12 @@ applyCreateAccountAttestations( claimIDKeylet, describeOwnerDir(doorAccount)); if (!page) - return tecDIR_FULL; + return tecDIR_FULL; // LCOV_EXCL_LINE (*createdSleClaimID)[sfOwnerNode] = *page; auto const sleDoor = psb.peek(doorK); if (!sleDoor) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE // Reserve was already checked adjustOwnerCount(psb, sleDoor, 1, j); @@ -1242,8 +1242,9 @@ TER attestationPreclaim(PreclaimContext const& ctx) { auto const att = toClaim(ctx.tx); + // checked in preflight if (!att) - return tecINTERNAL; // checked in preflight + return tecINTERNAL; // LCOV_EXCL_LINE STXChainBridge const bridgeSpec = ctx.tx[sfXChainBridge]; auto const sleBridge = readBridge(ctx.view, bridgeSpec); @@ -1274,7 +1275,7 @@ attestationDoApply(ApplyContext& ctx) auto const att = toClaim(ctx.tx); if (!att) // Should already be checked in preflight - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE STXChainBridge const bridgeSpec = ctx.tx[sfXChainBridge]; @@ -1505,7 +1506,7 @@ XChainCreateBridge::doApply() auto const sleAcct = ctx_.view().peek(keylet::account(account)); if (!sleAcct) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE STXChainBridge::ChainType const chainType = STXChainBridge::srcChain(account == bridgeSpec.lockingChainDoor()); @@ -1527,7 +1528,7 @@ XChainCreateBridge::doApply() auto const page = ctx_.view().dirInsert( keylet::ownerDir(account), bridgeKeylet, describeOwnerDir(account)); if (!page) - return tecDIR_FULL; + return tecDIR_FULL; // LCOV_EXCL_LINE (*sleBridge)[sfOwnerNode] = *page; } @@ -1620,7 +1621,7 @@ BridgeModify::doApply() auto const sleAcct = ctx_.view().peek(keylet::account(account)); if (!sleAcct) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE STXChainBridge::ChainType const chainType = STXChainBridge::srcChain(account == bridgeSpec.lockingChainDoor()); @@ -1628,7 +1629,7 @@ BridgeModify::doApply() auto const sleBridge = ctx_.view().peek(keylet::bridge(bridgeSpec, chainType)); if (!sleBridge) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE if (reward) (*sleBridge)[sfSignatureReward] = *reward; @@ -1691,7 +1692,7 @@ XChainClaim::preclaim(PreclaimContext const& ctx) else if (thisDoor == bridgeSpec.issuingChainDoor()) isLockingChain = false; else - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE } { @@ -1715,7 +1716,7 @@ XChainClaim::preclaim(PreclaimContext const& ctx) // Should have been caught when creating the bridge // Detect here so `otherChainAmount` doesn't switch from IOU -> XRP // and the numeric 
issues that need to be addressed with that. - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE } auto const otherChainAmount = [&]() -> STAmount { @@ -1921,7 +1922,7 @@ XChainCommit::preclaim(PreclaimContext const& ctx) else if (thisDoor == bridgeSpec.issuingChainDoor()) isLockingChain = false; else - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE } if (isLockingChain) @@ -1948,11 +1949,11 @@ XChainCommit::doApply() auto const bridgeSpec = ctx_.tx[sfXChainBridge]; if (!psb.read(keylet::account(account))) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE auto const sleBridge = readBridge(psb, bridgeSpec); if (!sleBridge) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE auto const dst = (*sleBridge)[sfAccount]; @@ -2040,21 +2041,27 @@ XChainCreateClaimID::doApply() auto const sleAcct = ctx_.view().peek(keylet::account(account)); if (!sleAcct) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE auto const sleBridge = peekBridge(ctx_.view(), bridgeSpec); if (!sleBridge) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE std::uint32_t const claimID = (*sleBridge)[sfXChainClaimID] + 1; if (claimID == 0) - return tecINTERNAL; // overflow + { + // overflow + return tecINTERNAL; // LCOV_EXCL_LINE + } (*sleBridge)[sfXChainClaimID] = claimID; Keylet const claimIDKeylet = keylet::xChainClaimID(bridgeSpec, claimID); if (ctx_.view().exists(claimIDKeylet)) - return tecINTERNAL; // already checked out!?! + { + // already checked out!?! + return tecINTERNAL; // LCOV_EXCL_LINE + } auto const sleClaimID = std::make_shared(claimIDKeylet); @@ -2073,7 +2080,7 @@ XChainCreateClaimID::doApply() claimIDKeylet, describeOwnerDir(account)); if (!page) - return tecDIR_FULL; + return tecDIR_FULL; // LCOV_EXCL_LINE (*sleClaimID)[sfOwnerNode] = *page; } @@ -2191,7 +2198,7 @@ XChainCreateAccountCommit::preclaim(PreclaimContext const& ctx) else if (thisDoor == bridgeSpec.issuingChainDoor()) srcChain = STXChainBridge::ChainType::issuing; else - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE } STXChainBridge::ChainType const dstChain = STXChainBridge::otherChain(srcChain); @@ -2217,11 +2224,11 @@ XChainCreateAccountCommit::doApply() auto const sle = psb.peek(keylet::account(account)); if (!sle) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE auto const sleBridge = peekBridge(psb, bridge); if (!sleBridge) - return tecINTERNAL; + return tecINTERNAL; // LCOV_EXCL_LINE auto const dst = (*sleBridge)[sfAccount]; diff --git a/src/xrpld/rpc/detail/ServerHandler.cpp b/src/xrpld/rpc/detail/ServerHandler.cpp index f5f5e53238..66f4efffc6 100644 --- a/src/xrpld/rpc/detail/ServerHandler.cpp +++ b/src/xrpld/rpc/detail/ServerHandler.cpp @@ -507,10 +507,12 @@ ServerHandler::processSession( } catch (std::exception const& ex) { + // LCOV_EXCL_START jr[jss::result] = RPC::make_error(rpcINTERNAL); JLOG(m_journal.error()) << "Exception while processing WS: " << ex.what() << "\n" << "Input JSON: " << Json::Compact{Json::Value{jv}}; + // LCOV_EXCL_STOP } is->getConsumer().charge(loadType); @@ -904,10 +906,12 @@ ServerHandler::processRequest( } catch (std::exception const& ex) { + // LCOV_EXCL_START result = RPC::make_error(rpcINTERNAL); JLOG(m_journal.error()) << "Internal error : " << ex.what() << " when processing request: " << Json::Compact{Json::Value{params}}; + // LCOV_EXCL_STOP } auto end = std::chrono::system_clock::now(); diff --git a/src/xrpld/rpc/handlers/PayChanClaim.cpp b/src/xrpld/rpc/handlers/PayChanClaim.cpp index 6945d2a051..6238af3ae3 
100644 --- a/src/xrpld/rpc/handlers/PayChanClaim.cpp +++ b/src/xrpld/rpc/handlers/PayChanClaim.cpp @@ -94,9 +94,11 @@ doChannelAuthorize(RPC::JsonContext& context) } catch (std::exception const& ex) { + // LCOV_EXCL_START result = RPC::make_error( rpcINTERNAL, "Exception occurred during signing: " + std::string(ex.what())); + // LCOV_EXCL_STOP } return result; } diff --git a/src/xrpld/rpc/handlers/Random.cpp b/src/xrpld/rpc/handlers/Random.cpp index cea83a616c..c2a984a2c4 100644 --- a/src/xrpld/rpc/handlers/Random.cpp +++ b/src/xrpld/rpc/handlers/Random.cpp @@ -51,7 +51,7 @@ doRandom(RPC::JsonContext& context) } catch (std::exception const&) { - return rpcError(rpcINTERNAL); + return rpcError(rpcINTERNAL); // LCOV_EXCL_LINE } } From 459d0da010626e5669d53966ff14cd1ae9e77957 Mon Sep 17 00:00:00 2001 From: Ayaz Salikhov Date: Tue, 14 Oct 2025 16:18:34 +0100 Subject: [PATCH 229/244] chore: Support CMake 4 without workarounds (#5866) --- .github/scripts/strategy-matrix/linux.json | 44 +++++++++++----------- BUILD.md | 22 +---------- conan/profiles/default | 3 -- 3 files changed, 23 insertions(+), 46 deletions(-) diff --git a/.github/scripts/strategy-matrix/linux.json b/.github/scripts/strategy-matrix/linux.json index bae5c57087..b8da322118 100644 --- a/.github/scripts/strategy-matrix/linux.json +++ b/.github/scripts/strategy-matrix/linux.json @@ -15,154 +15,154 @@ "distro_version": "bookworm", "compiler_name": "gcc", "compiler_version": "12", - "image_sha": "6f723eb" + "image_sha": "6948666" }, { "distro_name": "debian", "distro_version": "bookworm", "compiler_name": "gcc", "compiler_version": "13", - "image_sha": "6f723eb" + "image_sha": "6948666" }, { "distro_name": "debian", "distro_version": "bookworm", "compiler_name": "gcc", "compiler_version": "14", - "image_sha": "6f723eb" + "image_sha": "6948666" }, { "distro_name": "debian", "distro_version": "bookworm", "compiler_name": "gcc", "compiler_version": "15", - "image_sha": "6f723eb" + "image_sha": "6948666" }, { "distro_name": "debian", "distro_version": "bookworm", "compiler_name": "clang", "compiler_version": "16", - "image_sha": "6f723eb" + "image_sha": "6948666" }, { "distro_name": "debian", "distro_version": "bookworm", "compiler_name": "clang", "compiler_version": "17", - "image_sha": "6f723eb" + "image_sha": "6948666" }, { "distro_name": "debian", "distro_version": "bookworm", "compiler_name": "clang", "compiler_version": "18", - "image_sha": "6f723eb" + "image_sha": "6948666" }, { "distro_name": "debian", "distro_version": "bookworm", "compiler_name": "clang", "compiler_version": "19", - "image_sha": "6f723eb" + "image_sha": "6948666" }, { "distro_name": "debian", "distro_version": "bookworm", "compiler_name": "clang", "compiler_version": "20", - "image_sha": "6f723eb" + "image_sha": "6948666" }, { "distro_name": "rhel", "distro_version": "9", "compiler_name": "gcc", "compiler_version": "12", - "image_sha": "d133ce3" + "image_sha": "6948666" }, { "distro_name": "rhel", "distro_version": "9", "compiler_name": "gcc", "compiler_version": "13", - "image_sha": "d133ce3" + "image_sha": "6948666" }, { "distro_name": "rhel", "distro_version": "9", "compiler_name": "gcc", "compiler_version": "14", - "image_sha": "d133ce3" + "image_sha": "6948666" }, { "distro_name": "rhel", "distro_version": "9", "compiler_name": "clang", "compiler_version": "any", - "image_sha": "d133ce3" + "image_sha": "6948666" }, { "distro_name": "rhel", "distro_version": "10", "compiler_name": "gcc", "compiler_version": "14", - "image_sha": "d133ce3" + "image_sha": 
"6948666" }, { "distro_name": "rhel", "distro_version": "10", "compiler_name": "clang", "compiler_version": "any", - "image_sha": "d133ce3" + "image_sha": "6948666" }, { "distro_name": "ubuntu", "distro_version": "jammy", "compiler_name": "gcc", "compiler_version": "12", - "image_sha": "6f723eb" + "image_sha": "6948666" }, { "distro_name": "ubuntu", "distro_version": "noble", "compiler_name": "gcc", "compiler_version": "13", - "image_sha": "6f723eb" + "image_sha": "6948666" }, { "distro_name": "ubuntu", "distro_version": "noble", "compiler_name": "gcc", "compiler_version": "14", - "image_sha": "6f723eb" + "image_sha": "6948666" }, { "distro_name": "ubuntu", "distro_version": "noble", "compiler_name": "clang", "compiler_version": "16", - "image_sha": "6f723eb" + "image_sha": "6948666" }, { "distro_name": "ubuntu", "distro_version": "noble", "compiler_name": "clang", "compiler_version": "17", - "image_sha": "6f723eb" + "image_sha": "6948666" }, { "distro_name": "ubuntu", "distro_version": "noble", "compiler_name": "clang", "compiler_version": "18", - "image_sha": "6f723eb" + "image_sha": "6948666" }, { "distro_name": "ubuntu", "distro_version": "noble", "compiler_name": "clang", "compiler_version": "19", - "image_sha": "6f723eb" + "image_sha": "6948666" } ], "build_type": ["Debug", "Release"], diff --git a/BUILD.md b/BUILD.md index fd7a0b855d..ef6b5259df 100644 --- a/BUILD.md +++ b/BUILD.md @@ -39,17 +39,12 @@ found here](./docs/build/environment.md). - [Python 3.11](https://www.python.org/downloads/), or higher - [Conan 2.17](https://conan.io/downloads.html)[^1], or higher -- [CMake 3.22](https://cmake.org/download/)[^2], or higher +- [CMake 3.22](https://cmake.org/download/), or higher [^1]: It is possible to build with Conan 1.60+, but the instructions are significantly different, which is why we are not recommending it. -[^2]: - CMake 4 is not yet supported by all dependencies required by this project. - If you are affected by this issue, follow [conan workaround for cmake - 4](#workaround-for-cmake-4) - `rippled` is written in the C++20 dialect and includes the `` header. The [minimum compiler versions][2] required are: @@ -282,21 +277,6 @@ sed -i.bak -e 's|^arch=.*$|arch=x86_64|' $(conan config home)/profiles/default sed -i.bak -e 's|^compiler\.runtime=.*$|compiler.runtime=static|' $(conan config home)/profiles/default ``` -#### Workaround for CMake 4 - -If your system CMake is version 4 rather than 3, you may have to configure Conan -profile to use CMake version 3 for dependencies, by adding the following two -lines to your profile: - -```text -[tool_requires] -!cmake/*: cmake/[>=3 <4] -``` - -This will force Conan to download and use a locally cached CMake 3 version, and -is needed because some of the dependencies used by this project do not support -CMake 4. 
- #### Clang workaround for grpc If your compiler is clang, version 19 or later, or apple-clang, version 17 or diff --git a/conan/profiles/default b/conan/profiles/default index 3a7bcda1c6..03f19ca118 100644 --- a/conan/profiles/default +++ b/conan/profiles/default @@ -29,6 +29,3 @@ tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw'] {% if compiler == "gcc" and compiler_version < 13 %} tools.build:cxxflags=['-Wno-restrict'] {% endif %} - -[tool_requires] -!cmake/*: cmake/[>=3 <4] From 3509de9c5f6a62ebe02a916265a4238dd4f3bcd1 Mon Sep 17 00:00:00 2001 From: tequ Date: Wed, 15 Oct 2025 03:37:39 +0900 Subject: [PATCH 230/244] refactor: Add `paychan` namespace and update related tests (#5840) This change adds a paychan namespace to the TestHelpers and implementation files, improving organization and clarity. Additionally, it updates the AMM test to use the new `paychan::create` function for payment channel creation. Co-authored-by: Bart Thomee <11445373+bthomee@users.noreply.github.com> --- src/test/app/AMM_test.cpp | 2 +- src/test/app/PayChan_test.cpp | 2 ++ src/test/jtx/TestHelpers.h | 3 +++ src/test/jtx/impl/TestHelpers.cpp | 4 ++++ 4 files changed, 10 insertions(+), 1 deletion(-) diff --git a/src/test/app/AMM_test.cpp b/src/test/app/AMM_test.cpp index 1fe37bb7c1..3e0d65894f 100644 --- a/src/test/app/AMM_test.cpp +++ b/src/test/app/AMM_test.cpp @@ -3666,7 +3666,7 @@ private: auto const settleDelay = 100s; NetClock::time_point const cancelAfter = env.current()->info().parentCloseTime + 200s; - env(create( + env(paychan::create( carol, ammAlice.ammAccount(), XRP(1'000), diff --git a/src/test/app/PayChan_test.cpp b/src/test/app/PayChan_test.cpp index fe9b70cf7f..595a12aed9 100644 --- a/src/test/app/PayChan_test.cpp +++ b/src/test/app/PayChan_test.cpp @@ -31,6 +31,8 @@ namespace ripple { namespace test { +using namespace jtx::paychan; + struct PayChan_test : public beast::unit_test::suite { FeatureBitset const disallowIncoming{featureDisallowIncoming}; diff --git a/src/test/jtx/TestHelpers.h b/src/test/jtx/TestHelpers.h index 7d14f23c92..a7bdbb9b9f 100644 --- a/src/test/jtx/TestHelpers.h +++ b/src/test/jtx/TestHelpers.h @@ -502,6 +502,7 @@ expectLedgerEntryRoot( /* Payment Channel */ /******************************************************************************/ +namespace paychan { Json::Value create( @@ -561,6 +562,8 @@ channelBalance(ReadView const& view, uint256 const& chan); bool channelExists(ReadView const& view, uint256 const& chan); +} // namespace paychan + /* Crossing Limits */ /******************************************************************************/ diff --git a/src/test/jtx/impl/TestHelpers.cpp b/src/test/jtx/impl/TestHelpers.cpp index 20f24f0d84..71f44c691e 100644 --- a/src/test/jtx/impl/TestHelpers.cpp +++ b/src/test/jtx/impl/TestHelpers.cpp @@ -237,6 +237,8 @@ expectLedgerEntryRoot( /* Payment Channel */ /******************************************************************************/ +namespace paychan { + Json::Value create( AccountID const& account, @@ -328,6 +330,8 @@ channelExists(ReadView const& view, uint256 const& chan) return bool(slep); } +} // namespace paychan + /* Crossing Limits */ /******************************************************************************/ From 34619f2504c92f2967c855fcc0ca3d80682dcd3a Mon Sep 17 00:00:00 2001 From: zingero Date: Tue, 14 Oct 2025 22:10:19 +0300 Subject: [PATCH 231/244] docs: Fix typo in JSON writer documentation (#5881) Co-authored-by: Bart Thomee <11445373+bthomee@users.noreply.github.com> --- 
include/xrpl/json/json_writer.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/xrpl/json/json_writer.h b/include/xrpl/json/json_writer.h index 7e21e766e3..ba3a51013f 100644 --- a/include/xrpl/json/json_writer.h +++ b/include/xrpl/json/json_writer.h @@ -46,7 +46,7 @@ public: * without formatting (not human friendly). * * The JSON document is written in a single line. It is not intended for 'human' - * consumption, but may be useful to support feature such as RPC where bandwith + * consumption, but may be useful to support feature such as RPC where bandwidth * is limited. \sa Reader, Value */ From 97bc94a7f6c6be467728d1da363eabdcbd2803f9 Mon Sep 17 00:00:00 2001 From: Michael Legleux Date: Tue, 14 Oct 2025 15:02:38 -0700 Subject: [PATCH 232/244] feat: Install validator-keys (#5841) * feat: Install validator-keys * output validator-keys with everything else --- cmake/RippledInstall.cmake | 4 ++-- cmake/RippledValidatorKeys.cmake | 16 +++++++--------- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/cmake/RippledInstall.cmake b/cmake/RippledInstall.cmake index 95c25a212f..50f09d5a4b 100644 --- a/cmake/RippledInstall.cmake +++ b/cmake/RippledInstall.cmake @@ -38,7 +38,7 @@ install(CODE " set(CMAKE_MODULE_PATH \"${CMAKE_MODULE_PATH}\") include(create_symbolic_link) create_symbolic_link(xrpl \ - \${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}/ripple) + \$ENV{DESTDIR}\${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}/ripple) ") install (EXPORT RippleExports @@ -72,7 +72,7 @@ if (is_root_project AND TARGET rippled) set(CMAKE_MODULE_PATH \"${CMAKE_MODULE_PATH}\") include(create_symbolic_link) create_symbolic_link(rippled${suffix} \ - \${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_BINDIR}/xrpld${suffix}) + \$ENV{DESTDIR}\${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_BINDIR}/xrpld${suffix}) ") endif () diff --git a/cmake/RippledValidatorKeys.cmake b/cmake/RippledValidatorKeys.cmake index b6760ca496..fa520ce9c1 100644 --- a/cmake/RippledValidatorKeys.cmake +++ b/cmake/RippledValidatorKeys.cmake @@ -1,4 +1,4 @@ -option (validator_keys "Enables building of validator-keys-tool as a separate target (imported via FetchContent)" OFF) +option (validator_keys "Enables building of validator-keys tool as a separate target (imported via FetchContent)" OFF) if (validator_keys) git_branch (current_branch) @@ -6,17 +6,15 @@ if (validator_keys) if (NOT (current_branch STREQUAL "release")) set (current_branch "master") endif () - message (STATUS "tracking ValidatorKeys branch: ${current_branch}") + message (STATUS "Tracking ValidatorKeys branch: ${current_branch}") FetchContent_Declare ( - validator_keys_src + validator_keys GIT_REPOSITORY https://github.com/ripple/validator-keys-tool.git GIT_TAG "${current_branch}" ) - FetchContent_GetProperties (validator_keys_src) - if (NOT validator_keys_src_POPULATED) - message (STATUS "Pausing to download ValidatorKeys...") - FetchContent_Populate (validator_keys_src) - endif () - add_subdirectory (${validator_keys_src_SOURCE_DIR} ${CMAKE_BINARY_DIR}/validator-keys) + FetchContent_MakeAvailable(validator_keys) + set_target_properties(validator-keys PROPERTIES RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}") + install(TARGETS validator-keys RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) + endif () From 3d44758e5af811fc7dbdb47e81e887eaca87e184 Mon Sep 17 00:00:00 2001 From: Bart Date: Wed, 15 Oct 2025 14:23:44 -0400 Subject: [PATCH 233/244] fix: Update tools image shas (#5896) This change updates the Docker image hashes of the tools-rippled images 
to fix a missing dependency. Co-authored-by: Bart Thomee <11445373+bthomee@users.noreply.github.com> --- .github/workflows/pre-commit.yml | 2 +- .github/workflows/publish-docs.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml index 9b85a3bd11..da0fb02c19 100644 --- a/.github/workflows/pre-commit.yml +++ b/.github/workflows/pre-commit.yml @@ -12,4 +12,4 @@ jobs: uses: XRPLF/actions/.github/workflows/pre-commit.yml@af1b0f0d764cda2e5435f5ac97b240d4bd4d95d3 with: runs_on: ubuntu-latest - container: '{ "image": "ghcr.io/xrplf/ci/tools-rippled-pre-commit:sha-d1496b8" }' + container: '{ "image": "ghcr.io/xrplf/ci/tools-rippled-pre-commit:sha-a8c7be1" }' diff --git a/.github/workflows/publish-docs.yml b/.github/workflows/publish-docs.yml index efd89a5b22..84771ee4f7 100644 --- a/.github/workflows/publish-docs.yml +++ b/.github/workflows/publish-docs.yml @@ -27,7 +27,7 @@ env: jobs: publish: runs-on: ubuntu-latest - container: ghcr.io/xrplf/ci/tools-rippled-documentation:sha-d1496b8 + container: ghcr.io/xrplf/ci/tools-rippled-documentation:sha-a8c7be1 permissions: contents: write steps: From 519d1dbc34d21691bc48adb97c67e0d883e36dfb Mon Sep 17 00:00:00 2001 From: tequ Date: Thu, 16 Oct 2025 05:50:06 +0900 Subject: [PATCH 234/244] refactor: Replace fee().accountReserve(0) with fee().reserve (#5843) This PR changes fee().accountReserve(0) to fee().reserve, as the current network reserve amount should be used instead of the account reserve. Co-authored-by: Bart Thomee <11445373+bthomee@users.noreply.github.com> --- src/test/app/AccountDelete_test.cpp | 2 +- src/test/app/Check_test.cpp | 4 ++-- src/test/app/Credentials_test.cpp | 2 +- src/test/app/DID_test.cpp | 2 +- src/test/app/EscrowToken_test.cpp | 4 ++-- src/test/app/MPToken_test.cpp | 2 +- src/test/app/NFToken_test.cpp | 13 ++++++------- src/test/app/PermissionedDomains_test.cpp | 2 +- src/test/app/SetTrust_test.cpp | 2 +- src/xrpld/app/misc/FeeVoteImpl.cpp | 6 +++--- src/xrpld/app/misc/NetworkOPs.cpp | 12 ++++-------- src/xrpld/app/misc/detail/TxQ.cpp | 2 +- src/xrpld/app/paths/PathRequest.cpp | 3 +-- src/xrpld/app/paths/Pathfinder.cpp | 2 +- src/xrpld/app/tx/detail/Payment.cpp | 4 ++-- src/xrpld/app/tx/detail/XChainBridge.cpp | 2 +- 16 files changed, 29 insertions(+), 35 deletions(-) diff --git a/src/test/app/AccountDelete_test.cpp b/src/test/app/AccountDelete_test.cpp index f7c4ddc509..a8f946c454 100644 --- a/src/test/app/AccountDelete_test.cpp +++ b/src/test/app/AccountDelete_test.cpp @@ -747,7 +747,7 @@ public: // Note that the fee structure for unit tests does not match the fees // on the production network (October 2019). Unit tests have a base // reserve of 200 XRP. - env.fund(env.current()->fees().accountReserve(0), noripple(alice)); + env.fund(env.current()->fees().reserve, noripple(alice)); env.close(); // Burn a chunk of alice's funds so she only has 1 XRP remaining in diff --git a/src/test/app/Check_test.cpp b/src/test/app/Check_test.cpp index e724b83535..74c8e9df6d 100644 --- a/src/test/app/Check_test.cpp +++ b/src/test/app/Check_test.cpp @@ -619,7 +619,7 @@ class Check_test : public beast::unit_test::suite } { // Write a check that chews into alice's reserve. 
- STAmount const reserve{env.current()->fees().accountReserve(0)}; + STAmount const reserve{env.current()->fees().reserve}; STAmount const checkAmount{ startBalance - reserve - drops(baseFeeDrops)}; uint256 const chkId{getCheckIndex(alice, env.seq(alice))}; @@ -657,7 +657,7 @@ class Check_test : public beast::unit_test::suite } { // Write a check that goes one drop past what alice can pay. - STAmount const reserve{env.current()->fees().accountReserve(0)}; + STAmount const reserve{env.current()->fees().reserve}; STAmount const checkAmount{ startBalance - reserve - drops(baseFeeDrops - 1)}; uint256 const chkId{getCheckIndex(alice, env.seq(alice))}; diff --git a/src/test/app/Credentials_test.cpp b/src/test/app/Credentials_test.cpp index 102d516b89..8f953bf35c 100644 --- a/src/test/app/Credentials_test.cpp +++ b/src/test/app/Credentials_test.cpp @@ -578,7 +578,7 @@ struct Credentials_test : public beast::unit_test::suite using namespace jtx; Env env{*this, features}; - auto const reserve = drops(env.current()->fees().accountReserve(0)); + auto const reserve = drops(env.current()->fees().reserve); env.fund(reserve, subject, issuer); env.close(); diff --git a/src/test/app/DID_test.cpp b/src/test/app/DID_test.cpp index 21fb6b584e..870a42fd99 100644 --- a/src/test/app/DID_test.cpp +++ b/src/test/app/DID_test.cpp @@ -64,7 +64,7 @@ struct DID_test : public beast::unit_test::suite // Fund alice enough to exist, but not enough to meet // the reserve for creating a DID. - auto const acctReserve = env.current()->fees().accountReserve(0); + auto const acctReserve = env.current()->fees().reserve; auto const incReserve = env.current()->fees().increment; auto const baseFee = env.current()->fees().base; env.fund(acctReserve, alice); diff --git a/src/test/app/EscrowToken_test.cpp b/src/test/app/EscrowToken_test.cpp index 6caedb53f1..8535582af9 100644 --- a/src/test/app/EscrowToken_test.cpp +++ b/src/test/app/EscrowToken_test.cpp @@ -691,7 +691,7 @@ struct EscrowToken_test : public beast::unit_test::suite { Env env{*this, features}; auto const baseFee = env.current()->fees().base; - auto const acctReserve = env.current()->fees().accountReserve(0); + auto const acctReserve = env.current()->fees().reserve; auto const incReserve = env.current()->fees().increment; auto const alice = Account("alice"); auto const bob = Account("bob"); @@ -2691,7 +2691,7 @@ struct EscrowToken_test : public beast::unit_test::suite { Env env{*this, features}; auto const baseFee = env.current()->fees().base; - auto const acctReserve = env.current()->fees().accountReserve(0); + auto const acctReserve = env.current()->fees().reserve; auto const incReserve = env.current()->fees().increment; auto const alice = Account("alice"); diff --git a/src/test/app/MPToken_test.cpp b/src/test/app/MPToken_test.cpp index e9740e67de..f776695337 100644 --- a/src/test/app/MPToken_test.cpp +++ b/src/test/app/MPToken_test.cpp @@ -447,7 +447,7 @@ class MPToken_test : public beast::unit_test::suite // Test mptoken reserve requirement - first two mpts free (doApply) { Env env{*this, features}; - auto const acctReserve = env.current()->fees().accountReserve(0); + auto const acctReserve = env.current()->fees().reserve; auto const incReserve = env.current()->fees().increment; // 1 drop diff --git a/src/test/app/NFToken_test.cpp b/src/test/app/NFToken_test.cpp index 1c4314643c..9edbe4652c 100644 --- a/src/test/app/NFToken_test.cpp +++ b/src/test/app/NFToken_test.cpp @@ -208,7 +208,7 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite // Fund alice and 
minter enough to exist, but not enough to meet // the reserve for creating their first NFT. - auto const acctReserve = env.current()->fees().accountReserve(0); + auto const acctReserve = env.current()->fees().reserve; auto const incReserve = env.current()->fees().increment; auto const baseFee = env.current()->fees().base; @@ -6744,8 +6744,7 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite { // check reserve - auto const acctReserve = - env.current()->fees().accountReserve(0); + auto const acctReserve = env.current()->fees().reserve; auto const incReserve = env.current()->fees().increment; env.fund(acctReserve + incReserve, bob); @@ -7134,7 +7133,7 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite Account const bob{"bob"}; Env env{*this, features}; - auto const acctReserve = env.current()->fees().accountReserve(0); + auto const acctReserve = env.current()->fees().reserve; auto const incReserve = env.current()->fees().increment; auto const baseFee = env.current()->fees().base; @@ -7217,7 +7216,7 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite Account const bob{"bob"}; Env env{*this, features}; - auto const acctReserve = env.current()->fees().accountReserve(0); + auto const acctReserve = env.current()->fees().reserve; auto const incReserve = env.current()->fees().increment; env.fund(XRP(10000), alice); @@ -7314,7 +7313,7 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite Account const bob{"bob"}; Env env{*this, features}; - auto const acctReserve = env.current()->fees().accountReserve(0); + auto const acctReserve = env.current()->fees().reserve; auto const incReserve = env.current()->fees().increment; auto const baseFee = env.current()->fees().base; @@ -7365,7 +7364,7 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite Account const broker{"broker"}; Env env{*this, features}; - auto const acctReserve = env.current()->fees().accountReserve(0); + auto const acctReserve = env.current()->fees().reserve; auto const incReserve = env.current()->fees().increment; auto const baseFee = env.current()->fees().base; diff --git a/src/test/app/PermissionedDomains_test.cpp b/src/test/app/PermissionedDomains_test.cpp index 31e34ccf17..b177261e37 100644 --- a/src/test/app/PermissionedDomains_test.cpp +++ b/src/test/app/PermissionedDomains_test.cpp @@ -528,7 +528,7 @@ class PermissionedDomains_test : public beast::unit_test::suite // Fund alice enough to exist, but not enough to meet // the reserve. - auto const acctReserve = env.current()->fees().accountReserve(0); + auto const acctReserve = env.current()->fees().reserve; auto const incReserve = env.current()->fees().increment; env.fund(acctReserve, alice); env.close(); diff --git a/src/test/app/SetTrust_test.cpp b/src/test/app/SetTrust_test.cpp index 18457b5faf..00ca0aa3a7 100644 --- a/src/test/app/SetTrust_test.cpp +++ b/src/test/app/SetTrust_test.cpp @@ -192,7 +192,7 @@ public: auto const& assistor = createOnHighAcct ? 
acctC : acctD; auto const txFee = env.current()->fees().base; - auto const baseReserve = env.current()->fees().accountReserve(0); + auto const baseReserve = env.current()->fees().reserve; auto const threelineReserve = env.current()->fees().accountReserve(3); env.fund(XRP(10000), gwA, gwB, assistor); diff --git a/src/xrpld/app/misc/FeeVoteImpl.cpp b/src/xrpld/app/misc/FeeVoteImpl.cpp index 85b5791d67..c688cd3b19 100644 --- a/src/xrpld/app/misc/FeeVoteImpl.cpp +++ b/src/xrpld/app/misc/FeeVoteImpl.cpp @@ -142,7 +142,7 @@ FeeVoteImpl::doValidation( }; vote(lastFees.base, target_.reference_fee, "base fee", sfBaseFeeDrops); vote( - lastFees.accountReserve(0), + lastFees.reserve, target_.account_reserve, "base reserve", sfReserveBaseDrops); @@ -178,7 +178,7 @@ FeeVoteImpl::doValidation( vote(lastFees.base, target_.reference_fee, to64, "base fee", sfBaseFee); vote( - lastFees.accountReserve(0), + lastFees.reserve, target_.account_reserve, to32, "base reserve", @@ -207,7 +207,7 @@ FeeVoteImpl::doVoting( lastClosedLedger->fees().base, target_.reference_fee); detail::VotableValue baseReserveVote( - lastClosedLedger->fees().accountReserve(0), target_.account_reserve); + lastClosedLedger->fees().reserve, target_.account_reserve); detail::VotableValue incReserveVote( lastClosedLedger->fees().increment, target_.owner_reserve); diff --git a/src/xrpld/app/misc/NetworkOPs.cpp b/src/xrpld/app/misc/NetworkOPs.cpp index 3154426696..d649bb3a29 100644 --- a/src/xrpld/app/misc/NetworkOPs.cpp +++ b/src/xrpld/app/misc/NetworkOPs.cpp @@ -2936,8 +2936,7 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters) if (!human) { l[jss::base_fee] = baseFee.jsonClipped(); - l[jss::reserve_base] = - lpClosed->fees().accountReserve(0).jsonClipped(); + l[jss::reserve_base] = lpClosed->fees().reserve.jsonClipped(); l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped(); l[jss::close_time] = Json::Value::UInt( lpClosed->info().closeTime.time_since_epoch().count()); @@ -2945,8 +2944,7 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters) else { l[jss::base_fee_xrp] = baseFee.decimalXRP(); - l[jss::reserve_base_xrp] = - lpClosed->fees().accountReserve(0).decimalXRP(); + l[jss::reserve_base_xrp] = lpClosed->fees().reserve.decimalXRP(); l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP(); if (auto const closeOffset = app_.timeKeeper().closeOffset(); @@ -3136,8 +3134,7 @@ NetworkOPsImp::pubLedger(std::shared_ptr const& lpAccepted) if (!lpAccepted->rules().enabled(featureXRPFees)) jvObj[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED; jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped(); - jvObj[jss::reserve_base] = - lpAccepted->fees().accountReserve(0).jsonClipped(); + jvObj[jss::reserve_base] = lpAccepted->fees().reserve.jsonClipped(); jvObj[jss::reserve_inc] = lpAccepted->fees().increment.jsonClipped(); @@ -4215,8 +4212,7 @@ NetworkOPsImp::subLedger(InfoSub::ref isrListener, Json::Value& jvResult) if (!lpClosed->rules().enabled(featureXRPFees)) jvResult[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED; jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped(); - jvResult[jss::reserve_base] = - lpClosed->fees().accountReserve(0).jsonClipped(); + jvResult[jss::reserve_base] = lpClosed->fees().reserve.jsonClipped(); jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped(); jvResult[jss::network_id] = app_.config().NETWORK_ID; } diff --git a/src/xrpld/app/misc/detail/TxQ.cpp b/src/xrpld/app/misc/detail/TxQ.cpp index 6924dae6c8..7c0a6f07e2 100644 --- 
a/src/xrpld/app/misc/detail/TxQ.cpp +++ b/src/xrpld/app/misc/detail/TxQ.cpp @@ -1113,7 +1113,7 @@ TxQ::apply( comparable scale to the base fee, ignore the reserve. Only check the account balance. */ - auto const reserve = view.fees().accountReserve(0); + auto const reserve = view.fees().reserve; auto const base = view.fees().base; if (totalFee >= balance || (reserve > 10 * base && totalFee >= reserve)) diff --git a/src/xrpld/app/paths/PathRequest.cpp b/src/xrpld/app/paths/PathRequest.cpp index 8a88e774d0..a9db61d5b3 100644 --- a/src/xrpld/app/paths/PathRequest.cpp +++ b/src/xrpld/app/paths/PathRequest.cpp @@ -206,8 +206,7 @@ PathRequest::isValid(std::shared_ptr const& crCache) return false; } - if (!convert_all_ && - saDstAmount < STAmount(lrLedger->fees().accountReserve(0))) + if (!convert_all_ && saDstAmount < STAmount(lrLedger->fees().reserve)) { // Payment must meet reserve. jvStatus = rpcError(rpcDST_AMT_MALFORMED); diff --git a/src/xrpld/app/paths/Pathfinder.cpp b/src/xrpld/app/paths/Pathfinder.cpp index 4bc9304853..7f9c2f8437 100644 --- a/src/xrpld/app/paths/Pathfinder.cpp +++ b/src/xrpld/app/paths/Pathfinder.cpp @@ -278,7 +278,7 @@ Pathfinder::findPaths( return false; } - auto const reserve = STAmount(mLedger->fees().accountReserve(0)); + auto const reserve = STAmount(mLedger->fees().reserve); if (mDstAmount < reserve) { JLOG(j_.debug()) diff --git a/src/xrpld/app/tx/detail/Payment.cpp b/src/xrpld/app/tx/detail/Payment.cpp index 81a083e336..b9bdbd0a34 100644 --- a/src/xrpld/app/tx/detail/Payment.cpp +++ b/src/xrpld/app/tx/detail/Payment.cpp @@ -346,7 +346,7 @@ Payment::preclaim(PreclaimContext const& ctx) // transaction would succeed. return telNO_DST_PARTIAL; } - else if (dstAmount < STAmount(ctx.view.fees().accountReserve(0))) + else if (dstAmount < STAmount(ctx.view.fees().reserve)) { // accountReserve is the minimum amount that an account can have. // Reserve is not scaled by load. @@ -690,7 +690,7 @@ Payment::doApply() // to get the account un-wedged. // Get the base reserve. - XRPAmount const dstReserve{view().fees().accountReserve(0)}; + XRPAmount const dstReserve{view().fees().reserve}; if (dstAmount > dstReserve || sleDst->getFieldAmount(sfBalance) > dstReserve) diff --git a/src/xrpld/app/tx/detail/XChainBridge.cpp b/src/xrpld/app/tx/detail/XChainBridge.cpp index d7731d8d98..503a01c3c9 100644 --- a/src/xrpld/app/tx/detail/XChainBridge.cpp +++ b/src/xrpld/app/tx/detail/XChainBridge.cpp @@ -476,7 +476,7 @@ transferHelper( // Already checked, but OK to check again return tecNO_DST; } - if (amt < psb.fees().accountReserve(0)) + if (amt < psb.fees().reserve) { JLOG(j.trace()) << "Insufficient payment to create account."; return tecNO_DST_INSUF_XRP; From 108f90586c68a83680e2f2710767684e22f5d97d Mon Sep 17 00:00:00 2001 From: Jingchen Date: Wed, 15 Oct 2025 21:53:01 +0100 Subject: [PATCH 235/244] chore: Reduce build log verbosity on Windows (#5865) Windows is extremely chatty and generates tons of logs when building, making it practically impossible to use the build logs to debug issues. This change sets the verbosity to 'quiet' on Windows. 
Co-authored-by: Bart Thomee <11445373+bthomee@users.noreply.github.com> --- .github/actions/build-deps/action.yml | 7 +++++++ .github/workflows/upload-conan-deps.yml | 3 +++ conan/global.conf | 5 +---- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/.github/actions/build-deps/action.yml b/.github/actions/build-deps/action.yml index a908c656e8..7b2a3c385a 100644 --- a/.github/actions/build-deps/action.yml +++ b/.github/actions/build-deps/action.yml @@ -4,6 +4,10 @@ description: "Install Conan dependencies, optionally forcing a rebuild of all de # Note that actions do not support 'type' and all inputs are strings, see # https://docs.github.com/en/actions/reference/workflows-and-actions/metadata-syntax#inputs. inputs: + verbosity: + description: "The build verbosity." + required: false + default: "verbose" build_dir: description: "The directory where to build." required: true @@ -34,4 +38,7 @@ runs: --options:host='&:tests=True' \ --options:host='&:xrpld=True' \ --settings:all build_type='${{ env.BUILD_TYPE }}' \ + --conf:all tools.build:verbosity='${{ inputs.verbosity }}' \ + --conf:all tools.compilation:verbosity='${{ inputs.verbosity }}' \ + --conf:all tools.build:jobs=$(nproc) \ .. diff --git a/.github/workflows/upload-conan-deps.yml b/.github/workflows/upload-conan-deps.yml index 680602d978..aedd367f65 100644 --- a/.github/workflows/upload-conan-deps.yml +++ b/.github/workflows/upload-conan-deps.yml @@ -79,6 +79,9 @@ jobs: build_dir: .build build_type: ${{ matrix.build_type }} force_build: ${{ github.event_name == 'schedule' || github.event.inputs.force_source_build == 'true' }} + # The verbosity is set to "quiet" for Windows to avoid an excessive amount of logs, while it + # is set to "verbose" otherwise to provide more information during the build process. + verbosity: ${{ runner.os == 'Windows' && 'quiet' || 'verbose' }} - name: Log into Conan remote if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' }} diff --git a/conan/global.conf b/conan/global.conf index ae03818232..41ac76da89 100644 --- a/conan/global.conf +++ b/conan/global.conf @@ -1,9 +1,6 @@ # Global configuration for Conan. This is used to set the number of parallel -# downloads, uploads, and build jobs. The verbosity is set to verbose to -# provide more information during the build process. +# downloads, uploads, and build jobs. core:non_interactive=True core.download:parallel={{ os.cpu_count() }} core.upload:parallel={{ os.cpu_count() }} tools.build:jobs={{ (os.cpu_count() * 4/5) | int }} -tools.build:verbosity=verbose -tools.compilation:verbosity=verbose From a422855ea7415c1e2e8d135812021823d8a641c1 Mon Sep 17 00:00:00 2001 From: Mayukha Vadari Date: Wed, 15 Oct 2025 16:55:11 -0400 Subject: [PATCH 236/244] refactor: replace JSON LastLedgerSequence with last_ledger_seq (#5884) This change replaces instances of JSON LastLedgerSequence with last_ledger_seq, which makes the tests a bit simpler and easier to read. 
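For illustration, a minimal sketch of the call-site pattern (not taken verbatim from any one test): the jtx fixtures are set up inline, namespace qualifications are simplified, and the account name and amounts are placeholders.

```cpp
// Minimal sketch, written as the body of a unit test suite member (hence *this);
// namespace qualifications are simplified and the account/amounts are placeholders.
using namespace jtx;
Env env{*this};
Account const alice{"alice"};
env.fund(XRP(10000), alice);
env.close();

// Before: the LastLedgerSequence field is spliced in as raw JSON text.
env(noop(alice), json(R"({"LastLedgerSequence":7})"));

// After: the dedicated jtx helper sets the same field and reads more directly.
env(noop(alice), last_ledger_seq(7));
```
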
Co-authored-by: Bart Thomee <11445373+bthomee@users.noreply.github.com> --- src/test/app/Transaction_ordering_test.cpp | 16 +++------ src/test/app/TxQ_test.cpp | 42 +++++++++------------- src/test/rpc/LedgerRPC_test.cpp | 2 +- 3 files changed, 23 insertions(+), 37 deletions(-) diff --git a/src/test/app/Transaction_ordering_test.cpp b/src/test/app/Transaction_ordering_test.cpp index 472d4e2ab8..5ace79db59 100644 --- a/src/test/app/Transaction_ordering_test.cpp +++ b/src/test/app/Transaction_ordering_test.cpp @@ -37,10 +37,8 @@ struct Transaction_ordering_test : public beast::unit_test::suite auto const aliceSequence = env.seq(alice); auto const tx1 = env.jt(noop(alice), seq(aliceSequence)); - auto const tx2 = env.jt( - noop(alice), - seq(aliceSequence + 1), - json(R"({"LastLedgerSequence":7})")); + auto const tx2 = + env.jt(noop(alice), seq(aliceSequence + 1), last_ledger_seq(7)); env(tx1); env.close(); @@ -83,10 +81,8 @@ struct Transaction_ordering_test : public beast::unit_test::suite auto const aliceSequence = env.seq(alice); auto const tx1 = env.jt(noop(alice), seq(aliceSequence)); - auto const tx2 = env.jt( - noop(alice), - seq(aliceSequence + 1), - json(R"({"LastLedgerSequence":7})")); + auto const tx2 = + env.jt(noop(alice), seq(aliceSequence + 1), last_ledger_seq(7)); env(tx2, ter(terPRE_SEQ)); BEAST_EXPECT(env.seq(alice) == aliceSequence); @@ -131,9 +127,7 @@ struct Transaction_ordering_test : public beast::unit_test::suite for (auto i = 0; i < 5; ++i) { tx.emplace_back(env.jt( - noop(alice), - seq(aliceSequence + i), - json(R"({"LastLedgerSequence":7})"))); + noop(alice), seq(aliceSequence + i), last_ledger_seq(7))); } for (auto i = 1; i < 5; ++i) diff --git a/src/test/app/TxQ_test.cpp b/src/test/app/TxQ_test.cpp index d0965cc8ff..190acfeddf 100644 --- a/src/test/app/TxQ_test.cpp +++ b/src/test/app/TxQ_test.cpp @@ -747,11 +747,9 @@ public: BEAST_EXPECT(env.current()->info().seq == 6); // Fail to queue an item with a low LastLedgerSeq - env(noop(alice), - json(R"({"LastLedgerSequence":7})"), - ter(telCAN_NOT_QUEUE)); + env(noop(alice), last_ledger_seq(7), ter(telCAN_NOT_QUEUE)); // Queue an item with a sufficient LastLedgerSeq. 
- env(noop(alice), json(R"({"LastLedgerSequence":8})"), queued); + env(noop(alice), last_ledger_seq(8), queued); constexpr auto largeFeeMultiplier = 700; auto const largeFee = baseFee * largeFeeMultiplier; @@ -2705,21 +2703,15 @@ public: auto const aliceSeq = env.seq(alice); BEAST_EXPECT(env.current()->info().seq == 3); - env(noop(alice), - seq(aliceSeq), - json(R"({"LastLedgerSequence":5})"), - ter(terQUEUED)); - env(noop(alice), - seq(aliceSeq + 1), - json(R"({"LastLedgerSequence":5})"), - ter(terQUEUED)); + env(noop(alice), seq(aliceSeq), last_ledger_seq(5), ter(terQUEUED)); + env(noop(alice), seq(aliceSeq + 1), last_ledger_seq(5), ter(terQUEUED)); env(noop(alice), seq(aliceSeq + 2), - json(R"({"LastLedgerSequence":10})"), + last_ledger_seq(10), ter(terQUEUED)); env(noop(alice), seq(aliceSeq + 3), - json(R"({"LastLedgerSequence":11})"), + last_ledger_seq(11), ter(terQUEUED)); checkMetrics(*this, env, 4, std::nullopt, 2, 1); auto const bobSeq = env.seq(bob); @@ -2816,39 +2808,39 @@ public: ter(terQUEUED)); env(noop(alice), seq(aliceSeq + 11), - json(R"({"LastLedgerSequence":11})"), + last_ledger_seq(11), ter(terQUEUED)); env(noop(alice), seq(aliceSeq + 12), - json(R"({"LastLedgerSequence":11})"), + last_ledger_seq(11), ter(terQUEUED)); env(noop(alice), seq(aliceSeq + 13), - json(R"({"LastLedgerSequence":11})"), + last_ledger_seq(11), ter(terQUEUED)); env(noop(alice), seq(aliceSeq + 14), - json(R"({"LastLedgerSequence":11})"), + last_ledger_seq(11), ter(terQUEUED)); env(noop(alice), seq(aliceSeq + 15), - json(R"({"LastLedgerSequence":11})"), + last_ledger_seq(11), ter(terQUEUED)); env(noop(alice), seq(aliceSeq + 16), - json(R"({"LastLedgerSequence": 5})"), + last_ledger_seq(5), ter(terQUEUED)); env(noop(alice), seq(aliceSeq + 17), - json(R"({"LastLedgerSequence": 5})"), + last_ledger_seq(5), ter(terQUEUED)); env(noop(alice), seq(aliceSeq + 18), - json(R"({"LastLedgerSequence": 5})"), + last_ledger_seq(5), ter(terQUEUED)); env(noop(alice), seq(aliceSeq + 19), - json(R"({"LastLedgerSequence":11})"), + last_ledger_seq(11), ter(terQUEUED)); checkMetrics(*this, env, 10, std::nullopt, 2, 1); @@ -4575,7 +4567,7 @@ public: env(noop(alice), seq(seqAlice++), fee(--feeDrops), - json(R"({"LastLedgerSequence": 7})"), + last_ledger_seq(7), ter(terQUEUED)); env(noop(alice), seq(seqAlice++), fee(--feeDrops), ter(terQUEUED)); env(noop(alice), seq(seqAlice++), fee(--feeDrops), ter(terQUEUED)); @@ -4585,7 +4577,7 @@ public: // The drop penalty works a little differently with tickets. env(noop(bob), ticket::use(bobTicketSeq + 0), - json(R"({"LastLedgerSequence": 7})"), + last_ledger_seq(7), ter(terQUEUED)); env(noop(bob), ticket::use(bobTicketSeq + 1), diff --git a/src/test/rpc/LedgerRPC_test.cpp b/src/test/rpc/LedgerRPC_test.cpp index 9309fbdd6c..3c8da6dc13 100644 --- a/src/test/rpc/LedgerRPC_test.cpp +++ b/src/test/rpc/LedgerRPC_test.cpp @@ -527,7 +527,7 @@ class LedgerRPC_test : public beast::unit_test::suite // Alice auto aliceSeq = env.seq(alice); env(pay(alice, "george", XRP(1000)), - json(R"({"LastLedgerSequence":7})"), + last_ledger_seq(7), ter(terQUEUED)); env(offer(alice, XRP(50000), alice["USD"](5000)), seq(aliceSeq + 1), From 640ce4988fdb98bfcfda3e6fd973d4fc7cfb1e56 Mon Sep 17 00:00:00 2001 From: Mayukha Vadari Date: Thu, 16 Oct 2025 08:46:21 -0400 Subject: [PATCH 237/244] refactor: replace boost::lexical_cast with to_string (#5883) This change replaces boost::lexical_cast with to_string in some of the tests to make them more readable. 
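For illustration, a sketch of the call-site change: `env` and `jss` are assumed from the surrounding test fixtures, and the `<std::string>` template argument shown is the usual boost::lexical_cast spelling.

```cpp
// Sketch only: env and jss come from the surrounding test fixtures.
Json::Value jvParams;
jvParams[jss::ledger_index] = "current";

// Before: generic boost conversion of the request to a string.
auto const before = env.rpc(
    "json", "ledger_data", boost::lexical_cast<std::string>(jvParams));

// After: the Json library's own to_string serializes the request directly.
auto const after = env.rpc("json", "ledger_data", to_string(jvParams));
```
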
Co-authored-by: Bart Thomee <11445373+bthomee@users.noreply.github.com> --- src/test/app/NFTokenBurn_test.cpp | 24 +++---- src/test/app/NFTokenDir_test.cpp | 6 +- src/test/app/Oracle_test.cpp | 6 +- src/test/jtx/impl/Env.cpp | 2 +- src/test/rpc/AccountCurrencies_test.cpp | 36 +++------- src/test/rpc/Feature_test.cpp | 18 ++--- src/test/rpc/LedgerData_test.cpp | 65 +++++------------- src/test/rpc/LedgerRPC_test.cpp | 89 ++++++------------------- src/test/rpc/NoRippleCheck_test.cpp | 66 ++++++------------ src/test/rpc/TransactionEntry_test.cpp | 15 ++--- 10 files changed, 91 insertions(+), 236 deletions(-) diff --git a/src/test/app/NFTokenBurn_test.cpp b/src/test/app/NFTokenBurn_test.cpp index 44c55f2b8c..7e582446ef 100644 --- a/src/test/app/NFTokenBurn_test.cpp +++ b/src/test/app/NFTokenBurn_test.cpp @@ -88,10 +88,8 @@ class NFTokenBurnBaseUtil_test : public beast::unit_test::suite jvParams[jss::ledger_index] = "current"; jvParams[jss::binary] = false; { - Json::Value jrr = env.rpc( - "json", - "ledger_data", - boost::lexical_cast(jvParams)); + Json::Value jrr = + env.rpc("json", "ledger_data", to_string(jvParams)); // Iterate the state and print all NFTokenPages. if (!jrr.isMember(jss::result) || @@ -413,10 +411,8 @@ class NFTokenBurnBaseUtil_test : public beast::unit_test::suite jvParams[jss::ledger_index] = "current"; jvParams[jss::binary] = false; { - Json::Value jrr = env.rpc( - "json", - "ledger_data", - boost::lexical_cast(jvParams)); + Json::Value jrr = + env.rpc("json", "ledger_data", to_string(jvParams)); Json::Value& state = jrr[jss::result][jss::state]; @@ -460,10 +456,8 @@ class NFTokenBurnBaseUtil_test : public beast::unit_test::suite jvParams[jss::ledger_index] = "current"; jvParams[jss::binary] = false; { - Json::Value jrr = env.rpc( - "json", - "ledger_data", - boost::lexical_cast(jvParams)); + Json::Value jrr = + env.rpc("json", "ledger_data", to_string(jvParams)); Json::Value& state = jrr[jss::result][jss::state]; @@ -1235,10 +1229,8 @@ class NFTokenBurnBaseUtil_test : public beast::unit_test::suite jvParams[jss::ledger_index] = "current"; jvParams[jss::binary] = false; { - Json::Value jrr = env.rpc( - "json", - "ledger_data", - boost::lexical_cast(jvParams)); + Json::Value jrr = + env.rpc("json", "ledger_data", to_string(jvParams)); Json::Value& state = jrr[jss::result][jss::state]; diff --git a/src/test/app/NFTokenDir_test.cpp b/src/test/app/NFTokenDir_test.cpp index a63653d8dc..19f4f7efba 100644 --- a/src/test/app/NFTokenDir_test.cpp +++ b/src/test/app/NFTokenDir_test.cpp @@ -47,10 +47,8 @@ class NFTokenDir_test : public beast::unit_test::suite jvParams[jss::ledger_index] = "current"; jvParams[jss::binary] = false; { - Json::Value jrr = env.rpc( - "json", - "ledger_data", - boost::lexical_cast(jvParams)); + Json::Value jrr = + env.rpc("json", "ledger_data", to_string(jvParams)); // Iterate the state and print all NFTokenPages. 
if (!jrr.isMember(jss::result) || diff --git a/src/test/app/Oracle_test.cpp b/src/test/app/Oracle_test.cpp index f0cde41394..c81d441abc 100644 --- a/src/test/app/Oracle_test.cpp +++ b/src/test/app/Oracle_test.cpp @@ -592,10 +592,8 @@ private: jvParams[field] = value; jvParams[jss::binary] = false; jvParams[jss::type] = jss::oracle; - Json::Value jrr = env.rpc( - "json", - "ledger_data", - boost::lexical_cast(jvParams)); + Json::Value jrr = + env.rpc("json", "ledger_data", to_string(jvParams)); BEAST_EXPECT(jrr[jss::result][jss::state].size() == 2); }; verifyLedgerData(jss::ledger_index, index); diff --git a/src/test/jtx/impl/Env.cpp b/src/test/jtx/impl/Env.cpp index ae99e1b5d6..f17f6cb39d 100644 --- a/src/test/jtx/impl/Env.cpp +++ b/src/test/jtx/impl/Env.cpp @@ -411,7 +411,7 @@ Env::sign_and_submit(JTx const& jt, Json::Value params) if (params.isNull()) { // Use the command line interface - auto const jv = boost::lexical_cast(jt.jv); + auto const jv = to_string(jt.jv); jr = rpc("submit", passphrase, jv); } else diff --git a/src/test/rpc/AccountCurrencies_test.cpp b/src/test/rpc/AccountCurrencies_test.cpp index 3ccb89c471..b21aacf865 100644 --- a/src/test/rpc/AccountCurrencies_test.cpp +++ b/src/test/rpc/AccountCurrencies_test.cpp @@ -43,9 +43,7 @@ class AccountCurrencies_test : public beast::unit_test::suite params[jss::account] = Account{"bob"}.human(); params[jss::ledger_hash] = 1; auto const result = env.rpc( - "json", - "account_currencies", - boost::lexical_cast(params))[jss::result]; + "json", "account_currencies", to_string(params))[jss::result]; BEAST_EXPECT(result[jss::error] == "invalidParams"); BEAST_EXPECT(result[jss::error_message] == "ledgerHashNotString"); } @@ -107,9 +105,7 @@ class AccountCurrencies_test : public beast::unit_test::suite params[jss::account] = "llIIOO"; // these are invalid in bitcoin alphabet auto const result = env.rpc( - "json", - "account_currencies", - boost::lexical_cast(params))[jss::result]; + "json", "account_currencies", to_string(params))[jss::result]; BEAST_EXPECT(result[jss::error] == "actMalformed"); BEAST_EXPECT(result[jss::error_message] == "Account malformed."); } @@ -119,9 +115,7 @@ class AccountCurrencies_test : public beast::unit_test::suite Json::Value params; params[jss::account] = "Bob"; auto const result = env.rpc( - "json", - "account_currencies", - boost::lexical_cast(params))[jss::result]; + "json", "account_currencies", to_string(params))[jss::result]; BEAST_EXPECT(result[jss::error] == "actMalformed"); BEAST_EXPECT(result[jss::error_message] == "Account malformed."); } @@ -130,9 +124,7 @@ class AccountCurrencies_test : public beast::unit_test::suite Json::Value params; params[jss::account] = Account{"bob"}.human(); auto const result = env.rpc( - "json", - "account_currencies", - boost::lexical_cast(params))[jss::result]; + "json", "account_currencies", to_string(params))[jss::result]; BEAST_EXPECT(result[jss::error] == "actNotFound"); BEAST_EXPECT(result[jss::error_message] == "Account not found."); } @@ -161,9 +153,7 @@ class AccountCurrencies_test : public beast::unit_test::suite Json::Value params; params[jss::account] = alice.human(); auto result = env.rpc( - "json", - "account_currencies", - boost::lexical_cast(params))[jss::result]; + "json", "account_currencies", to_string(params))[jss::result]; auto arrayCheck = [&result]( @@ -189,9 +179,7 @@ class AccountCurrencies_test : public beast::unit_test::suite // send_currencies should be populated now result = env.rpc( - "json", - "account_currencies", - 
boost::lexical_cast(params))[jss::result]; + "json", "account_currencies", to_string(params))[jss::result]; BEAST_EXPECT(arrayCheck(jss::receive_currencies, gwCurrencies)); BEAST_EXPECT(arrayCheck(jss::send_currencies, gwCurrencies)); @@ -203,9 +191,7 @@ class AccountCurrencies_test : public beast::unit_test::suite BEAST_EXPECT( l[jss::freeze].asBool() == (l[jss::currency] == "USD")); result = env.rpc( - "json", - "account_currencies", - boost::lexical_cast(params))[jss::result]; + "json", "account_currencies", to_string(params))[jss::result]; BEAST_EXPECT(arrayCheck(jss::receive_currencies, gwCurrencies)); BEAST_EXPECT(arrayCheck(jss::send_currencies, gwCurrencies)); // clear the freeze @@ -215,9 +201,7 @@ class AccountCurrencies_test : public beast::unit_test::suite env(pay(gw, alice, gw["USA"](50))); // USA should now be missing from receive_currencies result = env.rpc( - "json", - "account_currencies", - boost::lexical_cast(params))[jss::result]; + "json", "account_currencies", to_string(params))[jss::result]; decltype(gwCurrencies) gwCurrenciesNoUSA( gwCurrencies.begin() + 1, gwCurrencies.end()); BEAST_EXPECT(arrayCheck(jss::receive_currencies, gwCurrenciesNoUSA)); @@ -228,9 +212,7 @@ class AccountCurrencies_test : public beast::unit_test::suite env(trust(gw, alice["USA"](100))); env(pay(alice, gw, alice["USA"](200))); result = env.rpc( - "json", - "account_currencies", - boost::lexical_cast(params))[jss::result]; + "json", "account_currencies", to_string(params))[jss::result]; BEAST_EXPECT(arrayCheck(jss::receive_currencies, gwCurrencies)); BEAST_EXPECT(arrayCheck(jss::send_currencies, gwCurrenciesNoUSA)); } diff --git a/src/test/rpc/Feature_test.cpp b/src/test/rpc/Feature_test.cpp index 06697f80c1..84fc284b13 100644 --- a/src/test/rpc/Feature_test.cpp +++ b/src/test/rpc/Feature_test.cpp @@ -309,10 +309,8 @@ class Feature_test : public beast::unit_test::suite params[jss::feature] = "1234567890ABCDEF1234567890ABCDEF1234567890ABCDEF1234567890ABCD" "EF"; - auto const result = env.rpc( - "json", - "feature", - boost::lexical_cast(params))[jss::result]; + auto const result = + env.rpc("json", "feature", to_string(params))[jss::result]; BEAST_EXPECTS( result[jss::error] == "badFeature", result.toStyledString()); BEAST_EXPECT( @@ -326,10 +324,8 @@ class Feature_test : public beast::unit_test::suite "A7"; // invalid param params[jss::vetoed] = true; - auto const result = env.rpc( - "json", - "feature", - boost::lexical_cast(params))[jss::result]; + auto const result = + env.rpc("json", "feature", to_string(params))[jss::result]; BEAST_EXPECTS( result[jss::error] == "noPermission", result[jss::error].asString()); @@ -344,10 +340,8 @@ class Feature_test : public beast::unit_test::suite "37"; Json::Value params; params[jss::feature] = feature; - auto const result = env.rpc( - "json", - "feature", - boost::lexical_cast(params))[jss::result]; + auto const result = + env.rpc("json", "feature", to_string(params))[jss::result]; BEAST_EXPECT(result.isMember(feature)); auto const amendmentResult = result[feature]; BEAST_EXPECT(amendmentResult[jss::enabled].asBool() == false); diff --git a/src/test/rpc/LedgerData_test.cpp b/src/test/rpc/LedgerData_test.cpp index d57b33013a..120e49f129 100644 --- a/src/test/rpc/LedgerData_test.cpp +++ b/src/test/rpc/LedgerData_test.cpp @@ -63,9 +63,7 @@ public: jvParams[jss::binary] = false; { auto const jrr = env.rpc( - "json", - "ledger_data", - boost::lexical_cast(jvParams))[jss::result]; + "json", "ledger_data", to_string(jvParams))[jss::result]; BEAST_EXPECT( 
jrr[jss::ledger_current_index].isIntegral() && jrr[jss::ledger_current_index].asInt() > 0); @@ -78,9 +76,7 @@ public: { jvParams[jss::limit] = max_limit + delta; auto const jrr = env.rpc( - "json", - "ledger_data", - boost::lexical_cast(jvParams))[jss::result]; + "json", "ledger_data", to_string(jvParams))[jss::result]; BEAST_EXPECT(checkArraySize( jrr[jss::state], (delta > 0 && !asAdmin) ? max_limit : max_limit + delta)); @@ -109,10 +105,8 @@ public: Json::Value jvParams; jvParams[jss::ledger_index] = "current"; jvParams[jss::binary] = true; - auto const jrr = env.rpc( - "json", - "ledger_data", - boost::lexical_cast(jvParams))[jss::result]; + auto const jrr = + env.rpc("json", "ledger_data", to_string(jvParams))[jss::result]; BEAST_EXPECT( jrr[jss::ledger_current_index].isIntegral() && jrr[jss::ledger_current_index].asInt() > 0); @@ -137,9 +131,7 @@ public: Json::Value jvParams; jvParams[jss::limit] = "0"; // NOT an integer auto const jrr = env.rpc( - "json", - "ledger_data", - boost::lexical_cast(jvParams))[jss::result]; + "json", "ledger_data", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr[jss::error] == "invalidParams"); BEAST_EXPECT(jrr[jss::status] == "error"); BEAST_EXPECT( @@ -152,9 +144,7 @@ public: Json::Value jvParams; jvParams[jss::marker] = "NOT_A_MARKER"; auto const jrr = env.rpc( - "json", - "ledger_data", - boost::lexical_cast(jvParams))[jss::result]; + "json", "ledger_data", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr[jss::error] == "invalidParams"); BEAST_EXPECT(jrr[jss::status] == "error"); BEAST_EXPECT( @@ -167,9 +157,7 @@ public: Json::Value jvParams; jvParams[jss::marker] = 1; auto const jrr = env.rpc( - "json", - "ledger_data", - boost::lexical_cast(jvParams))[jss::result]; + "json", "ledger_data", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr[jss::error] == "invalidParams"); BEAST_EXPECT(jrr[jss::status] == "error"); BEAST_EXPECT( @@ -182,9 +170,7 @@ public: Json::Value jvParams; jvParams[jss::ledger_index] = 10u; auto const jrr = env.rpc( - "json", - "ledger_data", - boost::lexical_cast(jvParams))[jss::result]; + "json", "ledger_data", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr[jss::error] == "lgrNotFound"); BEAST_EXPECT(jrr[jss::status] == "error"); BEAST_EXPECT(jrr[jss::error_message] == "ledgerNotFound"); @@ -213,27 +199,20 @@ public: Json::Value jvParams; jvParams[jss::ledger_index] = "current"; jvParams[jss::binary] = false; - auto jrr = env.rpc( - "json", - "ledger_data", - boost::lexical_cast(jvParams))[jss::result]; + auto jrr = + env.rpc("json", "ledger_data", to_string(jvParams))[jss::result]; auto const total_count = jrr[jss::state].size(); // now make request with a limit and loop until we get all jvParams[jss::limit] = 5; - jrr = env.rpc( - "json", - "ledger_data", - boost::lexical_cast(jvParams))[jss::result]; + jrr = env.rpc("json", "ledger_data", to_string(jvParams))[jss::result]; BEAST_EXPECT(checkMarker(jrr)); auto running_total = jrr[jss::state].size(); while (jrr.isMember(jss::marker)) { jvParams[jss::marker] = jrr[jss::marker]; jrr = env.rpc( - "json", - "ledger_data", - boost::lexical_cast(jvParams))[jss::result]; + "json", "ledger_data", to_string(jvParams))[jss::result]; running_total += jrr[jss::state].size(); } BEAST_EXPECT(running_total == total_count); @@ -253,9 +232,7 @@ public: Json::Value jvParams; jvParams[jss::ledger_index] = "closed"; auto jrr = env.rpc( - "json", - "ledger_data", - boost::lexical_cast(jvParams))[jss::result]; + "json", "ledger_data", to_string(jvParams))[jss::result]; if 
(BEAST_EXPECT(jrr.isMember(jss::ledger))) BEAST_EXPECT( jrr[jss::ledger][jss::ledger_hash] == @@ -267,9 +244,7 @@ public: jvParams[jss::ledger_index] = "closed"; jvParams[jss::binary] = true; auto jrr = env.rpc( - "json", - "ledger_data", - boost::lexical_cast(jvParams))[jss::result]; + "json", "ledger_data", to_string(jvParams))[jss::result]; if (BEAST_EXPECT(jrr.isMember(jss::ledger))) { auto data = @@ -288,9 +263,7 @@ public: Json::Value jvParams; jvParams[jss::binary] = true; auto jrr = env.rpc( - "json", - "ledger_data", - boost::lexical_cast(jvParams))[jss::result]; + "json", "ledger_data", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr.isMember(jss::ledger)); BEAST_EXPECT(!jrr[jss::ledger].isMember(jss::ledger_data)); } @@ -319,9 +292,7 @@ public: jvParams[jss::ledger_index] = "current"; jvParams[jss::type] = type; return env.rpc( - "json", - "ledger_data", - boost::lexical_cast(jvParams))[jss::result]; + "json", "ledger_data", to_string(jvParams))[jss::result]; }; // Assert that state is an empty array. @@ -500,9 +471,7 @@ public: jvParams[jss::ledger_index] = "current"; jvParams[jss::type] = "misspelling"; auto const jrr = env.rpc( - "json", - "ledger_data", - boost::lexical_cast(jvParams))[jss::result]; + "json", "ledger_data", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr.isMember("error")); BEAST_EXPECT(jrr["error"] == "invalidParams"); BEAST_EXPECT(jrr["error_message"] == "Invalid field 'type'."); diff --git a/src/test/rpc/LedgerRPC_test.cpp b/src/test/rpc/LedgerRPC_test.cpp index 3c8da6dc13..6e132857ae 100644 --- a/src/test/rpc/LedgerRPC_test.cpp +++ b/src/test/rpc/LedgerRPC_test.cpp @@ -287,56 +287,39 @@ class LedgerRPC_test : public beast::unit_test::suite // access via the legacy ledger field, keyword index values Json::Value jvParams; jvParams[jss::ledger] = "closed"; - auto jrr = env.rpc( - "json", - "ledger", - boost::lexical_cast(jvParams))[jss::result]; + auto jrr = + env.rpc("json", "ledger", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr.isMember(jss::ledger)); BEAST_EXPECT(jrr.isMember(jss::ledger_hash)); BEAST_EXPECT(jrr[jss::ledger][jss::ledger_index] == "5"); jvParams[jss::ledger] = "validated"; - jrr = env.rpc( - "json", - "ledger", - boost::lexical_cast(jvParams))[jss::result]; + jrr = env.rpc("json", "ledger", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr.isMember(jss::ledger)); BEAST_EXPECT(jrr.isMember(jss::ledger_hash)); BEAST_EXPECT(jrr[jss::ledger][jss::ledger_index] == "5"); jvParams[jss::ledger] = "current"; - jrr = env.rpc( - "json", - "ledger", - boost::lexical_cast(jvParams))[jss::result]; + jrr = env.rpc("json", "ledger", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr.isMember(jss::ledger)); BEAST_EXPECT(jrr[jss::ledger][jss::ledger_index] == "6"); // ask for a bad ledger keyword jvParams[jss::ledger] = "invalid"; - jrr = env.rpc( - "json", - "ledger", - boost::lexical_cast(jvParams))[jss::result]; + jrr = env.rpc("json", "ledger", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr[jss::error] == "invalidParams"); BEAST_EXPECT(jrr[jss::error_message] == "ledgerIndexMalformed"); // numeric index jvParams[jss::ledger] = 4; - jrr = env.rpc( - "json", - "ledger", - boost::lexical_cast(jvParams))[jss::result]; + jrr = env.rpc("json", "ledger", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr.isMember(jss::ledger)); BEAST_EXPECT(jrr.isMember(jss::ledger_hash)); BEAST_EXPECT(jrr[jss::ledger][jss::ledger_index] == "4"); // numeric index - out of range jvParams[jss::ledger] = 20; - jrr = env.rpc( - "json", - "ledger", - 
boost::lexical_cast(jvParams))[jss::result]; + jrr = env.rpc("json", "ledger", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr[jss::error] == "lgrNotFound"); BEAST_EXPECT(jrr[jss::error_message] == "ledgerNotFound"); } @@ -348,29 +331,21 @@ class LedgerRPC_test : public beast::unit_test::suite // access via the ledger_hash field Json::Value jvParams; jvParams[jss::ledger_hash] = hash3; - auto jrr = env.rpc( - "json", - "ledger", - boost::lexical_cast(jvParams))[jss::result]; + auto jrr = + env.rpc("json", "ledger", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr.isMember(jss::ledger)); BEAST_EXPECT(jrr.isMember(jss::ledger_hash)); BEAST_EXPECT(jrr[jss::ledger][jss::ledger_index] == "3"); // extra leading hex chars in hash are not allowed jvParams[jss::ledger_hash] = "DEADBEEF" + hash3; - jrr = env.rpc( - "json", - "ledger", - boost::lexical_cast(jvParams))[jss::result]; + jrr = env.rpc("json", "ledger", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr[jss::error] == "invalidParams"); BEAST_EXPECT(jrr[jss::error_message] == "ledgerHashMalformed"); // request with non-string ledger_hash jvParams[jss::ledger_hash] = 2; - jrr = env.rpc( - "json", - "ledger", - boost::lexical_cast(jvParams))[jss::result]; + jrr = env.rpc("json", "ledger", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr[jss::error] == "invalidParams"); BEAST_EXPECT(jrr[jss::error_message] == "ledgerHashNotString"); @@ -378,10 +353,7 @@ class LedgerRPC_test : public beast::unit_test::suite jvParams[jss::ledger_hash] = "2E81FC6EC0DD943197EGC7E3FBE9AE30" "7F2775F2F7485BB37307984C3C0F2340"; - jrr = env.rpc( - "json", - "ledger", - boost::lexical_cast(jvParams))[jss::result]; + jrr = env.rpc("json", "ledger", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr[jss::error] == "invalidParams"); BEAST_EXPECT(jrr[jss::error_message] == "ledgerHashMalformed"); @@ -389,10 +361,7 @@ class LedgerRPC_test : public beast::unit_test::suite jvParams[jss::ledger_hash] = "8C3EEDB3124D92E49E75D81A8826A2E6" "5A75FD71FC3FD6F36FEB803C5F1D812D"; - jrr = env.rpc( - "json", - "ledger", - boost::lexical_cast(jvParams))[jss::result]; + jrr = env.rpc("json", "ledger", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr[jss::error] == "lgrNotFound"); BEAST_EXPECT(jrr[jss::error_message] == "ledgerNotFound"); } @@ -401,39 +370,28 @@ class LedgerRPC_test : public beast::unit_test::suite // access via the ledger_index field, keyword index values Json::Value jvParams; jvParams[jss::ledger_index] = "closed"; - auto jrr = env.rpc( - "json", - "ledger", - boost::lexical_cast(jvParams))[jss::result]; + auto jrr = + env.rpc("json", "ledger", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr.isMember(jss::ledger)); BEAST_EXPECT(jrr.isMember(jss::ledger_hash)); BEAST_EXPECT(jrr[jss::ledger][jss::ledger_index] == "5"); BEAST_EXPECT(jrr.isMember(jss::ledger_index)); jvParams[jss::ledger_index] = "validated"; - jrr = env.rpc( - "json", - "ledger", - boost::lexical_cast(jvParams))[jss::result]; + jrr = env.rpc("json", "ledger", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr.isMember(jss::ledger)); BEAST_EXPECT(jrr.isMember(jss::ledger_hash)); BEAST_EXPECT(jrr[jss::ledger][jss::ledger_index] == "5"); jvParams[jss::ledger_index] = "current"; - jrr = env.rpc( - "json", - "ledger", - boost::lexical_cast(jvParams))[jss::result]; + jrr = env.rpc("json", "ledger", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr.isMember(jss::ledger)); BEAST_EXPECT(jrr[jss::ledger][jss::ledger_index] == "6"); BEAST_EXPECT(jrr.isMember(jss::ledger_current_index)); // ask 
for a bad ledger keyword jvParams[jss::ledger_index] = "invalid"; - jrr = env.rpc( - "json", - "ledger", - boost::lexical_cast(jvParams))[jss::result]; + jrr = env.rpc("json", "ledger", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr[jss::error] == "invalidParams"); BEAST_EXPECT(jrr[jss::error_message] == "ledgerIndexMalformed"); @@ -441,10 +399,8 @@ class LedgerRPC_test : public beast::unit_test::suite for (auto i : {1, 2, 3, 4, 5, 6}) { jvParams[jss::ledger_index] = i; - jrr = env.rpc( - "json", - "ledger", - boost::lexical_cast(jvParams))[jss::result]; + jrr = + env.rpc("json", "ledger", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr.isMember(jss::ledger)); if (i < 6) BEAST_EXPECT(jrr.isMember(jss::ledger_hash)); @@ -454,10 +410,7 @@ class LedgerRPC_test : public beast::unit_test::suite // numeric index - out of range jvParams[jss::ledger_index] = 7; - jrr = env.rpc( - "json", - "ledger", - boost::lexical_cast(jvParams))[jss::result]; + jrr = env.rpc("json", "ledger", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr[jss::error] == "lgrNotFound"); BEAST_EXPECT(jrr[jss::error_message] == "ledgerNotFound"); } diff --git a/src/test/rpc/NoRippleCheck_test.cpp b/src/test/rpc/NoRippleCheck_test.cpp index 6cd566e144..f379e273dd 100644 --- a/src/test/rpc/NoRippleCheck_test.cpp +++ b/src/test/rpc/NoRippleCheck_test.cpp @@ -59,9 +59,7 @@ class NoRippleCheck_test : public beast::unit_test::suite Json::Value params; params[jss::account] = alice.human(); auto const result = env.rpc( - "json", - "noripple_check", - boost::lexical_cast(params))[jss::result]; + "json", "noripple_check", to_string(params))[jss::result]; BEAST_EXPECT(result[jss::error] == "invalidParams"); BEAST_EXPECT(result[jss::error_message] == "Missing field 'role'."); } @@ -92,9 +90,7 @@ class NoRippleCheck_test : public beast::unit_test::suite params[jss::account] = alice.human(); params[jss::role] = "not_a_role"; auto const result = env.rpc( - "json", - "noripple_check", - boost::lexical_cast(params))[jss::result]; + "json", "noripple_check", to_string(params))[jss::result]; BEAST_EXPECT(result[jss::error] == "invalidParams"); BEAST_EXPECT(result[jss::error_message] == "Invalid field 'role'."); } @@ -105,9 +101,7 @@ class NoRippleCheck_test : public beast::unit_test::suite params[jss::role] = "user"; params[jss::limit] = -1; auto const result = env.rpc( - "json", - "noripple_check", - boost::lexical_cast(params))[jss::result]; + "json", "noripple_check", to_string(params))[jss::result]; BEAST_EXPECT(result[jss::error] == "invalidParams"); BEAST_EXPECT( result[jss::error_message] == @@ -120,9 +114,7 @@ class NoRippleCheck_test : public beast::unit_test::suite params[jss::role] = "user"; params[jss::ledger_hash] = 1; auto const result = env.rpc( - "json", - "noripple_check", - boost::lexical_cast(params))[jss::result]; + "json", "noripple_check", to_string(params))[jss::result]; BEAST_EXPECT(result[jss::error] == "invalidParams"); BEAST_EXPECT(result[jss::error_message] == "ledgerHashNotString"); } @@ -133,9 +125,7 @@ class NoRippleCheck_test : public beast::unit_test::suite params[jss::role] = "user"; params[jss::ledger] = "current"; auto const result = env.rpc( - "json", - "noripple_check", - boost::lexical_cast(params))[jss::result]; + "json", "noripple_check", to_string(params))[jss::result]; BEAST_EXPECT(result[jss::error] == "actNotFound"); BEAST_EXPECT(result[jss::error_message] == "Account not found."); } @@ -147,9 +137,7 @@ class NoRippleCheck_test : public beast::unit_test::suite params[jss::role] = "user"; 
params[jss::ledger] = "current"; auto const result = env.rpc( - "json", - "noripple_check", - boost::lexical_cast(params))[jss::result]; + "json", "noripple_check", to_string(params))[jss::result]; BEAST_EXPECT(result[jss::error] == "actMalformed"); BEAST_EXPECT(result[jss::error_message] == "Account malformed."); } @@ -184,10 +172,8 @@ class NoRippleCheck_test : public beast::unit_test::suite params[jss::account] = alice.human(); params[jss::role] = (user ? "user" : "gateway"); params[jss::ledger] = "current"; - auto result = env.rpc( - "json", - "noripple_check", - boost::lexical_cast(params))[jss::result]; + auto result = + env.rpc("json", "noripple_check", to_string(params))[jss::result]; auto const pa = result["problems"]; if (!BEAST_EXPECT(pa.isArray())) @@ -221,10 +207,8 @@ class NoRippleCheck_test : public beast::unit_test::suite // now make a second request asking for the relevant transactions this // time. params[jss::transactions] = true; - result = env.rpc( - "json", - "noripple_check", - boost::lexical_cast(params))[jss::result]; + result = + env.rpc("json", "noripple_check", to_string(params))[jss::result]; if (!BEAST_EXPECT(result[jss::transactions].isArray())) return; @@ -343,43 +327,33 @@ class NoRippleCheckLimits_test : public beast::unit_test::suite params[jss::account] = alice.human(); params[jss::role] = "user"; params[jss::ledger] = "current"; - auto result = env.rpc( - "json", - "noripple_check", - boost::lexical_cast(params))[jss::result]; + auto result = + env.rpc("json", "noripple_check", to_string(params))[jss::result]; BEAST_EXPECT(result["problems"].size() == 301); // one below minimum params[jss::limit] = 9; - result = env.rpc( - "json", - "noripple_check", - boost::lexical_cast(params))[jss::result]; + result = + env.rpc("json", "noripple_check", to_string(params))[jss::result]; BEAST_EXPECT(result["problems"].size() == (admin ? 10 : 11)); // at minimum params[jss::limit] = 10; - result = env.rpc( - "json", - "noripple_check", - boost::lexical_cast(params))[jss::result]; + result = + env.rpc("json", "noripple_check", to_string(params))[jss::result]; BEAST_EXPECT(result["problems"].size() == 11); // at max params[jss::limit] = 400; - result = env.rpc( - "json", - "noripple_check", - boost::lexical_cast(params))[jss::result]; + result = + env.rpc("json", "noripple_check", to_string(params))[jss::result]; BEAST_EXPECT(result["problems"].size() == 401); // at max+1 params[jss::limit] = 401; - result = env.rpc( - "json", - "noripple_check", - boost::lexical_cast(params))[jss::result]; + result = + env.rpc("json", "noripple_check", to_string(params))[jss::result]; BEAST_EXPECT(result["problems"].size() == (admin ? 
402 : 401));
     }

diff --git a/src/test/rpc/TransactionEntry_test.cpp b/src/test/rpc/TransactionEntry_test.cpp
index e07fdf0320..d8f685f568 100644
--- a/src/test/rpc/TransactionEntry_test.cpp
+++ b/src/test/rpc/TransactionEntry_test.cpp
@@ -258,15 +258,13 @@ class TransactionEntry_test : public beast::unit_test::suite
         Account A2{"A2"};

         env.fund(XRP(10000), A1);
-        auto fund_1_tx =
-            boost::lexical_cast<std::string>(env.tx()->getTransactionID());
+        auto fund_1_tx = to_string(env.tx()->getTransactionID());
         BEAST_EXPECT(
             fund_1_tx ==
             "F4E9DF90D829A9E8B423FF68C34413E240D8D8BB0EFD080DF08114ED398E2506");

         env.fund(XRP(10000), A2);
-        auto fund_2_tx =
-            boost::lexical_cast<std::string>(env.tx()->getTransactionID());
+        auto fund_2_tx = to_string(env.tx()->getTransactionID());
         BEAST_EXPECT(
             fund_2_tx ==
             "6853CD8226A05068C951CB1F54889FF4E40C5B440DC1C5BA38F114C4E0B1E705");
@@ -308,15 +306,13 @@ class TransactionEntry_test : public beast::unit_test::suite
         // the trust tx is actually a payment since the trust method
         // refunds fees with a payment after TrustSet..so just ignore the type
         // in the check below
-        auto trust_tx =
-            boost::lexical_cast<std::string>(env.tx()->getTransactionID());
+        auto trust_tx = to_string(env.tx()->getTransactionID());
         BEAST_EXPECT(
             trust_tx ==
             "C992D97D88FF444A1AB0C06B27557EC54B7F7DA28254778E60238BEA88E0C101");

         env(pay(A2, A1, A2["USD"](5)));
-        auto pay_tx =
-            boost::lexical_cast<std::string>(env.tx()->getTransactionID());
+        auto pay_tx = to_string(env.tx()->getTransactionID());
         env.close();
         BEAST_EXPECT(
             pay_tx ==
@@ -362,8 +358,7 @@ class TransactionEntry_test : public beast::unit_test::suite
             "2000-01-01T00:00:20Z");

         env(offer(A2, XRP(100), A2["USD"](1)));
-        auto offer_tx =
-            boost::lexical_cast<std::string>(env.tx()->getTransactionID());
+        auto offer_tx = to_string(env.tx()->getTransactionID());
         BEAST_EXPECT(
             offer_tx ==
             "5FCC1A27A7664F82A0CC4BE5766FBBB7C560D52B93AA7B550CD33B27AEC7EFFB");

From e80642fc121562217a3ce4c5b196e22d8ad910bd Mon Sep 17 00:00:00 2001
From: Bronek Kozicki
Date: Thu, 16 Oct 2025 13:54:36 +0100
Subject: [PATCH 238/244] fix: Fix regression in ConnectAttempt (#5900)

A regression was introduced in #5669 which would cause rippled to
potentially dereference a disengaged std::optional when connecting to a
peer. This would cause UB in release builds and a crash in debug builds.

Co-authored-by: Bart Thomee <11445373+bthomee@users.noreply.github.com>
---
 src/xrpld/overlay/detail/ConnectAttempt.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/xrpld/overlay/detail/ConnectAttempt.cpp b/src/xrpld/overlay/detail/ConnectAttempt.cpp
index c1bc4bb069..15a3b91802 100644
--- a/src/xrpld/overlay/detail/ConnectAttempt.cpp
+++ b/src/xrpld/overlay/detail/ConnectAttempt.cpp
@@ -600,8 +600,8 @@ ConnectAttempt::processResponse()
         JLOG(journal_.info()) << "Cluster name: " << *member;
     }

-    auto const result =
-        overlay_.peerFinder().activate(slot_, publicKey, !member->empty());
+    auto const result = overlay_.peerFinder().activate(
+        slot_, publicKey, member.has_value());
     if (result != PeerFinder::Result::success)
     {
         std::stringstream ss;

From 92281a4edeab0f4fd43e7773a0e7cd882bf5649f Mon Sep 17 00:00:00 2001
From: Mayukha Vadari
Date: Thu, 16 Oct 2025 12:02:25 -0400
Subject: [PATCH 239/244] refactor: replace string JSONs with Json::Value (#5886)

There are some tests that write out JSON as strings instead of using the
Json::Value library; this change cleans them up.
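For illustration only (this sketch is not part of the diff below), the
pattern applied throughout these tests looks roughly like the following,
assuming the usual fixtures from these test files are in scope (the jtx
Env named env, a funded account alice, and the jss field-name constants);
the variable names are illustrative:

    // Before: the request JSON is pasted together from string fragments.
    auto const oldStyle = env.rpc(
        "json",
        "account_lines",
        R"({"account": ")" + alice.human() + R"(", "limit": 1})")[jss::result];

    // After: build the request as a Json::Value and serialize it once.
    Json::Value params;
    params[jss::account] = alice.human();
    params[jss::limit] = 1;
    auto const jrr =
        env.rpc("json", "account_lines", to_string(params))[jss::result];

The Json::Value form avoids hand-escaped quotes and string concatenation,
and keeps the request shape readable next to the assertions that follow.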
Co-authored-by: Bart Thomee <11445373+bthomee@users.noreply.github.com> --- src/test/app/TxQ_test.cpp | 39 +- src/test/rpc/AccountInfo_test.cpp | 67 ++- src/test/rpc/AccountLines_test.cpp | 847 +++++++++++++---------------- 3 files changed, 443 insertions(+), 510 deletions(-) diff --git a/src/test/app/TxQ_test.cpp b/src/test/app/TxQ_test.cpp index 190acfeddf..ebc9c7d413 100644 --- a/src/test/app/TxQ_test.cpp +++ b/src/test/app/TxQ_test.cpp @@ -3081,16 +3081,23 @@ public: env.fund(XRP(1000000), alice); env.close(); - auto const withQueue = - R"({ "account": ")" + alice.human() + R"(", "queue": true })"; - auto const withoutQueue = R"({ "account": ")" + alice.human() + R"("})"; - auto const prevLedgerWithQueue = R"({ "account": ")" + alice.human() + - R"(", "queue": true, "ledger_index": 3 })"; + Json::Value withQueue; + withQueue[jss::account] = alice.human(); + withQueue[jss::queue] = true; + + Json::Value withoutQueue; + withoutQueue[jss::account] = alice.human(); + + Json::Value prevLedgerWithQueue; + prevLedgerWithQueue[jss::account] = alice.human(); + prevLedgerWithQueue[jss::queue] = true; + prevLedgerWithQueue[jss::ledger_index] = 3; BEAST_EXPECT(env.current()->info().seq > 3); { // account_info without the "queue" argument. - auto const info = env.rpc("json", "account_info", withoutQueue); + auto const info = + env.rpc("json", "account_info", to_string(withoutQueue)); BEAST_EXPECT( info.isMember(jss::result) && info[jss::result].isMember(jss::account_data)); @@ -3098,7 +3105,8 @@ public: } { // account_info with the "queue" argument. - auto const info = env.rpc("json", "account_info", withQueue); + auto const info = + env.rpc("json", "account_info", to_string(withQueue)); BEAST_EXPECT( info.isMember(jss::result) && info[jss::result].isMember(jss::account_data)); @@ -3120,7 +3128,8 @@ public: checkMetrics(*this, env, 0, 6, 4, 3); { - auto const info = env.rpc("json", "account_info", withQueue); + auto const info = + env.rpc("json", "account_info", to_string(withQueue)); BEAST_EXPECT( info.isMember(jss::result) && info[jss::result].isMember(jss::account_data)); @@ -3149,7 +3158,8 @@ public: checkMetrics(*this, env, 4, 6, 4, 3); { - auto const info = env.rpc("json", "account_info", withQueue); + auto const info = + env.rpc("json", "account_info", to_string(withQueue)); BEAST_EXPECT( info.isMember(jss::result) && info[jss::result].isMember(jss::account_data)); @@ -3212,7 +3222,8 @@ public: checkMetrics(*this, env, 1, 8, 5, 4); { - auto const info = env.rpc("json", "account_info", withQueue); + auto const info = + env.rpc("json", "account_info", to_string(withQueue)); BEAST_EXPECT( info.isMember(jss::result) && info[jss::result].isMember(jss::account_data)); @@ -3276,7 +3287,8 @@ public: checkMetrics(*this, env, 1, 8, 5, 4); { - auto const info = env.rpc("json", "account_info", withQueue); + auto const info = + env.rpc("json", "account_info", to_string(withQueue)); BEAST_EXPECT( info.isMember(jss::result) && info[jss::result].isMember(jss::account_data)); @@ -3344,7 +3356,7 @@ public: { auto const info = - env.rpc("json", "account_info", prevLedgerWithQueue); + env.rpc("json", "account_info", to_string(prevLedgerWithQueue)); BEAST_EXPECT( info.isMember(jss::result) && RPC::contains_error(info[jss::result])); @@ -3356,7 +3368,8 @@ public: checkMetrics(*this, env, 0, 10, 0, 5); { - auto const info = env.rpc("json", "account_info", withQueue); + auto const info = + env.rpc("json", "account_info", to_string(withQueue)); BEAST_EXPECT( info.isMember(jss::result) && 
info[jss::result].isMember(jss::account_data)); diff --git a/src/test/rpc/AccountInfo_test.cpp b/src/test/rpc/AccountInfo_test.cpp index 18c8bf5a1c..32a1202622 100644 --- a/src/test/rpc/AccountInfo_test.cpp +++ b/src/test/rpc/AccountInfo_test.cpp @@ -58,10 +58,10 @@ public: { // account_info with an account that's not in the ledger. Account const bogie{"bogie"}; - auto const info = env.rpc( - "json", - "account_info", - R"({ "account": ")" + bogie.human() + R"("})"); + Json::Value params; + params[jss::account] = bogie.human(); + auto const info = + env.rpc("json", "account_info", to_string(params)); BEAST_EXPECT( info[jss::result][jss::error_code] == rpcACT_NOT_FOUND); BEAST_EXPECT( @@ -128,16 +128,18 @@ public: Account const alice{"alice"}; env.fund(XRP(1000), alice); - auto const withoutSigners = - std::string("{ ") + "\"account\": \"" + alice.human() + "\"}"; + Json::Value withoutSigners; + withoutSigners[jss::account] = alice.human(); - auto const withSigners = std::string("{ ") + "\"account\": \"" + - alice.human() + "\", " + "\"signer_lists\": true }"; + Json::Value withSigners; + withSigners[jss::account] = alice.human(); + withSigners[jss::signer_lists] = true; // Alice has no SignerList yet. { // account_info without the "signer_lists" argument. - auto const info = env.rpc("json", "account_info", withoutSigners); + auto const info = + env.rpc("json", "account_info", to_string(withoutSigners)); BEAST_EXPECT( info.isMember(jss::result) && info[jss::result].isMember(jss::account_data)); @@ -146,7 +148,8 @@ public: } { // account_info with the "signer_lists" argument. - auto const info = env.rpc("json", "account_info", withSigners); + auto const info = + env.rpc("json", "account_info", to_string(withSigners)); BEAST_EXPECT( info.isMember(jss::result) && info[jss::result].isMember(jss::account_data)); @@ -164,7 +167,8 @@ public: env(smallSigners); { // account_info without the "signer_lists" argument. - auto const info = env.rpc("json", "account_info", withoutSigners); + auto const info = + env.rpc("json", "account_info", to_string(withoutSigners)); BEAST_EXPECT( info.isMember(jss::result) && info[jss::result].isMember(jss::account_data)); @@ -173,7 +177,8 @@ public: } { // account_info with the "signer_lists" argument. - auto const info = env.rpc("json", "account_info", withSigners); + auto const info = + env.rpc("json", "account_info", to_string(withSigners)); BEAST_EXPECT( info.isMember(jss::result) && info[jss::result].isMember(jss::account_data)); @@ -216,7 +221,8 @@ public: env(bigSigners); { // account_info with the "signer_lists" argument. 
- auto const info = env.rpc("json", "account_info", withSigners); + auto const info = + env.rpc("json", "account_info", to_string(withSigners)); BEAST_EXPECT( info.isMember(jss::result) && info[jss::result].isMember(jss::account_data)); @@ -250,12 +256,14 @@ public: Account const alice{"alice"}; env.fund(XRP(1000), alice); - auto const withoutSigners = std::string("{ ") + - "\"api_version\": 2, \"account\": \"" + alice.human() + "\"}"; + Json::Value withoutSigners; + withoutSigners[jss::api_version] = 2; + withoutSigners[jss::account] = alice.human(); - auto const withSigners = std::string("{ ") + - "\"api_version\": 2, \"account\": \"" + alice.human() + "\", " + - "\"signer_lists\": true }"; + Json::Value withSigners; + withSigners[jss::api_version] = 2; + withSigners[jss::account] = alice.human(); + withSigners[jss::signer_lists] = true; auto const withSignersAsString = std::string("{ ") + "\"api_version\": 2, \"account\": \"" + alice.human() + "\", " + @@ -264,13 +272,15 @@ public: // Alice has no SignerList yet. { // account_info without the "signer_lists" argument. - auto const info = env.rpc("json", "account_info", withoutSigners); + auto const info = + env.rpc("json", "account_info", to_string(withoutSigners)); BEAST_EXPECT(info.isMember(jss::result)); BEAST_EXPECT(!info[jss::result].isMember(jss::signer_lists)); } { // account_info with the "signer_lists" argument. - auto const info = env.rpc("json", "account_info", withSigners); + auto const info = + env.rpc("json", "account_info", to_string(withSigners)); BEAST_EXPECT(info.isMember(jss::result)); auto const& data = info[jss::result]; BEAST_EXPECT(data.isMember(jss::signer_lists)); @@ -286,13 +296,15 @@ public: env(smallSigners); { // account_info without the "signer_lists" argument. - auto const info = env.rpc("json", "account_info", withoutSigners); + auto const info = + env.rpc("json", "account_info", to_string(withoutSigners)); BEAST_EXPECT(info.isMember(jss::result)); BEAST_EXPECT(!info[jss::result].isMember(jss::signer_lists)); } { // account_info with the "signer_lists" argument. - auto const info = env.rpc("json", "account_info", withSigners); + auto const info = + env.rpc("json", "account_info", to_string(withSigners)); BEAST_EXPECT(info.isMember(jss::result)); auto const& data = info[jss::result]; BEAST_EXPECT(data.isMember(jss::signer_lists)); @@ -340,7 +352,8 @@ public: env(bigSigners); { // account_info with the "signer_lists" argument. - auto const info = env.rpc("json", "account_info", withSigners); + auto const info = + env.rpc("json", "account_info", to_string(withSigners)); BEAST_EXPECT(info.isMember(jss::result)); auto const& data = info[jss::result]; BEAST_EXPECT(data.isMember(jss::signer_lists)); @@ -567,10 +580,10 @@ public: auto getAccountFlag = [&env]( std::string_view fName, Account const& account) { - auto const info = env.rpc( - "json", - "account_info", - R"({"account" : ")" + account.human() + R"("})"); + Json::Value params; + params[jss::account] = account.human(); + auto const info = + env.rpc("json", "account_info", to_string(params)); std::optional res; if (info[jss::result][jss::status] == "success" && diff --git a/src/test/rpc/AccountLines_test.cpp b/src/test/rpc/AccountLines_test.cpp index 9215f4087a..10d8c1b4ac 100644 --- a/src/test/rpc/AccountLines_test.cpp +++ b/src/test/rpc/AccountLines_test.cpp @@ -46,11 +46,11 @@ public: } { // account_lines with a malformed account. 
- auto const lines = env.rpc( - "json", - "account_lines", - R"({"account": )" - R"("n9MJkEKHDhy5eTLuHUQeAAjo382frHNbFK4C8hcwN4nwM2SrLdBj"})"); + Json::Value params; + params[jss::account] = + "n9MJkEKHDhy5eTLuHUQeAAjo382frHNbFK4C8hcwN4nwM2SrLdBj"; + auto const lines = + env.rpc("json", "account_lines", to_string(params)); BEAST_EXPECT( lines[jss::result][jss::error_message] == RPC::make_error(rpcACT_MALFORMED)[jss::error_message]); @@ -77,10 +77,10 @@ public: Account const alice{"alice"}; { // account_lines on an unfunded account. - auto const lines = env.rpc( - "json", - "account_lines", - R"({"account": ")" + alice.human() + R"("})"); + Json::Value params; + params[jss::account] = alice.human(); + auto const lines = + env.rpc("json", "account_lines", to_string(params)); BEAST_EXPECT( lines[jss::result][jss::error_message] == RPC::make_error(rpcACT_NOT_FOUND)[jss::error_message]); @@ -92,33 +92,31 @@ public: { // alice is funded but has no lines. An empty array is returned. - auto const lines = env.rpc( - "json", - "account_lines", - R"({"account": ")" + alice.human() + R"("})"); + Json::Value params; + params[jss::account] = alice.human(); + auto const lines = + env.rpc("json", "account_lines", to_string(params)); BEAST_EXPECT(lines[jss::result][jss::lines].isArray()); BEAST_EXPECT(lines[jss::result][jss::lines].size() == 0); } { // Specify a ledger that doesn't exist. - auto const lines = env.rpc( - "json", - "account_lines", - R"({"account": ")" + alice.human() + - R"(", )" - R"("ledger_index": "nonsense"})"); + Json::Value params; + params[jss::account] = alice.human(); + params[jss::ledger_index] = "nonsense"; + auto const lines = + env.rpc("json", "account_lines", to_string(params)); BEAST_EXPECT( lines[jss::result][jss::error_message] == "ledgerIndexMalformed"); } { // Specify a different ledger that doesn't exist. - auto const lines = env.rpc( - "json", - "account_lines", - R"({"account": ")" + alice.human() + - R"(", )" - R"("ledger_index": 50000})"); + Json::Value params; + params[jss::account] = alice.human(); + params[jss::ledger_index] = 50000; + auto const lines = + env.rpc("json", "account_lines", to_string(params)); BEAST_EXPECT( lines[jss::result][jss::error_message] == "ledgerNotFound"); } @@ -183,24 +181,20 @@ public: LedgerInfo const& info, int count) { // Get account_lines by ledger index. - auto const linesSeq = env.rpc( - "json", - "account_lines", - R"({"account": ")" + account.human() + - R"(", )" - R"("ledger_index": )" + - std::to_string(info.seq) + "}"); + Json::Value paramsSeq; + paramsSeq[jss::account] = account.human(); + paramsSeq[jss::ledger_index] = info.seq; + auto const linesSeq = + env.rpc("json", "account_lines", to_string(paramsSeq)); BEAST_EXPECT(linesSeq[jss::result][jss::lines].isArray()); BEAST_EXPECT(linesSeq[jss::result][jss::lines].size() == count); // Get account_lines by ledger hash. - auto const linesHash = env.rpc( - "json", - "account_lines", - R"({"account": ")" + account.human() + - R"(", )" - R"("ledger_hash": ")" + - to_string(info.hash) + R"("})"); + Json::Value paramsHash; + paramsHash[jss::account] = account.human(); + paramsHash[jss::ledger_hash] = to_string(info.hash); + auto const linesHash = + env.rpc("json", "account_lines", to_string(paramsHash)); BEAST_EXPECT(linesHash[jss::result][jss::lines].isArray()); BEAST_EXPECT(linesHash[jss::result][jss::lines].size() == count); }; @@ -217,37 +211,31 @@ public: { // Surprisingly, it's valid to specify both index and hash, in // which case the hash wins. 
- auto const lines = env.rpc( - "json", - "account_lines", - R"({"account": ")" + alice.human() + - R"(", )" - R"("ledger_hash": ")" + - to_string(ledger4Info.hash) + - R"(", )" - R"("ledger_index": )" + - std::to_string(ledger58Info.seq) + "}"); + Json::Value params; + params[jss::account] = alice.human(); + params[jss::ledger_hash] = to_string(ledger4Info.hash); + params[jss::ledger_index] = ledger58Info.seq; + auto const lines = + env.rpc("json", "account_lines", to_string(params)); BEAST_EXPECT(lines[jss::result][jss::lines].isArray()); BEAST_EXPECT(lines[jss::result][jss::lines].size() == 26); } { // alice should have 52 trust lines in the current ledger. - auto const lines = env.rpc( - "json", - "account_lines", - R"({"account": ")" + alice.human() + R"("})"); + Json::Value params; + params[jss::account] = alice.human(); + auto const lines = + env.rpc("json", "account_lines", to_string(params)); BEAST_EXPECT(lines[jss::result][jss::lines].isArray()); BEAST_EXPECT(lines[jss::result][jss::lines].size() == 52); } { // alice should have 26 trust lines with gw1. - auto const lines = env.rpc( - "json", - "account_lines", - R"({"account": ")" + alice.human() + - R"(", )" - R"("peer": ")" + - gw1.human() + R"("})"); + Json::Value params; + params[jss::account] = alice.human(); + params[jss::peer] = gw1.human(); + auto const lines = + env.rpc("json", "account_lines", to_string(params)); BEAST_EXPECT(lines[jss::result][jss::lines].isArray()); BEAST_EXPECT(lines[jss::result][jss::lines].size() == 26); @@ -257,99 +245,87 @@ public: } { // Use a malformed peer. - auto const lines = env.rpc( - "json", - "account_lines", - R"({"account": ")" + alice.human() + - R"(", )" - R"("peer": )" - R"("n9MJkEKHDhy5eTLuHUQeAAjo382frHNbFK4C8hcwN4nwM2SrLdBj"})"); + Json::Value params; + params[jss::account] = alice.human(); + params[jss::peer] = + "n9MJkEKHDhy5eTLuHUQeAAjo382frHNbFK4C8hcwN4nwM2SrLdBj"; + auto const lines = + env.rpc("json", "account_lines", to_string(params)); BEAST_EXPECT( lines[jss::result][jss::error_message] == RPC::make_error(rpcACT_MALFORMED)[jss::error_message]); } { // A negative limit should fail. - auto const lines = env.rpc( - "json", - "account_lines", - R"({"account": ")" + alice.human() + - R"(", )" - R"("limit": -1})"); + Json::Value params; + params[jss::account] = alice.human(); + params[jss::limit] = -1; + auto const lines = + env.rpc("json", "account_lines", to_string(params)); BEAST_EXPECT( lines[jss::result][jss::error_message] == RPC::expected_field_message(jss::limit, "unsigned integer")); } { // Limit the response to 1 trust line. - auto const linesA = env.rpc( - "json", - "account_lines", - R"({"account": ")" + alice.human() + - R"(", )" - R"("limit": 1})"); + Json::Value paramsA; + paramsA[jss::account] = alice.human(); + paramsA[jss::limit] = 1; + auto const linesA = + env.rpc("json", "account_lines", to_string(paramsA)); BEAST_EXPECT(linesA[jss::result][jss::lines].isArray()); BEAST_EXPECT(linesA[jss::result][jss::lines].size() == 1); // Pick up from where the marker left off. We should get 51. 
auto marker = linesA[jss::result][jss::marker].asString(); - auto const linesB = env.rpc( - "json", - "account_lines", - R"({"account": ")" + alice.human() + - R"(", )" - R"("marker": ")" + - marker + R"("})"); + Json::Value paramsB; + paramsB[jss::account] = alice.human(); + paramsB[jss::marker] = marker; + auto const linesB = + env.rpc("json", "account_lines", to_string(paramsB)); BEAST_EXPECT(linesB[jss::result][jss::lines].isArray()); BEAST_EXPECT(linesB[jss::result][jss::lines].size() == 51); // Go again from where the marker left off, but set a limit of 3. - auto const linesC = env.rpc( - "json", - "account_lines", - R"({"account": ")" + alice.human() + - R"(", )" - R"("limit": 3, )" - R"("marker": ")" + - marker + R"("})"); + Json::Value paramsC; + paramsC[jss::account] = alice.human(); + paramsC[jss::limit] = 3; + paramsC[jss::marker] = marker; + auto const linesC = + env.rpc("json", "account_lines", to_string(paramsC)); BEAST_EXPECT(linesC[jss::result][jss::lines].isArray()); BEAST_EXPECT(linesC[jss::result][jss::lines].size() == 3); // Mess with the marker so it becomes bad and check for the error. marker[5] = marker[5] == '7' ? '8' : '7'; - auto const linesD = env.rpc( - "json", - "account_lines", - R"({"account": ")" + alice.human() + - R"(", )" - R"("marker": ")" + - marker + R"("})"); + Json::Value paramsD; + paramsD[jss::account] = alice.human(); + paramsD[jss::marker] = marker; + auto const linesD = + env.rpc("json", "account_lines", to_string(paramsD)); BEAST_EXPECT( linesD[jss::result][jss::error_message] == RPC::make_error(rpcINVALID_PARAMS)[jss::error_message]); } { // A non-string marker should also fail. - auto const lines = env.rpc( - "json", - "account_lines", - R"({"account": ")" + alice.human() + - R"(", )" - R"("marker": true})"); + Json::Value params; + params[jss::account] = alice.human(); + params[jss::marker] = true; + auto const lines = + env.rpc("json", "account_lines", to_string(params)); BEAST_EXPECT( lines[jss::result][jss::error_message] == RPC::expected_field_message(jss::marker, "string")); } { // Check that the flags we expect from alice to gw2 are present. - auto const lines = env.rpc( - "json", - "account_lines", - R"({"account": ")" + alice.human() + - R"(", )" - R"("limit": 10, )" - R"("peer": ")" + - gw2.human() + R"("})"); + Json::Value params; + params[jss::account] = alice.human(); + params[jss::limit] = 10; + params[jss::peer] = gw2.human(); + auto const lines = + env.rpc("json", "account_lines", to_string(params)); auto const& line = lines[jss::result][jss::lines][0u]; BEAST_EXPECT(line[jss::freeze].asBool() == true); BEAST_EXPECT(line[jss::deep_freeze].asBool() == true); @@ -358,14 +334,12 @@ public: } { // Check that the flags we expect from gw2 to alice are present. - auto const linesA = env.rpc( - "json", - "account_lines", - R"({"account": ")" + gw2.human() + - R"(", )" - R"("limit": 1, )" - R"("peer": ")" + - alice.human() + R"("})"); + Json::Value paramsA; + paramsA[jss::account] = gw2.human(); + paramsA[jss::limit] = 1; + paramsA[jss::peer] = alice.human(); + auto const linesA = + env.rpc("json", "account_lines", to_string(paramsA)); auto const& lineA = linesA[jss::result][jss::lines][0u]; BEAST_EXPECT(lineA[jss::freeze_peer].asBool() == true); BEAST_EXPECT(lineA[jss::deep_freeze_peer].asBool() == true); @@ -375,17 +349,13 @@ public: // Continue from the returned marker to make sure that works. 
BEAST_EXPECT(linesA[jss::result].isMember(jss::marker)); auto const marker = linesA[jss::result][jss::marker].asString(); - auto const linesB = env.rpc( - "json", - "account_lines", - R"({"account": ")" + gw2.human() + - R"(", )" - R"("limit": 25, )" - R"("marker": ")" + - marker + - R"(", )" - R"("peer": ")" + - alice.human() + R"("})"); + Json::Value paramsB; + paramsB[jss::account] = gw2.human(); + paramsB[jss::limit] = 25; + paramsB[jss::marker] = marker; + paramsB[jss::peer] = alice.human(); + auto const linesB = + env.rpc("json", "account_lines", to_string(paramsB)); BEAST_EXPECT(linesB[jss::result][jss::lines].isArray()); BEAST_EXPECT(linesB[jss::result][jss::lines].size() == 25); BEAST_EXPECT(!linesB[jss::result].isMember(jss::marker)); @@ -425,12 +395,11 @@ public: // signerlist is first. This is only a (reliable) coincidence of // object naming. So if any of alice's objects are renamed this // may fail. - Json::Value const aliceObjects = env.rpc( - "json", - "account_objects", - R"({"account": ")" + alice.human() + - R"(", )" - R"("limit": 10})"); + Json::Value aliceObjectsParams; + aliceObjectsParams[jss::account] = alice.human(); + aliceObjectsParams[jss::limit] = 10; + Json::Value const aliceObjects = + env.rpc("json", "account_objects", to_string(aliceObjectsParams)); Json::Value const& aliceSignerList = aliceObjects[jss::result][jss::account_objects][0u]; if (!(aliceSignerList[sfLedgerEntryType.jsonName] == jss::SignerList)) @@ -445,10 +414,11 @@ public: // Get account_lines for alice. Limit at 1, so we get a marker // pointing to her SignerList. - auto const aliceLines1 = env.rpc( - "json", - "account_lines", - R"({"account": ")" + alice.human() + R"(", "limit": 1})"); + Json::Value aliceLines1Params; + aliceLines1Params[jss::account] = alice.human(); + aliceLines1Params[jss::limit] = 1; + auto const aliceLines1 = + env.rpc("json", "account_lines", to_string(aliceLines1Params)); BEAST_EXPECT(aliceLines1[jss::result].isMember(jss::marker)); // Verify that the marker points at the signer list. @@ -459,21 +429,21 @@ public: BEAST_EXPECT(markerIndex == aliceSignerList[jss::index].asString()); // When we fetch Alice's remaining lines we should find one and no more. - auto const aliceLines2 = env.rpc( - "json", - "account_lines", - R"({"account": ")" + alice.human() + R"(", "marker": ")" + - aliceMarker + R"("})"); + Json::Value aliceLines2Params; + aliceLines2Params[jss::account] = alice.human(); + aliceLines2Params[jss::marker] = aliceMarker; + auto const aliceLines2 = + env.rpc("json", "account_lines", to_string(aliceLines2Params)); BEAST_EXPECT(aliceLines2[jss::result][jss::lines].size() == 1); BEAST_EXPECT(!aliceLines2[jss::result].isMember(jss::marker)); // Get account lines for beckys account, using alices SignerList as a // marker. This should cause an error. - auto const beckyLines = env.rpc( - "json", - "account_lines", - R"({"account": ")" + becky.human() + R"(", "marker": ")" + - aliceMarker + R"("})"); + Json::Value beckyLinesParams; + beckyLinesParams[jss::account] = becky.human(); + beckyLinesParams[jss::marker] = aliceMarker; + auto const beckyLines = + env.rpc("json", "account_lines", to_string(beckyLinesParams)); BEAST_EXPECT(beckyLines[jss::result].isMember(jss::error_message)); } @@ -525,12 +495,11 @@ public: env.close(); // Get account_lines for alice. Limit at 1, so we get a marker. 
- auto const linesBeg = env.rpc( - "json", - "account_lines", - R"({"account": ")" + alice.human() + - R"(", )" - R"("limit": 2})"); + Json::Value linesBegParams; + linesBegParams[jss::account] = alice.human(); + linesBegParams[jss::limit] = 2; + auto const linesBeg = + env.rpc("json", "account_lines", to_string(linesBegParams)); BEAST_EXPECT( linesBeg[jss::result][jss::lines][0u][jss::currency] == "USD"); BEAST_EXPECT(linesBeg[jss::result].isMember(jss::marker)); @@ -541,13 +510,11 @@ public: // Since alice paid all her EUR to cheri, alice should no longer // have a trust line to gw1. So the old marker should now be invalid. - auto const linesEnd = env.rpc( - "json", - "account_lines", - R"({"account": ")" + alice.human() + - R"(", )" - R"("marker": ")" + - linesBeg[jss::result][jss::marker].asString() + R"("})"); + Json::Value linesEndParams; + linesEndParams[jss::account] = alice.human(); + linesEndParams[jss::marker] = linesBeg[jss::result][jss::marker]; + auto const linesEnd = + env.rpc("json", "account_lines", to_string(linesEndParams)); BEAST_EXPECT( linesEnd[jss::result][jss::error_message] == RPC::make_error(rpcINVALID_PARAMS)[jss::error_message]); @@ -726,12 +693,11 @@ public: } BEAST_EXPECT(expectedLines == foundLines); + Json::Value aliceObjectsParams2; + aliceObjectsParams2[jss::account] = alice.human(); + aliceObjectsParams2[jss::limit] = 200; Json::Value const aliceObjects = env.rpc( - "json", - "account_objects", - R"({"account": ")" + alice.human() + - R"(", )" - R"("limit": 200})"); + "json", "account_objects", to_string(aliceObjectsParams2)); BEAST_EXPECT(aliceObjects.isMember(jss::result)); BEAST_EXPECT( !aliceObjects[jss::result].isMember(jss::error_message)); @@ -751,12 +717,11 @@ public: iterations == expectedIterations, std::to_string(iterations)); // Get becky's objects just to confirm that they're symmetrical + Json::Value beckyObjectsParams; + beckyObjectsParams[jss::account] = becky.human(); + beckyObjectsParams[jss::limit] = 200; Json::Value const beckyObjects = env.rpc( - "json", - "account_objects", - R"({"account": ")" + becky.human() + - R"(", )" - R"("limit": 200})"); + "json", "account_objects", to_string(beckyObjectsParams)); BEAST_EXPECT(beckyObjects.isMember(jss::result)); BEAST_EXPECT( !beckyObjects[jss::result].isMember(jss::error_message)); @@ -782,13 +747,11 @@ public: Env env(*this); { // account_lines with mal-formed json2 (missing id field). - auto const lines = env.rpc( - "json2", - "{ " - R"("method" : "account_lines",)" - R"("jsonrpc" : "2.0",)" - R"("ripplerpc" : "2.0")" - " }"); + Json::Value request; + request[jss::method] = "account_lines"; + request[jss::jsonrpc] = "2.0"; + request[jss::ripplerpc] = "2.0"; + auto const lines = env.rpc("json2", to_string(request)); BEAST_EXPECT( lines.isMember(jss::jsonrpc) && lines[jss::jsonrpc] == "2.0"); BEAST_EXPECT( @@ -797,14 +760,12 @@ public: } { // account_lines with no account. - auto const lines = env.rpc( - "json2", - "{ " - R"("method" : "account_lines",)" - R"("jsonrpc" : "2.0",)" - R"("ripplerpc" : "2.0",)" - R"("id" : 5)" - " }"); + Json::Value request; + request[jss::method] = "account_lines"; + request[jss::jsonrpc] = "2.0"; + request[jss::ripplerpc] = "2.0"; + request[jss::id] = 5; + auto const lines = env.rpc("json2", to_string(request)); BEAST_EXPECT( lines[jss::error][jss::message] == RPC::missing_field_error(jss::account)[jss::error_message]); @@ -817,16 +778,16 @@ public: } { // account_lines with a malformed account. 
- auto const lines = env.rpc( - "json2", - "{ " - R"("method" : "account_lines",)" - R"("jsonrpc" : "2.0",)" - R"("ripplerpc" : "2.0",)" - R"("id" : 5,)" - R"("params": )" - R"({"account": )" - R"("n9MJkEKHDhy5eTLuHUQeAAjo382frHNbFK4C8hcwN4nwM2SrLdBj"}})"); + Json::Value params; + params[jss::account] = + "n9MJkEKHDhy5eTLuHUQeAAjo382frHNbFK4C8hcwN4nwM2SrLdBj"; + Json::Value request; + request[jss::method] = "account_lines"; + request[jss::jsonrpc] = "2.0"; + request[jss::ripplerpc] = "2.0"; + request[jss::id] = 5; + request[jss::params] = params; + auto const lines = env.rpc("json2", to_string(request)); BEAST_EXPECT( lines[jss::error][jss::message] == RPC::make_error(rpcACT_MALFORMED)[jss::error_message]); @@ -840,16 +801,15 @@ public: Account const alice{"alice"}; { // account_lines on an unfunded account. - auto const lines = env.rpc( - "json2", - "{ " - R"("method" : "account_lines",)" - R"("jsonrpc" : "2.0",)" - R"("ripplerpc" : "2.0",)" - R"("id" : 5,)" - R"("params": )" - R"({"account": ")" + - alice.human() + R"("}})"); + Json::Value params; + params[jss::account] = alice.human(); + Json::Value request; + request[jss::method] = "account_lines"; + request[jss::jsonrpc] = "2.0"; + request[jss::ripplerpc] = "2.0"; + request[jss::id] = 5; + request[jss::params] = params; + auto const lines = env.rpc("json2", to_string(request)); BEAST_EXPECT( lines[jss::error][jss::message] == RPC::make_error(rpcACT_NOT_FOUND)[jss::error_message]); @@ -867,16 +827,15 @@ public: { // alice is funded but has no lines. An empty array is returned. - auto const lines = env.rpc( - "json2", - "{ " - R"("method" : "account_lines",)" - R"("jsonrpc" : "2.0",)" - R"("ripplerpc" : "2.0",)" - R"("id" : 5,)" - R"("params": )" - R"({"account": ")" + - alice.human() + R"("}})"); + Json::Value params; + params[jss::account] = alice.human(); + Json::Value request; + request[jss::method] = "account_lines"; + request[jss::jsonrpc] = "2.0"; + request[jss::ripplerpc] = "2.0"; + request[jss::id] = 5; + request[jss::params] = params; + auto const lines = env.rpc("json2", to_string(request)); BEAST_EXPECT(lines[jss::result][jss::lines].isArray()); BEAST_EXPECT(lines[jss::result][jss::lines].size() == 0); BEAST_EXPECT( @@ -888,18 +847,16 @@ public: } { // Specify a ledger that doesn't exist. - auto const lines = env.rpc( - "json2", - "{ " - R"("method" : "account_lines",)" - R"("jsonrpc" : "2.0",)" - R"("ripplerpc" : "2.0",)" - R"("id" : 5,)" - R"("params": )" - R"({"account": ")" + - alice.human() + - R"(", )" - R"("ledger_index": "nonsense"}})"); + Json::Value params; + params[jss::account] = alice.human(); + params[jss::ledger_index] = "nonsense"; + Json::Value request; + request[jss::method] = "account_lines"; + request[jss::jsonrpc] = "2.0"; + request[jss::ripplerpc] = "2.0"; + request[jss::id] = 5; + request[jss::params] = params; + auto const lines = env.rpc("json2", to_string(request)); BEAST_EXPECT( lines[jss::error][jss::message] == "ledgerIndexMalformed"); BEAST_EXPECT( @@ -911,18 +868,16 @@ public: } { // Specify a different ledger that doesn't exist. 
- auto const lines = env.rpc( - "json2", - "{ " - R"("method" : "account_lines",)" - R"("jsonrpc" : "2.0",)" - R"("ripplerpc" : "2.0",)" - R"("id" : 5,)" - R"("params": )" - R"({"account": ")" + - alice.human() + - R"(", )" - R"("ledger_index": 50000}})"); + Json::Value params; + params[jss::account] = alice.human(); + params[jss::ledger_index] = 50000; + Json::Value request; + request[jss::method] = "account_lines"; + request[jss::jsonrpc] = "2.0"; + request[jss::ripplerpc] = "2.0"; + request[jss::id] = 5; + request[jss::params] = params; + auto const lines = env.rpc("json2", to_string(request)); BEAST_EXPECT(lines[jss::error][jss::message] == "ledgerNotFound"); BEAST_EXPECT( lines.isMember(jss::jsonrpc) && lines[jss::jsonrpc] == "2.0"); @@ -992,19 +947,16 @@ public: LedgerInfo const& info, int count) { // Get account_lines by ledger index. - auto const linesSeq = env.rpc( - "json2", - "{ " - R"("method" : "account_lines",)" - R"("jsonrpc" : "2.0",)" - R"("ripplerpc" : "2.0",)" - R"("id" : 5,)" - R"("params": )" - R"({"account": ")" + - account.human() + - R"(", )" - R"("ledger_index": )" + - std::to_string(info.seq) + "}}"); + Json::Value paramsSeq; + paramsSeq[jss::account] = account.human(); + paramsSeq[jss::ledger_index] = info.seq; + Json::Value requestSeq; + requestSeq[jss::method] = "account_lines"; + requestSeq[jss::jsonrpc] = "2.0"; + requestSeq[jss::ripplerpc] = "2.0"; + requestSeq[jss::id] = 5; + requestSeq[jss::params] = paramsSeq; + auto const linesSeq = env.rpc("json2", to_string(requestSeq)); BEAST_EXPECT(linesSeq[jss::result][jss::lines].isArray()); BEAST_EXPECT(linesSeq[jss::result][jss::lines].size() == count); BEAST_EXPECT( @@ -1016,19 +968,16 @@ public: BEAST_EXPECT(linesSeq.isMember(jss::id) && linesSeq[jss::id] == 5); // Get account_lines by ledger hash. - auto const linesHash = env.rpc( - "json2", - "{ " - R"("method" : "account_lines",)" - R"("jsonrpc" : "2.0",)" - R"("ripplerpc" : "2.0",)" - R"("id" : 5,)" - R"("params": )" - R"({"account": ")" + - account.human() + - R"(", )" - R"("ledger_hash": ")" + - to_string(info.hash) + R"("}})"); + Json::Value paramsHash; + paramsHash[jss::account] = account.human(); + paramsHash[jss::ledger_hash] = to_string(info.hash); + Json::Value requestHash; + requestHash[jss::method] = "account_lines"; + requestHash[jss::jsonrpc] = "2.0"; + requestHash[jss::ripplerpc] = "2.0"; + requestHash[jss::id] = 5; + requestHash[jss::params] = paramsHash; + auto const linesHash = env.rpc("json2", to_string(requestHash)); BEAST_EXPECT(linesHash[jss::result][jss::lines].isArray()); BEAST_EXPECT(linesHash[jss::result][jss::lines].size() == count); BEAST_EXPECT( @@ -1053,22 +1002,17 @@ public: { // Surprisingly, it's valid to specify both index and hash, in // which case the hash wins. 
- auto const lines = env.rpc( - "json2", - "{ " - R"("method" : "account_lines",)" - R"("jsonrpc" : "2.0",)" - R"("ripplerpc" : "2.0",)" - R"("id" : 5,)" - R"("params": )" - R"({"account": ")" + - alice.human() + - R"(", )" - R"("ledger_hash": ")" + - to_string(ledger4Info.hash) + - R"(", )" - R"("ledger_index": )" + - std::to_string(ledger58Info.seq) + "}}"); + Json::Value params; + params[jss::account] = alice.human(); + params[jss::ledger_hash] = to_string(ledger4Info.hash); + params[jss::ledger_index] = ledger58Info.seq; + Json::Value request; + request[jss::method] = "account_lines"; + request[jss::jsonrpc] = "2.0"; + request[jss::ripplerpc] = "2.0"; + request[jss::id] = 5; + request[jss::params] = params; + auto const lines = env.rpc("json2", to_string(request)); BEAST_EXPECT(lines[jss::result][jss::lines].isArray()); BEAST_EXPECT(lines[jss::result][jss::lines].size() == 26); BEAST_EXPECT( @@ -1080,16 +1024,15 @@ public: } { // alice should have 52 trust lines in the current ledger. - auto const lines = env.rpc( - "json2", - "{ " - R"("method" : "account_lines",)" - R"("jsonrpc" : "2.0",)" - R"("ripplerpc" : "2.0",)" - R"("id" : 5,)" - R"("params": )" - R"({"account": ")" + - alice.human() + R"("}})"); + Json::Value params; + params[jss::account] = alice.human(); + Json::Value request; + request[jss::method] = "account_lines"; + request[jss::jsonrpc] = "2.0"; + request[jss::ripplerpc] = "2.0"; + request[jss::id] = 5; + request[jss::params] = params; + auto const lines = env.rpc("json2", to_string(request)); BEAST_EXPECT(lines[jss::result][jss::lines].isArray()); BEAST_EXPECT(lines[jss::result][jss::lines].size() == 52); BEAST_EXPECT( @@ -1101,19 +1044,16 @@ public: } { // alice should have 26 trust lines with gw1. - auto const lines = env.rpc( - "json2", - "{ " - R"("method" : "account_lines",)" - R"("jsonrpc" : "2.0",)" - R"("ripplerpc" : "2.0",)" - R"("id" : 5,)" - R"("params": )" - R"({"account": ")" + - alice.human() + - R"(", )" - R"("peer": ")" + - gw1.human() + R"("}})"); + Json::Value params; + params[jss::account] = alice.human(); + params[jss::peer] = gw1.human(); + Json::Value request; + request[jss::method] = "account_lines"; + request[jss::jsonrpc] = "2.0"; + request[jss::ripplerpc] = "2.0"; + request[jss::id] = 5; + request[jss::params] = params; + auto const lines = env.rpc("json2", to_string(request)); BEAST_EXPECT(lines[jss::result][jss::lines].isArray()); BEAST_EXPECT(lines[jss::result][jss::lines].size() == 26); BEAST_EXPECT( @@ -1125,19 +1065,17 @@ public: } { // Use a malformed peer. - auto const lines = env.rpc( - "json2", - "{ " - R"("method" : "account_lines",)" - R"("jsonrpc" : "2.0",)" - R"("ripplerpc" : "2.0",)" - R"("id" : 5,)" - R"("params": )" - R"({"account": ")" + - alice.human() + - R"(", )" - R"("peer": )" - R"("n9MJkEKHDhy5eTLuHUQeAAjo382frHNbFK4C8hcwN4nwM2SrLdBj"}})"); + Json::Value params; + params[jss::account] = alice.human(); + params[jss::peer] = + "n9MJkEKHDhy5eTLuHUQeAAjo382frHNbFK4C8hcwN4nwM2SrLdBj"; + Json::Value request; + request[jss::method] = "account_lines"; + request[jss::jsonrpc] = "2.0"; + request[jss::ripplerpc] = "2.0"; + request[jss::id] = 5; + request[jss::params] = params; + auto const lines = env.rpc("json2", to_string(request)); BEAST_EXPECT( lines[jss::error][jss::message] == RPC::make_error(rpcACT_MALFORMED)[jss::error_message]); @@ -1150,18 +1088,16 @@ public: } { // A negative limit should fail. 
- auto const lines = env.rpc( - "json2", - "{ " - R"("method" : "account_lines",)" - R"("jsonrpc" : "2.0",)" - R"("ripplerpc" : "2.0",)" - R"("id" : 5,)" - R"("params": )" - R"({"account": ")" + - alice.human() + - R"(", )" - R"("limit": -1}})"); + Json::Value params; + params[jss::account] = alice.human(); + params[jss::limit] = -1; + Json::Value request; + request[jss::method] = "account_lines"; + request[jss::jsonrpc] = "2.0"; + request[jss::ripplerpc] = "2.0"; + request[jss::id] = 5; + request[jss::params] = params; + auto const lines = env.rpc("json2", to_string(request)); BEAST_EXPECT( lines[jss::error][jss::message] == RPC::expected_field_message(jss::limit, "unsigned integer")); @@ -1174,18 +1110,16 @@ public: } { // Limit the response to 1 trust line. - auto const linesA = env.rpc( - "json2", - "{ " - R"("method" : "account_lines",)" - R"("jsonrpc" : "2.0",)" - R"("ripplerpc" : "2.0",)" - R"("id" : 5,)" - R"("params": )" - R"({"account": ")" + - alice.human() + - R"(", )" - R"("limit": 1}})"); + Json::Value paramsA; + paramsA[jss::account] = alice.human(); + paramsA[jss::limit] = 1; + Json::Value requestA; + requestA[jss::method] = "account_lines"; + requestA[jss::jsonrpc] = "2.0"; + requestA[jss::ripplerpc] = "2.0"; + requestA[jss::id] = 5; + requestA[jss::params] = paramsA; + auto const linesA = env.rpc("json2", to_string(requestA)); BEAST_EXPECT(linesA[jss::result][jss::lines].isArray()); BEAST_EXPECT(linesA[jss::result][jss::lines].size() == 1); BEAST_EXPECT( @@ -1197,19 +1131,16 @@ public: // Pick up from where the marker left off. We should get 51. auto marker = linesA[jss::result][jss::marker].asString(); - auto const linesB = env.rpc( - "json2", - "{ " - R"("method" : "account_lines",)" - R"("jsonrpc" : "2.0",)" - R"("ripplerpc" : "2.0",)" - R"("id" : 5,)" - R"("params": )" - R"({"account": ")" + - alice.human() + - R"(", )" - R"("marker": ")" + - marker + R"("}})"); + Json::Value paramsB; + paramsB[jss::account] = alice.human(); + paramsB[jss::marker] = marker; + Json::Value requestB; + requestB[jss::method] = "account_lines"; + requestB[jss::jsonrpc] = "2.0"; + requestB[jss::ripplerpc] = "2.0"; + requestB[jss::id] = 5; + requestB[jss::params] = paramsB; + auto const linesB = env.rpc("json2", to_string(requestB)); BEAST_EXPECT(linesB[jss::result][jss::lines].isArray()); BEAST_EXPECT(linesB[jss::result][jss::lines].size() == 51); BEAST_EXPECT( @@ -1220,20 +1151,17 @@ public: BEAST_EXPECT(linesB.isMember(jss::id) && linesB[jss::id] == 5); // Go again from where the marker left off, but set a limit of 3. - auto const linesC = env.rpc( - "json2", - "{ " - R"("method" : "account_lines",)" - R"("jsonrpc" : "2.0",)" - R"("ripplerpc" : "2.0",)" - R"("id" : 5,)" - R"("params": )" - R"({"account": ")" + - alice.human() + - R"(", )" - R"("limit": 3, )" - R"("marker": ")" + - marker + R"("}})"); + Json::Value paramsC; + paramsC[jss::account] = alice.human(); + paramsC[jss::limit] = 3; + paramsC[jss::marker] = marker; + Json::Value requestC; + requestC[jss::method] = "account_lines"; + requestC[jss::jsonrpc] = "2.0"; + requestC[jss::ripplerpc] = "2.0"; + requestC[jss::id] = 5; + requestC[jss::params] = paramsC; + auto const linesC = env.rpc("json2", to_string(requestC)); BEAST_EXPECT(linesC[jss::result][jss::lines].isArray()); BEAST_EXPECT(linesC[jss::result][jss::lines].size() == 3); BEAST_EXPECT( @@ -1245,19 +1173,16 @@ public: // Mess with the marker so it becomes bad and check for the error. marker[5] = marker[5] == '7' ? 
'8' : '7'; - auto const linesD = env.rpc( - "json2", - "{ " - R"("method" : "account_lines",)" - R"("jsonrpc" : "2.0",)" - R"("ripplerpc" : "2.0",)" - R"("id" : 5,)" - R"("params": )" - R"({"account": ")" + - alice.human() + - R"(", )" - R"("marker": ")" + - marker + R"("}})"); + Json::Value paramsD; + paramsD[jss::account] = alice.human(); + paramsD[jss::marker] = marker; + Json::Value requestD; + requestD[jss::method] = "account_lines"; + requestD[jss::jsonrpc] = "2.0"; + requestD[jss::ripplerpc] = "2.0"; + requestD[jss::id] = 5; + requestD[jss::params] = paramsD; + auto const linesD = env.rpc("json2", to_string(requestD)); BEAST_EXPECT( linesD[jss::error][jss::message] == RPC::make_error(rpcINVALID_PARAMS)[jss::error_message]); @@ -1270,18 +1195,16 @@ public: } { // A non-string marker should also fail. - auto const lines = env.rpc( - "json2", - "{ " - R"("method" : "account_lines",)" - R"("jsonrpc" : "2.0",)" - R"("ripplerpc" : "2.0",)" - R"("id" : 5,)" - R"("params": )" - R"({"account": ")" + - alice.human() + - R"(", )" - R"("marker": true}})"); + Json::Value params; + params[jss::account] = alice.human(); + params[jss::marker] = true; + Json::Value request; + request[jss::method] = "account_lines"; + request[jss::jsonrpc] = "2.0"; + request[jss::ripplerpc] = "2.0"; + request[jss::id] = 5; + request[jss::params] = params; + auto const lines = env.rpc("json2", to_string(request)); BEAST_EXPECT( lines[jss::error][jss::message] == RPC::expected_field_message(jss::marker, "string")); @@ -1294,20 +1217,17 @@ public: } { // Check that the flags we expect from alice to gw2 are present. - auto const lines = env.rpc( - "json2", - "{ " - R"("method" : "account_lines",)" - R"("jsonrpc" : "2.0",)" - R"("ripplerpc" : "2.0",)" - R"("id" : 5,)" - R"("params": )" - R"({"account": ")" + - alice.human() + - R"(", )" - R"("limit": 10, )" - R"("peer": ")" + - gw2.human() + R"("}})"); + Json::Value params; + params[jss::account] = alice.human(); + params[jss::limit] = 10; + params[jss::peer] = gw2.human(); + Json::Value request; + request[jss::method] = "account_lines"; + request[jss::jsonrpc] = "2.0"; + request[jss::ripplerpc] = "2.0"; + request[jss::id] = 5; + request[jss::params] = params; + auto const lines = env.rpc("json2", to_string(request)); auto const& line = lines[jss::result][jss::lines][0u]; BEAST_EXPECT(line[jss::freeze].asBool() == true); BEAST_EXPECT(line[jss::deep_freeze].asBool() == true); @@ -1322,20 +1242,17 @@ public: } { // Check that the flags we expect from gw2 to alice are present. - auto const linesA = env.rpc( - "json2", - "{ " - R"("method" : "account_lines",)" - R"("jsonrpc" : "2.0",)" - R"("ripplerpc" : "2.0",)" - R"("id" : 5,)" - R"("params": )" - R"({"account": ")" + - gw2.human() + - R"(", )" - R"("limit": 1, )" - R"("peer": ")" + - alice.human() + R"("}})"); + Json::Value paramsA; + paramsA[jss::account] = gw2.human(); + paramsA[jss::limit] = 1; + paramsA[jss::peer] = alice.human(); + Json::Value requestA; + requestA[jss::method] = "account_lines"; + requestA[jss::jsonrpc] = "2.0"; + requestA[jss::ripplerpc] = "2.0"; + requestA[jss::id] = 5; + requestA[jss::params] = paramsA; + auto const linesA = env.rpc("json2", to_string(requestA)); auto const& lineA = linesA[jss::result][jss::lines][0u]; BEAST_EXPECT(lineA[jss::freeze_peer].asBool() == true); BEAST_EXPECT(lineA[jss::deep_freeze_peer].asBool() == true); @@ -1351,23 +1268,18 @@ public: // Continue from the returned marker to make sure that works. 
BEAST_EXPECT(linesA[jss::result].isMember(jss::marker)); auto const marker = linesA[jss::result][jss::marker].asString(); - auto const linesB = env.rpc( - "json2", - "{ " - R"("method" : "account_lines",)" - R"("jsonrpc" : "2.0",)" - R"("ripplerpc" : "2.0",)" - R"("id" : 5,)" - R"("params": )" - R"({"account": ")" + - gw2.human() + - R"(", )" - R"("limit": 25, )" - R"("marker": ")" + - marker + - R"(", )" - R"("peer": ")" + - alice.human() + R"("}})"); + Json::Value paramsB; + paramsB[jss::account] = gw2.human(); + paramsB[jss::limit] = 25; + paramsB[jss::marker] = marker; + paramsB[jss::peer] = alice.human(); + Json::Value requestB; + requestB[jss::method] = "account_lines"; + requestB[jss::jsonrpc] = "2.0"; + requestB[jss::ripplerpc] = "2.0"; + requestB[jss::id] = 5; + requestB[jss::params] = paramsB; + auto const linesB = env.rpc("json2", to_string(requestB)); BEAST_EXPECT(linesB[jss::result][jss::lines].isArray()); BEAST_EXPECT(linesB[jss::result][jss::lines].size() == 25); BEAST_EXPECT(!linesB[jss::result].isMember(jss::marker)); @@ -1430,18 +1342,16 @@ public: env.close(); // Get account_lines for alice. Limit at 1, so we get a marker. - auto const linesBeg = env.rpc( - "json2", - "{ " - R"("method" : "account_lines",)" - R"("jsonrpc" : "2.0",)" - R"("ripplerpc" : "2.0",)" - R"("id" : 5,)" - R"("params": )" - R"({"account": ")" + - alice.human() + - R"(", )" - R"("limit": 2}})"); + Json::Value linesBegParams; + linesBegParams[jss::account] = alice.human(); + linesBegParams[jss::limit] = 2; + Json::Value linesBegRequest; + linesBegRequest[jss::method] = "account_lines"; + linesBegRequest[jss::jsonrpc] = "2.0"; + linesBegRequest[jss::ripplerpc] = "2.0"; + linesBegRequest[jss::id] = 5; + linesBegRequest[jss::params] = linesBegParams; + auto const linesBeg = env.rpc("json2", to_string(linesBegRequest)); BEAST_EXPECT( linesBeg[jss::result][jss::lines][0u][jss::currency] == "USD"); BEAST_EXPECT(linesBeg[jss::result].isMember(jss::marker)); @@ -1458,19 +1368,16 @@ public: // Since alice paid all her EUR to cheri, alice should no longer // have a trust line to gw1. So the old marker should now be invalid. - auto const linesEnd = env.rpc( - "json2", - "{ " - R"("method" : "account_lines",)" - R"("jsonrpc" : "2.0",)" - R"("ripplerpc" : "2.0",)" - R"("id" : 5,)" - R"("params": )" - R"({"account": ")" + - alice.human() + - R"(", )" - R"("marker": ")" + - linesBeg[jss::result][jss::marker].asString() + R"("}})"); + Json::Value linesEndParams; + linesEndParams[jss::account] = alice.human(); + linesEndParams[jss::marker] = linesBeg[jss::result][jss::marker]; + Json::Value linesEndRequest; + linesEndRequest[jss::method] = "account_lines"; + linesEndRequest[jss::jsonrpc] = "2.0"; + linesEndRequest[jss::ripplerpc] = "2.0"; + linesEndRequest[jss::id] = 5; + linesEndRequest[jss::params] = linesEndParams; + auto const linesEnd = env.rpc("json2", to_string(linesEndRequest)); BEAST_EXPECT( linesEnd[jss::error][jss::message] == RPC::make_error(rpcINVALID_PARAMS)[jss::error_message]); From b4c894c1baa784aa21131fbf5f2792225247d606 Mon Sep 17 00:00:00 2001 From: tequ Date: Fri, 17 Oct 2025 06:18:53 +0900 Subject: [PATCH 240/244] refactor: Autofill signature for Simulate RPC (#5852) This change enables autofilling of signature-related fields in the Simulate RPC. 
Co-authored-by: Bart Thomee <11445373+bthomee@users.noreply.github.com> --- src/xrpld/rpc/handlers/Simulate.cpp | 63 ++++++++++++++++------------- 1 file changed, 36 insertions(+), 27 deletions(-) diff --git a/src/xrpld/rpc/handlers/Simulate.cpp b/src/xrpld/rpc/handlers/Simulate.cpp index 092b0b4562..35cb7587d4 100644 --- a/src/xrpld/rpc/handlers/Simulate.cpp +++ b/src/xrpld/rpc/handlers/Simulate.cpp @@ -72,39 +72,23 @@ getAutofillSequence(Json::Value const& tx_json, RPC::JsonContext& context) } static std::optional -autofillTx(Json::Value& tx_json, RPC::JsonContext& context) +autofillSignature(Json::Value& sigObject) { - if (!tx_json.isMember(jss::Fee)) - { - // autofill Fee - // Must happen after all the other autofills happen - // Error handling/messaging works better that way - auto feeOrError = RPC::getCurrentNetworkFee( - context.role, - context.app.config(), - context.app.getFeeTrack(), - context.app.getTxQ(), - context.app, - tx_json); - if (feeOrError.isMember(jss::error)) - return feeOrError; - tx_json[jss::Fee] = feeOrError; - } - - if (!tx_json.isMember(jss::SigningPubKey)) + if (!sigObject.isMember(jss::SigningPubKey)) { // autofill SigningPubKey - tx_json[jss::SigningPubKey] = ""; + sigObject[jss::SigningPubKey] = ""; } - if (tx_json.isMember(jss::Signers)) + if (sigObject.isMember(jss::Signers)) { - if (!tx_json[jss::Signers].isArray()) + if (!sigObject[jss::Signers].isArray()) return RPC::invalid_field_error("tx.Signers"); // check multisigned signers - for (unsigned index = 0; index < tx_json[jss::Signers].size(); index++) + for (unsigned index = 0; index < sigObject[jss::Signers].size(); + index++) { - auto& signer = tx_json[jss::Signers][index]; + auto& signer = sigObject[jss::Signers][index]; if (!signer.isObject() || !signer.isMember(jss::Signer) || !signer[jss::Signer].isObject()) return RPC::invalid_field_error( @@ -129,16 +113,41 @@ autofillTx(Json::Value& tx_json, RPC::JsonContext& context) } } - if (!tx_json.isMember(jss::TxnSignature)) + if (!sigObject.isMember(jss::TxnSignature)) { // autofill TxnSignature - tx_json[jss::TxnSignature] = ""; + sigObject[jss::TxnSignature] = ""; } - else if (tx_json[jss::TxnSignature] != "") + else if (sigObject[jss::TxnSignature] != "") { // Transaction must not be signed return rpcError(rpcTX_SIGNED); } + return std::nullopt; +} + +static std::optional +autofillTx(Json::Value& tx_json, RPC::JsonContext& context) +{ + if (!tx_json.isMember(jss::Fee)) + { + // autofill Fee + // Must happen after all the other autofills happen + // Error handling/messaging works better that way + auto feeOrError = RPC::getCurrentNetworkFee( + context.role, + context.app.config(), + context.app.getFeeTrack(), + context.app.getTxQ(), + context.app, + tx_json); + if (feeOrError.isMember(jss::error)) + return feeOrError; + tx_json[jss::Fee] = feeOrError; + } + + if (auto error = autofillSignature(tx_json)) + return *error; if (!tx_json.isMember(jss::Sequence)) { From 0b113f371fafb4fa34fe7cb1f61d8cbc0dbe5764 Mon Sep 17 00:00:00 2001 From: Ayaz Salikhov Date: Fri, 17 Oct 2025 14:40:10 +0100 Subject: [PATCH 241/244] refactor: Update pre-commit workflow to latest version (#5902) Co-authored-by: Bart Thomee <11445373+bthomee@users.noreply.github.com> --- .github/workflows/pre-commit.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml index da0fb02c19..66ee2f3334 100644 --- a/.github/workflows/pre-commit.yml +++ b/.github/workflows/pre-commit.yml @@ -9,7 +9,7 @@ on: jobs: # 
Call the workflow in the XRPLF/actions repo that runs the pre-commit hooks. run-hooks: - uses: XRPLF/actions/.github/workflows/pre-commit.yml@af1b0f0d764cda2e5435f5ac97b240d4bd4d95d3 + uses: XRPLF/actions/.github/workflows/pre-commit.yml@a8d7472b450eb53a1e5228f64552e5974457a21a with: runs_on: ubuntu-latest container: '{ "image": "ghcr.io/xrplf/ci/tools-rippled-pre-commit:sha-a8c7be1" }' From b64707f53b5f06cc508327eecddee847152e80cc Mon Sep 17 00:00:00 2001 From: Bart Date: Fri, 17 Oct 2025 10:09:47 -0400 Subject: [PATCH 242/244] chore: Add support for RHEL 8 (#5880) Co-authored-by: Bart Thomee <11445373+bthomee@users.noreply.github.com> --- .github/scripts/strategy-matrix/linux.json | 26 +++++++++++++++++----- 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/.github/scripts/strategy-matrix/linux.json b/.github/scripts/strategy-matrix/linux.json index b8da322118..317fa640b2 100644 --- a/.github/scripts/strategy-matrix/linux.json +++ b/.github/scripts/strategy-matrix/linux.json @@ -73,47 +73,61 @@ "compiler_version": "20", "image_sha": "6948666" }, + { + "distro_name": "rhel", + "distro_version": "8", + "compiler_name": "gcc", + "compiler_version": "14", + "image_sha": "10e69b4" + }, + { + "distro_name": "rhel", + "distro_version": "8", + "compiler_name": "clang", + "compiler_version": "any", + "image_sha": "10e69b4" + }, { "distro_name": "rhel", "distro_version": "9", "compiler_name": "gcc", "compiler_version": "12", - "image_sha": "6948666" + "image_sha": "10e69b4" }, { "distro_name": "rhel", "distro_version": "9", "compiler_name": "gcc", "compiler_version": "13", - "image_sha": "6948666" + "image_sha": "10e69b4" }, { "distro_name": "rhel", "distro_version": "9", "compiler_name": "gcc", "compiler_version": "14", - "image_sha": "6948666" + "image_sha": "10e69b4" }, { "distro_name": "rhel", "distro_version": "9", "compiler_name": "clang", "compiler_version": "any", - "image_sha": "6948666" + "image_sha": "10e69b4" }, { "distro_name": "rhel", "distro_version": "10", "compiler_name": "gcc", "compiler_version": "14", - "image_sha": "6948666" + "image_sha": "10e69b4" }, { "distro_name": "rhel", "distro_version": "10", "compiler_name": "clang", "compiler_version": "any", - "image_sha": "6948666" + "image_sha": "10e69b4" }, { "distro_name": "ubuntu", From 55235572261a8bb8148fe77ecfa79ba00fa0b0bc Mon Sep 17 00:00:00 2001 From: Bart Date: Fri, 17 Oct 2025 12:04:58 -0400 Subject: [PATCH 243/244] chore: Clean up Conan variables in CI (#5903) This change sanitizes inputs by setting them as environment variables, and adjusts the number of CPUs used for building. Namely, GitHub inputs should be sanitized, per recommendation by Semgrep, as using them directly poses a security risk. A recent change further overrode the global configuration by having builds use all cores, but as we have noticed an increased number of job cancelation this change updates it to use all cores less one. 
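The sanitization half of this change follows the usual GitHub Actions hardening pattern: an expression such as `${{ inputs.build_type }}` that is interpolated directly into a `run:` script is substituted into the script text before the shell executes it, so a crafted value can inject commands, whereas a value routed through `env:` reaches the script as ordinary environment data. A minimal sketch of the pattern, shown as an illustrative step fragment rather than a verbatim excerpt from the action:

    # Risky form (what scanners such as Semgrep flag): the input is pasted
    # straight into the script text, so a value like '"; some-arbitrary-command'
    # would be executed by the shell.
    #   run: echo "Building ${{ inputs.build_type }}"
    #
    # Hardened form: the expression is expanded by the runner outside the shell,
    # and the script only ever sees an environment variable.
    - name: Show build type
      env:
        BUILD_TYPE: ${{ inputs.build_type }}
      run: echo "Building $BUILD_TYPE"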
Co-authored-by: Bart Thomee <11445373+bthomee@users.noreply.github.com> --- .github/actions/build-deps/action.yml | 6 +++--- .github/workflows/reusable-notify-clio.yml | 4 +++- conan/global.conf | 2 +- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/.github/actions/build-deps/action.yml b/.github/actions/build-deps/action.yml index 7b2a3c385a..d99ea77bf5 100644 --- a/.github/actions/build-deps/action.yml +++ b/.github/actions/build-deps/action.yml @@ -28,6 +28,7 @@ runs: BUILD_DIR: ${{ inputs.build_dir }} BUILD_OPTION: ${{ inputs.force_build == 'true' && '*' || 'missing' }} BUILD_TYPE: ${{ inputs.build_type }} + VERBOSITY: ${{ inputs.verbosity }} run: | echo 'Installing dependencies.' mkdir -p '${{ env.BUILD_DIR }}' @@ -38,7 +39,6 @@ runs: --options:host='&:tests=True' \ --options:host='&:xrpld=True' \ --settings:all build_type='${{ env.BUILD_TYPE }}' \ - --conf:all tools.build:verbosity='${{ inputs.verbosity }}' \ - --conf:all tools.compilation:verbosity='${{ inputs.verbosity }}' \ - --conf:all tools.build:jobs=$(nproc) \ + --conf:all tools.build:verbosity='${{ env.VERBOSITY }}' \ + --conf:all tools.compilation:verbosity='${{ env.VERBOSITY }}' \ .. diff --git a/.github/workflows/reusable-notify-clio.yml b/.github/workflows/reusable-notify-clio.yml index 99009d953e..fe749beac9 100644 --- a/.github/workflows/reusable-notify-clio.yml +++ b/.github/workflows/reusable-notify-clio.yml @@ -64,7 +64,9 @@ jobs: conan_remote_name: ${{ inputs.conan_remote_name }} conan_remote_url: ${{ inputs.conan_remote_url }} - name: Log into Conan remote - run: conan remote login ${{ inputs.conan_remote_name }} "${{ secrets.conan_remote_username }}" --password "${{ secrets.conan_remote_password }}" + env: + CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }} + run: conan remote login ${{ env.CONAN_REMOTE_NAME }} "${{ secrets.conan_remote_username }}" --password "${{ secrets.conan_remote_password }}" - name: Upload package env: CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }} diff --git a/conan/global.conf b/conan/global.conf index 41ac76da89..a184adf629 100644 --- a/conan/global.conf +++ b/conan/global.conf @@ -3,4 +3,4 @@ core:non_interactive=True core.download:parallel={{ os.cpu_count() }} core.upload:parallel={{ os.cpu_count() }} -tools.build:jobs={{ (os.cpu_count() * 4/5) | int }} +tools.build:jobs={{ os.cpu_count() - 1 }} From afb6e0e41b456365645f02a19569947956d1382a Mon Sep 17 00:00:00 2001 From: Bart Date: Fri, 17 Oct 2025 12:17:02 -0400 Subject: [PATCH 244/244] chore: Set fail fast to false, except for when the merge group is used (#5897) This PR sets the fail-fast strategy option to false (it defaults to true), unless it is run by a merge group. 
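For context, `fail-fast` controls whether one failing job in a matrix cancels its still-running siblings: in the merge queue any failure already blocks the merge, so stopping the other builds early frees runners, while on a pull request developers usually want every platform to report its own result. A minimal sketch of the behaviour, using a hypothetical standalone workflow rather than the reusable workflows in this repository:

    name: fail-fast-sketch
    on: [pull_request, merge_group]

    jobs:
      build:
        strategy:
          # Evaluates to true only for merge-queue runs; there, one failure
          # cancels the sibling os jobs. For pull requests it evaluates to
          # false and every os still finishes and reports its result.
          fail-fast: ${{ github.event_name == 'merge_group' }}
          matrix:
            os: [linux, macos, windows]
        runs-on: ubuntu-latest
        steps:
          - run: echo "Building for ${{ matrix.os }}"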
Co-authored-by: Bart Thomee <11445373+bthomee@users.noreply.github.com> --- .github/workflows/on-pr.yml | 1 + .github/workflows/on-trigger.yml | 1 + .github/workflows/reusable-build-test.yml | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/on-pr.yml b/.github/workflows/on-pr.yml index 47323ee4a7..6d74486e96 100644 --- a/.github/workflows/on-pr.yml +++ b/.github/workflows/on-pr.yml @@ -103,6 +103,7 @@ jobs: if: ${{ needs.should-run.outputs.go == 'true' }} uses: ./.github/workflows/reusable-build-test.yml strategy: + fail-fast: false matrix: os: [linux, macos, windows] with: diff --git a/.github/workflows/on-trigger.yml b/.github/workflows/on-trigger.yml index 9d2ea81520..c1f6839d2d 100644 --- a/.github/workflows/on-trigger.yml +++ b/.github/workflows/on-trigger.yml @@ -65,6 +65,7 @@ jobs: build-test: uses: ./.github/workflows/reusable-build-test.yml strategy: + fail-fast: ${{ github.event_name == 'merge_group' }} matrix: os: [linux, macos, windows] with: diff --git a/.github/workflows/reusable-build-test.yml b/.github/workflows/reusable-build-test.yml index 5bc9cf2557..c6e991df79 100644 --- a/.github/workflows/reusable-build-test.yml +++ b/.github/workflows/reusable-build-test.yml @@ -42,7 +42,7 @@ jobs: - generate-matrix uses: ./.github/workflows/reusable-build-test-config.yml strategy: - fail-fast: false + fail-fast: ${{ github.event_name == 'merge_group' }} matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }} max-parallel: 10 with:
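One design detail worth noting from the diffs above: `reusable-build-test.yml` previously hard-coded `fail-fast: false`, and the change replaces that literal with the `github.event_name` expression, so the same strategy block now behaves differently for pull-request and merge-queue runs without needing two workflow variants.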